author     Olof Johansson <olof@lixom.net>    2013-08-29 19:12:04 -0700
committer  Olof Johansson <olof@lixom.net>    2013-08-29 19:12:04 -0700
commit     2bbc31ef2b398d17016c6438016cc76913990950
tree       10de572013948341054d73acfab0f6d77ae35cbd /include/xen/interface/io/blkif.h
parent     a3e008d1b5e39abe81e233bde623604841ff9a73
parent     af0721966195b3966214edd54af0d5f84d65419a
Merge tag 'omap-for-v3.12/fixes-non-critical-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/fixes-non-critical
From Tony Lindgren:
Omap fixes for the merge window that are not urgent enough
for the -rc series.
* tag 'omap-for-v3.12/fixes-non-critical-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
ARM: OMAP2: use 'int' instead of 'unsigned' for variable 'gpmc_irq_start'
ARM: OMAP2: remove useless variable 'ret'
ARM: OMAP: dma: fix error return code in omap_system_dma_probe()
ARM: OMAP2+: fix wrong address when loading PRM_FRAC_INCREMENTOR_DENUMERATOR_RELOAD
ARM: OMAP2+: am33xx-restart: trigger warm reset on omap2+ boards
ARM: OMAP2: Use a consistent AM33XX SoC option description
ARM: OMAP2+: Remove legacy device creation for McPDM and DMIC
+ Linux 3.11-rc6
Diffstat (limited to 'include/xen/interface/io/blkif.h')
-rw-r--r--  include/xen/interface/io/blkif.h | 53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91c..65e12099ef89 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_OP_DISCARD           5
 
 /*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment_aligned
+ * entries that hold the information about the segments. The number of
+ * indirect pages to use is determined by the maximum number of segments
+ * an indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
+/*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
  * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;        /* reference to I/O buffer frame        */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t       id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t       _pad3;        /* make it 64 byte aligned */
+#else
+	uint64_t       _pad3;        /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
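The new header comment fixes the per-page segment count at PAGE_SIZE/sizeof(blkif_request_segment_aligned) = 4096/8 = 512 and derives the number of indirect pages as ceil(indirect_segments/512). The userspace sketch below only mirrors that arithmetic so the sizes can be checked outside the kernel; it assumes 4 KiB pages, and the names seg_aligned and indirect_pages_needed() are hypothetical stand-ins, not part of the Xen interface headers.

```c
/*
 * Minimal userspace sketch (not kernel code) of the indirect-page math
 * described in the blkif.h comment above. Assumes PAGE_SIZE = 4096.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096                  /* assumption: 4 KiB pages */

typedef uint32_t grant_ref_t;           /* grant references are 32-bit */

/* Mirrors blkif_request_segment_aligned: 4 + 1 + 1 + 2 = 8 bytes when packed. */
struct seg_aligned {
	grant_ref_t gref;
	uint8_t     first_sect, last_sect;
	uint16_t    _pad;
} __attribute__((__packed__));

/* 4096 / 8 = 512 segments fit in one indirect page. */
#define SEGS_PER_INDIRECT_PAGE (PAGE_SIZE / sizeof(struct seg_aligned))

/* Hypothetical helper: ceil(nr_segments / SEGS_PER_INDIRECT_PAGE). */
static unsigned long indirect_pages_needed(unsigned long nr_segments)
{
	return (nr_segments + SEGS_PER_INDIRECT_PAGE - 1) / SEGS_PER_INDIRECT_PAGE;
}

int main(void)
{
	/* e.g. a backend advertising feature-max-indirect-segments = 256 */
	unsigned long examples[] = { 11, 32, 256, 512, 513 };
	size_t i;

	printf("sizeof(struct seg_aligned) = %zu\n", sizeof(struct seg_aligned));
	for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
		printf("%lu segments -> %lu indirect page(s)\n",
		       examples[i], indirect_pages_needed(examples[i]));
	return 0;
}
```

A real frontend additionally has to grant the indirect pages to the backend and copy the per-segment entries into them before filling the indirect_grefs array; the sketch covers only the page-count arithmetic.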