Merge branch 'for-3.11/drivers' of git://git.kernel.dk/linux-block
Pull block IO driver bits from Jens Axboe:
 "As I mentioned in the core block pull request, due to real life
  circumstances the driver pull request would be late.  Now it looks
  like -rc2 late...  On the plus side, apart from the rsxx update, these
  are all things that I could argue could go in later in the cycle, as
  they are fixes and not features.  So even though things are late, it's
  not ALL bad.

  The pull request contains:

   - Updates to bcache, all bug fixes, from Kent.

   - A pile of drbd bug fixes (no big features this time!).

   - xen blk front/back fixes.

   - rsxx driver updates, some of them deferred from 3.10, so they
     should be well cooked by now"

* 'for-3.11/drivers' of git://git.kernel.dk/linux-block: (63 commits)
  bcache: Allocation kthread fixes
  bcache: Fix GC_SECTORS_USED() calculation
  bcache: Journal replay fix
  bcache: Shutdown fix
  bcache: Fix a sysfs splat on shutdown
  bcache: Advertise that flushes are supported
  bcache: check for allocation failures
  bcache: Fix a dumb race
  bcache: Use standard utility code
  bcache: Update email address
  bcache: Delete fuzz tester
  bcache: Document shrinker reserve better
  bcache: FUA fixes
  drbd: Allow online change of al-stripes and al-stripe-size
  drbd: Constants should be UPPERCASE
  drbd: Ignore the exit code of a fence-peer handler if it returns too late
  drbd: Fix rcu_read_lock balance on error path
  drbd: fix error return code in drbd_init()
  drbd: Do not sleep inside rcu
  bcache: Refresh usage docs
  ...
@@ -102,6 +102,30 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_OP_DISCARD           5
 
+/*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment_aligned
+ * that hold the information about the segments. The number of indirect
+ * pages to use is determined by the maximum number of segments
+ * an indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
 /*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
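The ceil(indirect_segments/512) calculation described in the comment above is plain
integer arithmetic. As a minimal illustrative sketch (not part of the patch; the
constant and helper names here are made up), a frontend could size the indirect page
array like this, assuming 4 KiB pages and the 8-byte packed segment struct added in
the next hunk:

    /* 512 = PAGE_SIZE / sizeof(struct blkif_request_segment_aligned):
     * assumes 4096-byte pages and the 8-byte packed segment struct. */
    #define SEGS_PER_INDIRECT_FRAME (4096u / 8u)

    /* ceil(indirect_segments / 512) via integer round-up division */
    static inline unsigned int
    indirect_pages_needed(unsigned int indirect_segments)
    {
        return (indirect_segments + SEGS_PER_INDIRECT_FRAME - 1) /
               SEGS_PER_INDIRECT_FRAME;
    }

For example, a request with 600 indirect segments needs indirect_pages_needed(600)
== 2 grant pages; with the 8-entry indirect_grefs[] array added below, a single
request can address at most 8 * 512 = 4096 segments.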
@@ -109,6 +133,16 @@ typedef uint64_t blkif_sector_t;
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;        /* reference to I/O buffer frame        */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
 	blkif_vdev_t   handle;       /* only for read/write requests */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t     id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t       _pad3;        /* make it 64 byte aligned */
+#else
+	uint64_t       _pad3;        /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
 
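For illustration only, here is a hedged sketch of how a frontend might populate one
of the new indirect requests. fill_indirect_request() and its arguments are
hypothetical names, not from the patch; grant allocation, segment-page setup, and
ring accounting are omitted, and the types and opcodes come from the blkif.h
definitions above:

    /* Hypothetical helper: fill a ring slot with an indirect read request.
     * page_grefs[] holds npages grant references for the pages that carry
     * the blkif_request_segment_aligned arrays. */
    static void fill_indirect_request(struct blkif_request *req,
                                      uint64_t id,
                                      blkif_sector_t sector,
                                      blkif_vdev_t handle,
                                      uint16_t nr_segments,
                                      const grant_ref_t *page_grefs,
                                      unsigned int npages)
    {
        unsigned int i;

        req->operation = BLKIF_OP_INDIRECT;
        req->u.indirect.indirect_op = BLKIF_OP_READ; /* the real transfer op */
        req->u.indirect.nr_segments = nr_segments;
        req->u.indirect.id = id;
        req->u.indirect.sector_number = sector;
        req->u.indirect.handle = handle;
        for (i = 0; i < npages; i++)
            req->u.indirect.indirect_grefs[i] = page_grefs[i];
    }

Note that operation == BLKIF_OP_INDIRECT only signals the indirect layout; the
backend reads indirect_op to learn the actual transfer direction.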
@@ -188,6 +188,11 @@ struct __name##_back_ring {				\
 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)				\
+    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
 #define RING_PUSH_REQUESTS(_r) do {					\
     wmb(); /* back sees requests /before/ updated producer index */	\
     (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
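The new RING_REQUEST_PROD_OVERFLOW macro lets a backend detect a frontend that
advertises more outstanding requests than the ring can possibly hold. A minimal
sketch of the intended use, with a hypothetical check_frontend() helper and
struct my_back_ring standing in for a DEFINE_RING_TYPES-generated back ring:

    /* Sketch: reject a frontend whose published req_prod implies more
     * than RING_SIZE outstanding requests (a protocol violation). */
    static int check_frontend(struct my_back_ring *ring)
    {
        RING_IDX rp = ring->sring->req_prod;

        rmb(); /* see the requests before acting on the producer index */
        if (RING_REQUEST_PROD_OVERFLOW(ring, rp))
            return -EACCES; /* ill-behaved frontend: stop processing */
        return 0;
    }

Unlike RING_REQUEST_CONS_OVERFLOW, which uses >= to stop a consumer from walking
past the last valid request, the producer check uses > because exactly RING_SIZE
outstanding requests (a completely full ring) is still legal.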