drop vmerge accounting
Remove hw_segments field from struct bio and struct request. Without
virtual merge accounting they have no purpose.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 5df97b91b5 (parent b8b3e16cfe)
committed by Jens Axboe
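The idea the diff below implements: once virtual merging is gone, a hardware segment can never cover more than one physical segment, so the two counters are always equal and only nr_phys_segments needs to survive; the one counter is simply checked against both queue limits. A minimal sketch of the resulting merge check, using the names from ll_new_hw_segment() below (the helper name can_merge_bio and the stripped-down context are illustrative, not part of the patch):

	/* Illustrative sketch only -- the real logic lives in ll_new_hw_segment().
	 * With vmerge accounting dropped, the single physical-segment counter is
	 * checked against both queue limits.
	 */
	static inline int can_merge_bio(struct request_queue *q, struct request *req,
					struct bio *bio)
	{
		int nr_phys_segs = bio_phys_segments(q, bio);

		/* nr_phys_segments now stands in for the old nr_hw_segments too */
		if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments ||
		    req->nr_phys_segments + nr_phys_segs > q->max_phys_segments)
			return 0;	/* would exceed a queue limit: no merge */

		return 1;
	}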
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -2026,7 +2026,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
-		rq->nr_hw_segments = bio_hw_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
 	rq->current_nr_sectors = bio_cur_sectors(bio);
diff --git a/block/blk-merge.c b/block/blk-merge.c
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 void blk_recalc_rq_segments(struct request *rq)
 {
 	int nr_phys_segs;
-	int nr_hw_segs;
 	unsigned int phys_size;
-	unsigned int hw_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int seg_size;
-	int hw_seg_size;
 	int cluster;
 	struct req_iterator iter;
 	int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
 		return;
 
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	hw_seg_size = seg_size = 0;
-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	seg_size = 0;
+	phys_size = nr_phys_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
@@ -76,30 +73,17 @@ void blk_recalc_rq_segments(struct request *rq)
 				goto new_segment;
 
 			seg_size += bv->bv_len;
-			hw_seg_size += bv->bv_len;
 			bvprv = bv;
 			continue;
 		}
 new_segment:
-		if (nr_hw_segs == 1 &&
-		    hw_seg_size > rq->bio->bi_hw_front_size)
-			rq->bio->bi_hw_front_size = hw_seg_size;
-		hw_seg_size = bv->bv_len;
-		nr_hw_segs++;
-
 		nr_phys_segs++;
 		bvprv = bv;
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
 
-	if (nr_hw_segs == 1 &&
-	    hw_seg_size > rq->bio->bi_hw_front_size)
-		rq->bio->bi_hw_front_size = hw_seg_size;
-	if (hw_seg_size > rq->biotail->bi_hw_back_size)
-		rq->biotail->bi_hw_back_size = hw_seg_size;
 	rq->nr_phys_segments = nr_phys_segs;
-	rq->nr_hw_segments = nr_hw_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -112,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	blk_recalc_rq_segments(&rq);
 	bio->bi_next = nxt;
 	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
@@ -255,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
 {
-	int nr_hw_segs = bio_hw_segments(q, bio);
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
@@ -270,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	 * This will form the start of a new hw segment. Bump both
 	 * counters.
 	 */
-	req->nr_hw_segments += nr_hw_segs;
 	req->nr_phys_segments += nr_phys_segs;
 	return 1;
 }
@@ -328,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
-	int total_hw_segments;
 
 	/*
 	 * First check if the either of the requests are re-queued
@@ -350,14 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > q->max_phys_segments)
 		return 0;
 
-	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-
-	if (total_hw_segments > q->max_hw_segments)
+	if (total_phys_segments > q->max_hw_segments)
 		return 0;
 
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
-	req->nr_hw_segments = total_hw_segments;
 	return 1;
 }
diff --git a/block/elevator.c b/block/elevator.c
@@ -790,7 +790,6 @@ struct request *elv_next_request(struct request_queue *q)
 			 * device can handle
 			 */
 			rq->nr_phys_segments++;
-			rq->nr_hw_segments++;
 		}
 
 		if (!q->prep_rq_fn)
@@ -813,7 +812,6 @@ struct request *elv_next_request(struct request_queue *q)
 				 * so that we don't add it again
 				 */
 				--rq->nr_phys_segments;
-				--rq->nr_hw_segments;
 			}
 
 			rq = NULL;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
@@ -1302,9 +1302,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 		sbio->bi_size = r1_bio->sectors << 9;
 		sbio->bi_idx = 0;
 		sbio->bi_phys_segments = 0;
-		sbio->bi_hw_segments = 0;
-		sbio->bi_hw_front_size = 0;
-		sbio->bi_hw_back_size = 0;
 		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		sbio->bi_flags |= 1 << BIO_UPTODATE;
 		sbio->bi_next = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
@@ -1345,9 +1345,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 		tbio->bi_size = r10_bio->sectors << 9;
 		tbio->bi_idx = 0;
 		tbio->bi_phys_segments = 0;
-		tbio->bi_hw_segments = 0;
-		tbio->bi_hw_front_size = 0;
-		tbio->bi_hw_back_size = 0;
 		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		tbio->bi_flags |= 1 << BIO_UPTODATE;
 		tbio->bi_next = NULL;
diff --git a/fs/bio.c b/fs/bio.c
@@ -208,14 +208,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
-{
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-		blk_recount_segments(q, bio);
-
-	return bio->bi_hw_segments;
-}
-
 /**
  * __bio_clone	-	clone a bio
  * @bio: destination bio
@@ -350,7 +342,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 */
 
 		while (bio->bi_phys_segments >= q->max_phys_segments
-		       || bio->bi_hw_segments >= q->max_hw_segments) {
+		       || bio->bi_phys_segments >= q->max_hw_segments) {
 
 			if (retried_segments)
 				return 0;
@@ -399,7 +391,6 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
-	bio->bi_hw_segments++;
 done:
 	bio->bi_size += len;
 	return len;
@@ -1381,7 +1372,6 @@ EXPORT_SYMBOL(bio_init);
 EXPORT_SYMBOL(__bio_clone);
 EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -77,21 +77,8 @@ struct bio {
 	 */
 	unsigned short		bi_phys_segments;
 
-	/* Number of segments after physical and DMA remapping
-	 * hardware coalescing is performed.
-	 */
-	unsigned short		bi_hw_segments;
-
 	unsigned int		bi_size;	/* residual I/O count */
 
-	/*
-	 * To keep track of the max hw size, we account for the
-	 * sizes of the first and last virtually mergeable segments
-	 * in this bio
-	 */
-	unsigned int		bi_hw_front_size;
-	unsigned int		bi_hw_back_size;
-
 	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
@@ -113,7 +100,7 @@ struct bio {
 #define BIO_UPTODATE	0	/* ok after I/O completion */
 #define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
 #define BIO_EOF		2	/* out-out-bounds error */
-#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
+#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
 #define BIO_CLONED	4	/* doesn't own data */
 #define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define BIO_USER_MAPPED 6	/* contains user pages */
@@ -324,7 +311,6 @@ extern void bio_free(struct bio *, struct bio_set *);
 extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern void __bio_clone(struct bio *, struct bio *);
 extern struct bio *bio_clone(struct bio *, gfp_t);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -189,13 +189,6 @@ struct request {
 	 */
 	unsigned short nr_phys_segments;
 
-	/* Number of scatter-gather addr+len pairs after
-	 * physical and DMA remapping hardware coalescing is performed.
-	 * This is the number of scatter-gather entries the driver
-	 * will actually have to deal with after DMA mapping is done.
-	 */
-	unsigned short nr_hw_segments;
-
 	unsigned short ioprio;
 
 	void *special;
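With nr_hw_segments gone, rq->nr_phys_segments is the single remaining segment count, and it is what a driver sizes its scatter-gather table by before calling the block layer's existing blk_rq_map_sg() mapper. A hedged driver-side sketch (my_driver_prep and MY_MAX_SEGMENTS are hypothetical names for illustration; this code is not part of the commit):

	/* Hypothetical driver code, for illustration only. */
	static int my_driver_prep(struct request_queue *q, struct request *rq)
	{
		struct scatterlist sg[MY_MAX_SEGMENTS];	/* MY_MAX_SEGMENTS: made-up limit */
		int count;

		/* nr_phys_segments is now the only upper bound on S/G entries */
		BUG_ON(rq->nr_phys_segments > MY_MAX_SEGMENTS);

		count = blk_rq_map_sg(q, rq, sg);	/* fills sg[], returns entries used */
		/* ... program 'count' entries into the hardware ... */
		return 0;
	}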