block: simplify BIOVEC_PHYS_MERGEABLE

Turn the macro into an inline, move it to blk.h and simplify the
arch hooks a bit.

Also rename the function to biovec_phys_mergeable as there is no need
to shout.
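
The arch-side hunks are not included in the excerpt below; roughly, an
architecture that needs an extra contiguity check (the Xen case) now only
supplies ARCH_BIOVEC_PHYS_MERGEABLE instead of redefining the whole macro.
A hedged sketch of what such an arch header ends up looking like (the exact
guard, include, and helper spelling in the affected arch files may differ):

/* Sketch only: arch header opting into the new hook. */
#ifdef CONFIG_XEN
#include <xen/xen.h>

/*
 * Refuse the physical-merge fast path when running under Xen and the
 * underlying machine frames are not actually contiguous.
 */
#define ARCH_BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))
#endif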

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Author:    Christoph Hellwig
Date:      2018-09-24 09:43:50 +02:00
Committer: Jens Axboe
Parent:    27ca1d4ed0
Commit:    6a9f5f240a

8 changed files with 28 additions and 30 deletions

diff --git a/block/bio.c b/block/bio.c

@@ -731,7 +731,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 	}
 
 	/* If we may be able to merge these biovecs, force a recount */
-	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
+	if (bio->bi_vcnt > 1 && biovec_phys_mergeable(bvec-1, bvec))
 		bio_clear_flag(bio, BIO_SEG_VALID);
 
  done:

diff --git a/block/blk-integrity.c b/block/blk-integrity.c

@@ -49,7 +49,7 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 	bio_for_each_integrity_vec(iv, bio, iter) {
 
 		if (prev) {
-			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+			if (!biovec_phys_mergeable(&ivprv, &iv))
 				goto new_segment;
 
 			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
@@ -95,7 +95,7 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
 	bio_for_each_integrity_vec(iv, bio, iter) {
 
 		if (prev) {
-			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+			if (!biovec_phys_mergeable(&ivprv, &iv))
 				goto new_segment;
 
 			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))

diff --git a/block/blk-merge.c b/block/blk-merge.c

@@ -21,7 +21,7 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
 		struct bio *prev, struct bio_vec *prev_last_bv,
 		struct bio_vec *next_first_bv)
 {
-	if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
+	if (!biovec_phys_mergeable(prev_last_bv, next_first_bv))
 		return false;
 	if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
 		return false;
@@ -199,7 +199,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
+			if (!biovec_phys_mergeable(bvprvp, &bv))
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
@@ -332,7 +332,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
-				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+				if (!biovec_phys_mergeable(&bvprv, &bv))
 					goto new_segment;
 				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
 					goto new_segment;
@@ -414,7 +414,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	bio_get_last_bvec(bio, &end_bv);
 	bio_get_first_bvec(nxt, &nxt_bv);
 
-	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
+	if (!biovec_phys_mergeable(&end_bv, &nxt_bv))
 		return 0;
 
 	/*
@@ -439,7 +439,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 
-		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+		if (!biovec_phys_mergeable(bvprv, bvec))
 			goto new_segment;
 		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;

diff --git a/block/blk.h b/block/blk.h

@@ -149,6 +149,20 @@ static inline void blk_queue_enter_live(struct request_queue *q)
 	percpu_ref_get(&q->q_usage_counter);
 }
 
+#ifndef ARCH_BIOVEC_PHYS_MERGEABLE
+#define ARCH_BIOVEC_PHYS_MERGEABLE(vec1, vec2) true
+#endif
+
+static inline bool biovec_phys_mergeable(const struct bio_vec *vec1,
+		const struct bio_vec *vec2)
+{
+	if (bvec_to_phys(vec1) + vec1->bv_len != bvec_to_phys(vec2))
+		return false;
+	if (!ARCH_BIOVEC_PHYS_MERGEABLE(vec1, vec2))
+		return false;
+	return true;
+}
+
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
 		struct bio_vec *bprv, unsigned int offset)
 {
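
A quick usage sketch of the new helper (illustrative only, not part of the
patch; the example function and its page layout are made up, and it assumes
the bvec_to_phys()-based definition added to block/blk.h above):

#include <linux/bio.h>
#include <linux/gfp.h>
#include "blk.h"	/* biovec_phys_mergeable() now lives here */

/*
 * Vectors that cover physically adjacent byte ranges merge; vectors with
 * a hole between them do not.
 */
static void __maybe_unused biovec_merge_example(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	struct bio_vec a, b, c;

	if (!page)
		return;

	a = (struct bio_vec){ .bv_page = page, .bv_offset = 0,    .bv_len = 512 };
	b = (struct bio_vec){ .bv_page = page, .bv_offset = 512,  .bv_len = 512 };
	c = (struct bio_vec){ .bv_page = page, .bv_offset = 2048, .bv_len = 512 };

	WARN_ON(!biovec_phys_mergeable(&a, &b));	/* a ends where b starts */
	WARN_ON(biovec_phys_mergeable(&a, &c));		/* 1.5K hole after a */

	__free_page(page);
}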