bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers use bio_rw_flagged() directly. Then it is at least directly apparent what variable and flag they check.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
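For context, the conversion in the hunks below is purely mechanical: each removed per-flag helper (bio_rw_ahead(), bio_barrier(), bio_sync()) tested one bit of bio->bi_rw behind its name, while bio_rw_flagged() takes the flag as an explicit argument at the call site. The following is a minimal standalone sketch of that shape, not the kernel header itself; the struct, the enum values and the old macro definition are simplified stand-ins for what lives in include/linux/bio.h.

/* Userspace sketch of the old vs. new flag test; simplified stand-ins for
 * the real definitions in include/linux/bio.h. */
#include <stdbool.h>
#include <stdio.h>

enum bio_rw_flags { BIO_RW_AHEAD, BIO_RW_BARRIER, BIO_RW_SYNCIO }; /* illustrative values */

struct bio { unsigned long bi_rw; };

/* old style: one helper per flag, the tested flag is hidden inside the helper */
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))

/* new style: one helper, the caller names the flag it checks */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}

int main(void)
{
	struct bio bio = { .bi_rw = 1UL << BIO_RW_BARRIER };

	printf("bio_barrier: %d, bio_rw_flagged(BIO_RW_BARRIER): %d\n",
	       bio_barrier(&bio) != 0,
	       (int)bio_rw_flagged(&bio, BIO_RW_BARRIER));
	return 0;
}

With that helper in place, every call site below becomes a one-for-one textual replacement.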
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;

-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;

 	if (unlikely(error)) {
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */

-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;

 	if (error == -EOPNOTSUPP)
@@ -586,7 +586,7 @@ static void dec_pending(struct dm_io *io, int error)
 		 */
 		spin_lock_irqsave(&md->deferred_lock, flags);
 		if (__noflush_suspending(md)) {
-			if (!bio_barrier(io->bio))
+			if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 				bio_list_add_head(&md->deferred,
 						  io->bio);
 		} else
@@ -598,7 +598,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;

-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1209,7 +1209,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)

 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1321,7 +1321,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);

 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1344,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -2164,7 +2164,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)

 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;

 	/*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)

 	md_write_start(mddev, bio); /* wait on superblock update early */

-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);

-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);

@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const int do_sync = bio_sync(r1_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const int do_sync = bio_sync(r10_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;

-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}