block: get rid of bio_rw and READA
These two are confusing leftovers of the old world order, combining values from the REQ_OP_ and REQ_ namespaces. For callers that don't special-case the value we mostly just replace bio_rw() with bio_data_dir() or op_is_write(), except for the few cases where a switch over the REQ_OP_ values makes more sense. Any check for READA is replaced with an explicit check for REQ_RAHEAD. Also remove the READA alias for REQ_RAHEAD.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
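As context (not part of the patch itself), a minimal sketch of the calling convention the message describes, assuming a kernel around the time of this series where struct bio still has ->bi_rw (later renamed ->bi_opf) and the helpers bio_data_dir(), op_is_write() and bio_op() plus the REQ_RAHEAD flag are available; the function example_classify_bio() is purely illustrative.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <linux/kernel.h>
#include <linux/bio.h>

static void example_classify_bio(struct bio *bio)
{
        /*
         * bio_data_dir() yields only the data direction (READ or WRITE),
         * which is what most former bio_rw() callers actually wanted.
         */
        int rw = bio_data_dir(bio);

        /* Read-ahead is now a separate hint flag, not a third "rw" value. */
        if (bio->bi_rw & REQ_RAHEAD)
                pr_debug("read-ahead bio; a driver may fail it early\n");

        /* Where the operation itself matters, switch over the REQ_OP_ values. */
        switch (bio_op(bio)) {
        case REQ_OP_READ:
                pr_debug("read bio, rw=%d\n", rw);
                break;
        case REQ_OP_WRITE:
                pr_debug("write bio, op_is_write=%d\n",
                         op_is_write(bio_op(bio)));
                break;
        default:
                break;
        }
}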
@@ -528,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
 		DMWARN_LIMIT("Read failure on mirror device %s. "
 			     "Trying alternative device.",
 			     m->dev->name);
-		queue_bio(m->ms, bio, bio_rw(bio));
+		queue_bio(m->ms, bio, bio_data_dir(bio));
 		return;
 	}
 
@@ -1193,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
  */
 static int mirror_map(struct dm_target *ti, struct bio *bio)
 {
-	int r, rw = bio_rw(bio);
+	int r, rw = bio_data_dir(bio);
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
-		if (rw == READA)
+		if (bio->bi_rw & REQ_RAHEAD)
			return -EWOULDBLOCK;
 
		queue_bio(ms, bio, rw);
@@ -1242,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
-	int rw = bio_rw(bio);
+	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct mirror *m = NULL;
 	struct dm_bio_details *bd = NULL;
@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
	 * to copy an exception */
	down_write(&s->lock);
 
-	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+	    bio_data_dir(bio) == WRITE)) {
		r = -EIO;
		goto out_unlock;
	}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
		 * flags so we should only get this if we are
		 * writeable.
		 */
-		if (bio_rw(bio) == WRITE) {
+		if (bio_data_dir(bio) == WRITE) {
			pe = __lookup_pending_exception(s, chunk);
			if (!pe) {
				up_write(&s->lock);
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
-		if (bio_rw(bio) == WRITE &&
+		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
@@ -1831,7 +1832,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
		remap_exception(s, e, bio, chunk);
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}
@@ -1839,7 +1840,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;
 
-	if (bio_rw(bio) == WRITE) {
+	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}
@@ -2288,7 +2289,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;
 
-	if (bio_rw(bio) != WRITE)
+	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;
 
	available_sectors = o->split_boundary -
@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  */
 static int zero_map(struct dm_target *ti, struct bio *bio)
 {
-	switch(bio_rw(bio)) {
-	case READ:
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		if (bio->bi_rw & REQ_RAHEAD) {
+			/* readahead of null bytes only wastes buffer cache */
+			return -EIO;
+		}
		zero_fill_bio(bio);
		break;
-	case READA:
-		/* readahead of null bytes only wastes buffer cache */
-		return -EIO;
-	case WRITE:
+	case REQ_OP_WRITE:
		/* writes get silently dropped */
		break;
+	default:
+		return -EIO;
	}
 
	bio_endio(bio);
@@ -1833,7 +1833,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);
 
-		if (bio_rw(bio) != READA)
+		if (!(bio->bi_rw & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
@@ -1105,7 +1105,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
	bitmap = mddev->bitmap;
 
	/*
-	 * make_request() can abort the operation when READA is being
+	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 *
	 */
@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
			(unsigned long long)logical_sector);
 
		sh = raid5_get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK), 0);
+				       (bi->bi_rw & REQ_RAHEAD), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a