block,fs: use REQ_* flags directly
Remove the WRITE_* and READ_SYNC wrappers, and just use the flags directly.
Where applicable this also drops usage of the bio_set_op_attrs wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

committed by Jens Axboe
parent a2b809672e
commit 70fd76140a
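
For orientation, the pattern applied throughout the diff below is sketched here in plain kernel C. The snippet is illustrative only and not part of the commit; the helper name submit_one_sync_write() is made up, but REQ_OP_WRITE, REQ_SYNC, bio->bi_opf and submit_bio() are the interfaces the patch switches to.

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Hypothetical helper, shown only to illustrate the flag change. */
static void submit_one_sync_write(struct bio *bio)
{
	/*
	 * Before this series the opcode and a WRITE_* wrapper went through
	 * the helper:
	 *	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
	 * After it, the REQ_* flags are composed directly into ->bi_opf.
	 */
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
	submit_bio(bio);
}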
@@ -3485,9 +3485,9 @@ static int write_dev_supers(struct btrfs_device *device,
 		 * to go down lazy.
 		 */
 		if (i == 0)
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
 		else
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		if (ret)
 			errors++;
 	}
@@ -3551,7 +3551,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
 	device->flush_bio = bio;
@@ -127,7 +127,7 @@ struct extent_page_data {
 	 */
 	unsigned int extent_locked:1;
 
-	/* tells the submit_bio code to use a WRITE_SYNC */
+	/* tells the submit_bio code to use REQ_SYNC */
 	unsigned int sync_io:1;
 };
 
@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	bio_add_page(bio, page, length, pg_offset);
 
 	if (btrfsic_submit_bio_wait(bio)) {
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct bio *bio;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	}
 
 	if (failed_bio->bi_vcnt > 1)
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 
 	phy_offset >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -3484,7 +3482,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC;
+		write_flags = REQ_SYNC;
 
 	trace___extent_writepage(page, inode, wbc);
 
@@ -3729,7 +3727,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+	int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4076,7 +4074,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 		int ret;
 
 		bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-				 epd->sync_io ? WRITE_SYNC : 0);
+				 epd->sync_io ? REQ_SYNC : 0);
 
 		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 		BUG_ON(ret < 0); /* -ENOMEM */
@@ -7917,7 +7917,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	struct io_failure_record *failrec;
 	struct bio *bio;
 	int isector;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7936,9 +7936,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	if ((failed_bio->bi_vcnt > 1)
 		|| (failed_bio->bi_io_vec->bv_len
 			> BTRFS_I(inode)->root->sectorsize))
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 
 	isector = start - btrfs_io_bio(failed_bio)->logical;
 	isector >>= inode->i_sb->s_blocksize_bits;
@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 	if (ret != PAGE_SIZE) {
 leave_with_eio:
@@ -6023,7 +6023,7 @@ static void btrfs_end_bio(struct bio *bio)
 			else
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_READ_ERRS);
-			if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+			if (bio->bi_opf & REQ_PREFLUSH)
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_FLUSH_ERRS);
 			btrfs_dev_stat_print_on_error(dev);
@@ -62,7 +62,7 @@ struct btrfs_device {
 	int running_pending;
 	/* regular prio bios */
 	struct btrfs_pending_bios pending_bios;
-	/* WRITE_SYNC bios */
+	/* sync bios */
 	struct btrfs_pending_bios pending_sync_bios;
 
 	struct block_device *bdev;
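
One detail worth noting from the btrfs_end_bio() hunk above: WRITE_FLUSH expanded to more than a single REQ_* bit, which is why the old code compared against the full mask, whereas the new code only needs to test the single REQ_PREFLUSH flag. A minimal sketch follows; the predicate names are hypothetical and the *_old variant only compiles against a tree that still defines WRITE_FLUSH (it is removed by this series).

#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/types.h>

/* Hypothetical predicates, for illustration only. */
static bool bio_requests_flush_old(struct bio *bio)
{
	/* WRITE_FLUSH is a multi-bit mask, so every bit must be set. */
	return (bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH;
}

static bool bio_requests_flush_new(struct bio *bio)
{
	/* REQ_PREFLUSH is a single flag; a plain AND test is enough. */
	return bio->bi_opf & REQ_PREFLUSH;
}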