block,fs: use REQ_* flags directly
Remove the WRITE_* and READ_SYNC wrappers, and just use the flags directly. Where applicable this also drops usage of the bio_set_op_attrs wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

Committed by: Jens Axboe
Parent: a2b809672e
Commit: 70fd76140a
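
For orientation, the wrappers being removed were thin aliases over the underlying REQ_* flags. The mapping below is a sketch inferred from the substitutions in this diff; it is not quoted verbatim from the removed header, so treat the exact definitions as an assumption:

/* Sketch of what the removed wrappers expanded to, as implied by the
 * old->new substitutions in the hunks below: */
#define READ_SYNC        0
#define WRITE_SYNC       REQ_SYNC
#define WRITE_ODIRECT    (REQ_SYNC | REQ_IDLE)
#define WRITE_FUA        REQ_FUA
#define WRITE_FLUSH      REQ_PREFLUSH
#define WRITE_FLUSH_FUA  (REQ_PREFLUSH | REQ_FUA)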
@@ -3485,9 +3485,9 @@ static int write_dev_supers(struct btrfs_device *device,
         * to go down lazy.
         */
        if (i == 0)
-            ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+            ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
        else
-            ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+            ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
        if (ret)
            errors++;
    }
@@ -3551,7 +3551,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)

    bio->bi_end_io = btrfs_end_empty_barrier;
    bio->bi_bdev = device->bdev;
-   bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+   bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
    init_completion(&device->flush_wait);
    bio->bi_private = &device->flush_wait;
    device->flush_bio = bio;
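The write_dev_flush hunk above also shows the bio_set_op_attrs drop mentioned in the commit message. With the op and the flags both living in bi_opf, the helper is just an OR of the two, so open-coding the assignment is equivalent. A minimal sketch, assuming the 4.9-era helper that simply assigns op | flags:

/* Equivalent ways to set up the same flush bio; the wrapper only
 * ORs the op and the flags into bi_opf. */
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;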
@@ -127,7 +127,7 @@ struct extent_page_data {
     */
    unsigned int extent_locked:1;

-   /* tells the submit_bio code to use a WRITE_SYNC */
+   /* tells the submit_bio code to use REQ_SYNC */
    unsigned int sync_io:1;
};

@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        return -EIO;
    }
    bio->bi_bdev = dev->bdev;
-   bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+   bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
    bio_add_page(bio, page, length, pg_offset);

    if (btrfsic_submit_bio_wait(bio)) {
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
    struct inode *inode = page->mapping->host;
    struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
    struct bio *bio;
-   int read_mode;
+   int read_mode = 0;
    int ret;

    BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
    }

    if (failed_bio->bi_vcnt > 1)
-       read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-   else
-       read_mode = READ_SYNC;
+       read_mode |= REQ_FAILFAST_DEV;

    phy_offset >>= inode->i_sb->s_blocksize_bits;
    bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -3484,7 +3482,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
    unsigned long nr_written = 0;

    if (wbc->sync_mode == WB_SYNC_ALL)
-       write_flags = WRITE_SYNC;
+       write_flags = REQ_SYNC;

    trace___extent_writepage(page, inode, wbc);

@@ -3729,7 +3727,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
    unsigned long i, num_pages;
    unsigned long bio_flags = 0;
    unsigned long start, end;
-   int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+   int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
    int ret = 0;

    clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4076,7 +4074,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
        int ret;

        bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-                epd->sync_io ? WRITE_SYNC : 0);
+                epd->sync_io ? REQ_SYNC : 0);

        ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
        BUG_ON(ret < 0); /* -ENOMEM */
@@ -7917,7 +7917,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
    struct io_failure_record *failrec;
    struct bio *bio;
    int isector;
-   int read_mode;
+   int read_mode = 0;
    int ret;

    BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7936,9 +7936,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
    if ((failed_bio->bi_vcnt > 1)
        || (failed_bio->bi_io_vec->bv_len
            > BTRFS_I(inode)->root->sectorsize))
-       read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-   else
-       read_mode = READ_SYNC;
+       read_mode |= REQ_FAILFAST_DEV;

    isector = start - btrfs_io_bio(failed_bio)->logical;
    isector >>= inode->i_sb->s_blocksize_bits;
@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
    bio->bi_iter.bi_size = 0;
    bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
    bio->bi_bdev = dev->bdev;
-   bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+   bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
    ret = bio_add_page(bio, page, PAGE_SIZE, 0);
    if (ret != PAGE_SIZE) {
leave_with_eio:
@@ -6023,7 +6023,7 @@ static void btrfs_end_bio(struct bio *bio)
            else
                btrfs_dev_stat_inc(dev,
                    BTRFS_DEV_STAT_READ_ERRS);
-           if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+           if (bio->bi_opf & REQ_PREFLUSH)
                btrfs_dev_stat_inc(dev,
                    BTRFS_DEV_STAT_FLUSH_ERRS);
            btrfs_dev_stat_print_on_error(dev);
@@ -62,7 +62,7 @@ struct btrfs_device {
    int running_pending;
    /* regular prio bios */
    struct btrfs_pending_bios pending_bios;
-   /* WRITE_SYNC bios */
+   /* sync bios */
    struct btrfs_pending_bios pending_sync_bios;

    struct block_device *bdev;
@@ -753,7 +753,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                 * still in flight on potentially older
                 * contents.
                 */
-               write_dirty_buffer(bh, WRITE_SYNC);
+               write_dirty_buffer(bh, REQ_SYNC);

                /*
                 * Kick off IO for the previous mapping. Note
@@ -1684,7 +1684,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
 * causes the writes to be flagged as synchronous writes.
 */
int __block_write_full_page(struct inode *inode, struct page *page,
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
    struct buffer_head *bh, *head;
    unsigned int blocksize, bbits;
    int nr_underway = 0;
-   int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+   int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);

    head = create_page_buffers(page, inode,
                    (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -3210,7 +3210,7 @@ EXPORT_SYMBOL(__sync_dirty_buffer);

int sync_dirty_buffer(struct buffer_head *bh)
{
-   return __sync_dirty_buffer(bh, WRITE_SYNC);
+   return __sync_dirty_buffer(bh, REQ_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);

@@ -1209,7 +1209,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
    dio->inode = inode;
    if (iov_iter_rw(iter) == WRITE) {
        dio->op = REQ_OP_WRITE;
-       dio->op_flags = WRITE_ODIRECT;
+       dio->op_flags = REQ_SYNC | REQ_IDLE;
    } else {
        dio->op = REQ_OP_READ;
    }
@@ -35,7 +35,7 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
}

/*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
    lock_buffer(bh);
    bh->b_end_io = end_buffer_write_sync;
    get_bh(bh);
-   submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+   submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
    wait_on_buffer(bh);
    sb_end_write(sb);
    if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
    get_bh(*bh);
    lock_buffer(*bh);
    (*bh)->b_end_io = end_buffer_read_sync;
-   submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
+   submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
    wait_on_buffer(*bh);
    if (!buffer_uptodate(*bh)) {
        ret = -EIO;
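The read_mmp_block hunk above is the first of several read-path changes where READ_SYNC simply disappears instead of being replaced. That is consistent with READ_SYNC expanding to 0 (see the mapping sketch near the top of this page); under that assumption the submitted flags are unchanged, e.g. these two calls are equivalent:

submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);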
@@ -340,7 +340,7 @@ void ext4_io_submit(struct ext4_io_submit *io)

    if (bio) {
        int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-                 WRITE_SYNC : 0;
+                 REQ_SYNC : 0;
        bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
        submit_bio(io->io_bio);
    }
@@ -4553,7 +4553,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
    unlock_buffer(sbh);
    if (sync) {
        error = __sync_dirty_buffer(sbh,
-           test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+           test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
        if (error)
            return error;

@@ -65,7 +65,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
        .sbi = sbi,
        .type = META,
        .op = REQ_OP_READ,
-       .op_flags = READ_SYNC | REQ_META | REQ_PRIO,
+       .op_flags = REQ_META | REQ_PRIO,
        .old_blkaddr = index,
        .new_blkaddr = index,
        .encrypted_page = NULL,
@@ -160,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
        .sbi = sbi,
        .type = META,
        .op = REQ_OP_READ,
-       .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
+       .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
        .encrypted_page = NULL,
    };
    struct blk_plug plug;
@@ -198,11 +198,9 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
    if (type >= META_FLUSH) {
        io->fio.type = META_FLUSH;
        io->fio.op = REQ_OP_WRITE;
-       if (test_opt(sbi, NOBARRIER))
-           io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
-       else
-           io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
-                               REQ_PRIO;
+       io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
+       if (!test_opt(sbi, NOBARRIER))
+           io->fio.op_flags |= REQ_FUA;
    }
    __submit_merged_bio(io);
out:
@@ -483,7 +481,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
        return page;
    f2fs_put_page(page, 0);

-   page = get_read_data_page(inode, index, READ_SYNC, false);
+   page = get_read_data_page(inode, index, 0, false);
    if (IS_ERR(page))
        return page;

@@ -509,7 +507,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
repeat:
-   page = get_read_data_page(inode, index, READ_SYNC, for_write);
+   page = get_read_data_page(inode, index, 0, for_write);
    if (IS_ERR(page))
        return page;

@@ -1251,7 +1249,7 @@ static int f2fs_write_data_page(struct page *page,
        .sbi = sbi,
        .type = DATA,
        .op = REQ_OP_WRITE,
-       .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+       .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
        .page = page,
        .encrypted_page = NULL,
    };
@@ -1663,7 +1661,7 @@ repeat:
            err = PTR_ERR(bio);
            goto fail;
        }
-       bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+       bio->bi_opf = REQ_OP_READ;
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
            bio_put(bio);
            err = -EFAULT;
@@ -550,7 +550,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .op = REQ_OP_READ,
-       .op_flags = READ_SYNC,
+       .op_flags = 0,
        .encrypted_page = NULL,
    };
    struct dnode_of_data dn;
@@ -625,7 +625,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
    f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

    fio.op = REQ_OP_WRITE;
-   fio.op_flags = WRITE_SYNC;
+   fio.op_flags = REQ_SYNC;
    fio.new_blkaddr = newaddr;
    f2fs_submit_page_mbio(&fio);

@@ -663,7 +663,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .op = REQ_OP_WRITE,
-       .op_flags = WRITE_SYNC,
+       .op_flags = REQ_SYNC,
        .page = page,
        .encrypted_page = NULL,
    };
@@ -111,7 +111,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .op = REQ_OP_WRITE,
-       .op_flags = WRITE_SYNC | REQ_PRIO,
+       .op_flags = REQ_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
@@ -1134,7 +1134,7 @@ repeat:
    if (!page)
        return ERR_PTR(-ENOMEM);

-   err = read_node_page(page, READ_SYNC);
+   err = read_node_page(page, 0);
    if (err < 0) {
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
@@ -1570,7 +1570,7 @@ static int f2fs_write_node_page(struct page *page,
        .sbi = sbi,
        .type = NODE,
        .op = REQ_OP_WRITE,
-       .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+       .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
        .page = page,
        .encrypted_page = NULL,
    };
@@ -259,7 +259,7 @@ static int __commit_inmem_pages(struct inode *inode,
        .sbi = sbi,
        .type = DATA,
        .op = REQ_OP_WRITE,
-       .op_flags = WRITE_SYNC | REQ_PRIO,
+       .op_flags = REQ_SYNC | REQ_PRIO,
        .encrypted_page = NULL,
    };
    bool submit_bio = false;
@@ -420,7 +420,7 @@ repeat:
        fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

        bio->bi_bdev = sbi->sb->s_bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        ret = submit_bio_wait(bio);

        llist_for_each_entry_safe(cmd, next,
@@ -454,7 +454,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)

        atomic_inc(&fcc->submit_flush);
        bio->bi_bdev = sbi->sb->s_bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        ret = submit_bio_wait(bio);
        atomic_dec(&fcc->submit_flush);
        bio_put(bio);
@@ -1515,7 +1515,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
        .sbi = sbi,
        .type = META,
        .op = REQ_OP_WRITE,
-       .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
+       .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
        .old_blkaddr = page->index,
        .new_blkaddr = page->index,
        .page = page,
@@ -1238,7 +1238,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
    unlock_buffer(bh);

    /* it's rare case, we can do fua all the time */
-   return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+   return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
    struct gfs2_log_header *lh;
    unsigned int tail;
    u32 hash;
-   int op_flags = WRITE_FLUSH_FUA | REQ_META;
+   int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
    struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
    enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
    lh = page_address(page);
@@ -682,7 +682,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
    if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
        gfs2_ordered_wait(sdp);
        log_flush_wait(sdp);
-       op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
+       op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
    }

    sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
@@ -38,7 +38,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
    struct buffer_head *bh, *head;
    int nr_underway = 0;
    int write_flags = REQ_META | REQ_PRIO |
-             (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+             (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);

    BUG_ON(!PageLocked(page));
    BUG_ON(!page_has_buffers(page));
@@ -285,7 +285,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        }
    }

-   gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+   gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
    if (!(flags & DIO_WAIT))
        return 0;

@@ -453,7 +453,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
    if (buffer_uptodate(first_bh))
        goto out;
    if (!buffer_locked(first_bh))
-       ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
+       ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);

    dblock++;
    extlen--;
@@ -246,7 +246,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)

    bio->bi_end_io = end_bio_io_page;
    bio->bi_private = page;
-   bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+   bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
    submit_bio(bio);
    wait_on_page_locked(page);
    bio_put(bio);
@@ -221,7 +221,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
    error2 = hfsplus_submit_bio(sb,
                   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
                   sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
-                  WRITE_SYNC);
+                  REQ_SYNC);
    if (!error)
        error = error2;
    if (!write_backup)
@@ -230,7 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
    error2 = hfsplus_submit_bio(sb,
                  sbi->part_start + sbi->sect_count - 2,
                  sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
-                 WRITE_SYNC);
+                 REQ_SYNC);
    if (!error)
        error2 = error;
out:
@@ -186,7 +186,7 @@ __flush_batch(journal_t *journal, int *batch_count)

    blk_start_plug(&plug);
    for (i = 0; i < *batch_count; i++)
-       write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+       write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
    blk_finish_plug(&plug);

    for (i = 0; i < *batch_count; i++) {
@@ -155,9 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,

    if (journal->j_flags & JBD2_BARRIER &&
        !jbd2_has_feature_async_commit(journal))
-       ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+       ret = submit_bh(REQ_OP_WRITE,
+               REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
    else
-       ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+       ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

    *cbh = bh;
    return ret;
@@ -402,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        jbd2_journal_update_sb_log_tail(journal,
                        journal->j_tail_sequence,
                        journal->j_tail,
-                       WRITE_SYNC);
+                       REQ_SYNC);
        mutex_unlock(&journal->j_checkpoint_mutex);
    } else {
        jbd_debug(3, "superblock not updated\n");
@@ -717,7 +718,7 @@ start_journal_io:
            clear_buffer_dirty(bh);
            set_buffer_uptodate(bh);
            bh->b_end_io = journal_end_buffer_io_sync;
-           submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+           submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
        }
        cond_resched();
        stats.run.rs_blocks_logged += bufs;
@@ -913,7 +913,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
     * space and if we lose sb update during power failure we'd replay
     * old transaction with possibly newly overwritten data.
     */
-   ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+   ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
    if (ret)
        goto out;

@@ -1306,7 +1306,7 @@ static int journal_reset(journal_t *journal)
        /* Lock here to make assertions happy... */
        mutex_lock(&journal->j_checkpoint_mutex);
        /*
-        * Update log tail information. We use WRITE_FUA since new
+        * Update log tail information. We use REQ_FUA since new
         * transaction will start reusing journal space and so we
         * must make sure information about current log tail is on
         * disk before that.
@@ -1314,7 +1314,7 @@ static int journal_reset(journal_t *journal)
        jbd2_journal_update_sb_log_tail(journal,
                        journal->j_tail_sequence,
                        journal->j_tail,
-                       WRITE_FUA);
+                       REQ_FUA);
        mutex_unlock(&journal->j_checkpoint_mutex);
    }
    return jbd2_journal_start_thread(journal);
@@ -1454,7 +1454,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
    sb->s_errno = cpu_to_be32(journal->j_errno);
    read_unlock(&journal->j_state_lock);

-   jbd2_write_superblock(journal, WRITE_FUA);
+   jbd2_write_superblock(journal, REQ_FUA);
}
EXPORT_SYMBOL(jbd2_journal_update_sb_errno);

@@ -1720,7 +1720,8 @@ int jbd2_journal_destroy(journal_t *journal)
        ++journal->j_transaction_sequence;
        write_unlock(&journal->j_state_lock);

-       jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+       jbd2_mark_journal_empty(journal,
+               REQ_PREFLUSH | REQ_FUA);
        mutex_unlock(&journal->j_checkpoint_mutex);
    } else
        err = -EIO;
@@ -1979,7 +1980,7 @@ int jbd2_journal_flush(journal_t *journal)
     * the magic code for a fully-recovered superblock. Any future
     * commits of data to the journal will restore the current
     * s_start value. */
-   jbd2_mark_journal_empty(journal, WRITE_FUA);
+   jbd2_mark_journal_empty(journal, REQ_FUA);
    mutex_unlock(&journal->j_checkpoint_mutex);
    write_lock(&journal->j_state_lock);
    J_ASSERT(!journal->j_running_transaction);
@@ -2025,7 +2026,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
    if (write) {
        /* Lock to make assertions happy... */
        mutex_lock(&journal->j_checkpoint_mutex);
-       jbd2_mark_journal_empty(journal, WRITE_FUA);
+       jbd2_mark_journal_empty(journal, REQ_FUA);
        mutex_unlock(&journal->j_checkpoint_mutex);
    }

@@ -648,7 +648,7 @@ static void flush_descriptor(journal_t *journal,
    set_buffer_jwrite(descriptor);
    BUFFER_TRACE(descriptor, "write");
    set_buffer_dirty(descriptor);
-   write_dirty_buffer(descriptor, WRITE_SYNC);
+   write_dirty_buffer(descriptor, REQ_SYNC);
}
#endif

@@ -2002,7 +2002,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)

    bio->bi_end_io = lbmIODone;
    bio->bi_private = bp;
-   bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+   bio->bi_opf = REQ_OP_READ;
    /*check if journaling to disk has been disabled*/
    if (log->no_integrity) {
        bio->bi_iter.bi_size = 0;
@@ -2146,7 +2146,7 @@ static void lbmStartIO(struct lbuf * bp)

    bio->bi_end_io = lbmIODone;
    bio->bi_private = bp;
-   bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+   bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

    /* check if journaling to disk has been disabled */
    if (log->no_integrity) {
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
    struct buffer_head map_bh;
    loff_t i_size = i_size_read(inode);
    int ret = 0;
-   int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+   int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);

    if (page_has_buffers(page)) {
        struct buffer_head *head = page_buffers(page);
@@ -705,7 +705,7 @@ mpage_writepages(struct address_space *mapping,
        ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
        if (mpd.bio) {
            int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-               WRITE_SYNC : 0);
+               REQ_SYNC : 0);
            mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
        }
    }
@@ -726,7 +726,7 @@ int mpage_writepage(struct page *page, get_block_t get_block,
    int ret = __mpage_writepage(page, wbc, &mpd);
    if (mpd.bio) {
        int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-           WRITE_SYNC : 0);
+           REQ_SYNC : 0);
        mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
    }
    return ret;
@@ -189,7 +189,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
    set_buffer_dirty(nilfs->ns_sbh[0]);
    if (nilfs_test_opt(nilfs, BARRIER)) {
        err = __sync_dirty_buffer(nilfs->ns_sbh[0],
-                     WRITE_SYNC | WRITE_FLUSH_FUA);
+                     REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
    } else {
        err = sync_dirty_buffer(nilfs->ns_sbh[0]);
    }
@@ -627,7 +627,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
    slot = o2nm_this_node();

    bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
-                WRITE_SYNC);
+                REQ_SYNC);
    if (IS_ERR(bio)) {
        status = PTR_ERR(bio);
        mlog_errno(status);
@@ -1111,7 +1111,8 @@ static int flush_commit_list(struct super_block *s,
    mark_buffer_dirty(jl->j_commit_bh) ;
    depth = reiserfs_write_unlock_nested(s);
    if (reiserfs_barrier_flush(s))
-       __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
+       __sync_dirty_buffer(jl->j_commit_bh,
+                   REQ_PREFLUSH | REQ_FUA);
    else
        sync_dirty_buffer(jl->j_commit_bh);
    reiserfs_write_lock_nested(s, depth);
@@ -1269,7 +1270,8 @@ static int _update_journal_header_block(struct super_block *sb,
        depth = reiserfs_write_unlock_nested(sb);

        if (reiserfs_barrier_flush(sb))
-           __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
+           __sync_dirty_buffer(journal->j_header_bh,
+                       REQ_PREFLUSH | REQ_FUA);
        else
            sync_dirty_buffer(journal->j_header_bh);

@@ -495,8 +495,10 @@ xfs_submit_ioend(

    ioend->io_bio->bi_private = ioend;
    ioend->io_bio->bi_end_io = xfs_end_bio;
-   bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-            (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+   ioend->io_bio->bi_opf = REQ_OP_WRITE;
+   if (wbc->sync_mode == WB_SYNC_ALL)
+       ioend->io_bio->bi_opf |= REQ_SYNC;
+
    /*
     * If we are failing the IO now, just mark the ioend with an
     * error and finish it. This will run IO completion immediately
@@ -567,8 +569,9 @@ xfs_chain_bio(

    bio_chain(ioend->io_bio, new);
    bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
-   bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-            (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+   ioend->io_bio->bi_opf = REQ_OP_WRITE;
+   if (wbc->sync_mode == WB_SYNC_ALL)
+       ioend->io_bio->bi_opf |= REQ_SYNC;
    submit_bio(ioend->io_bio);
    ioend->io_bio = new;
}
@@ -1304,7 +1304,7 @@ _xfs_buf_ioapply(
    if (bp->b_flags & XBF_WRITE) {
        op = REQ_OP_WRITE;
        if (bp->b_flags & XBF_SYNCIO)
-           op_flags = WRITE_SYNC;
+           op_flags = REQ_SYNC;
        if (bp->b_flags & XBF_FUA)
            op_flags |= REQ_FUA;
        if (bp->b_flags & XBF_FLUSH)