Merge branch 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block: (46 commits)
  xen-blkfront: disable barrier/flush write support
  Added blk-lib.c and blk-barrier.c was renamed to blk-flush.c
  block: remove BLKDEV_IFL_WAIT
  aic7xxx_old: removed unused 'req' variable
  block: remove the BH_Eopnotsupp flag
  block: remove the BLKDEV_IFL_BARRIER flag
  block: remove the WRITE_BARRIER flag
  swap: do not send discards as barriers
  fat: do not send discards as barriers
  ext4: do not send discards as barriers
  jbd2: replace barriers with explicit flush / FUA usage
  jbd2: Modify ASYNC_COMMIT code to not rely on queue draining on barrier
  jbd: replace barriers with explicit flush / FUA usage
  nilfs2: replace barriers with explicit flush / FUA usage
  reiserfs: replace barriers with explicit flush / FUA usage
  gfs2: replace barriers with explicit flush / FUA usage
  btrfs: replace barriers with explicit flush / FUA usage
  xfs: replace barriers with explicit flush / FUA usage
  block: pass gfp_mask and flags to sb_issue_discard
  dm: convey that all flushes are processed as empty
  ...
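The common theme of the series is dropping the old draining-barrier model in favor of explicit cache-flush semantics. A minimal sketch of the conversion pattern, assuming the 2.6.37-era request flags (the helper function is hypothetical, not code from the tree):

#include <linux/bio.h>
#include <linux/fs.h>

/*
 * A WRITE_BARRIER used to drain the queue and order all I/O around
 * itself.  After this series a caller states its needs explicitly:
 * REQ_FLUSH flushes the device cache before the write, REQ_FUA
 * forces the write itself to stable media before completion.
 */
static void example_durable_write(struct bio *bio)
{
	/* pre-2.6.37 style: submit_bio(WRITE_BARRIER, bio); */
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}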
 drivers/md/md.c | 117
diff --git a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -227,12 +227,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 	rcu_read_lock();
-	if (mddev->suspended || mddev->barrier) {
+	if (mddev->suspended) {
 		DEFINE_WAIT(__wait);
 		for (;;) {
 			prepare_to_wait(&mddev->sb_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
-			if (!mddev->suspended && !mddev->barrier)
+			if (!mddev->suspended)
 				break;
 			rcu_read_unlock();
 			schedule();
@@ -283,40 +283,29 @@ EXPORT_SYMBOL_GPL(mddev_resume);
 
 int mddev_congested(mddev_t *mddev, int bits)
 {
-	if (mddev->barrier)
-		return 1;
 	return mddev->suspended;
 }
 EXPORT_SYMBOL(mddev_congested);
 
 /*
- * Generic barrier handling for md
+ * Generic flush handling for md
  */
 
-#define POST_REQUEST_BARRIER ((void*)1)
-
-static void md_end_barrier(struct bio *bio, int err)
+static void md_end_flush(struct bio *bio, int err)
 {
 	mdk_rdev_t *rdev = bio->bi_private;
 	mddev_t *mddev = rdev->mddev;
-	if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
-		set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
 
 	rdev_dec_pending(rdev, mddev);
 
 	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		if (mddev->barrier == POST_REQUEST_BARRIER) {
-			/* This was a post-request barrier */
-			mddev->barrier = NULL;
-			wake_up(&mddev->sb_wait);
-		} else
-			/* The pre-request barrier has finished */
-			schedule_work(&mddev->barrier_work);
+		/* The pre-request flush has finished */
+		schedule_work(&mddev->flush_work);
 	}
 	bio_put(bio);
 }
 
-static void submit_barriers(mddev_t *mddev)
+static void submit_flushes(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
 
@@ -333,60 +322,56 @@ static void submit_barriers(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 			bi = bio_alloc(GFP_KERNEL, 0);
-			bi->bi_end_io = md_end_barrier;
+			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
 			atomic_inc(&mddev->flush_pending);
-			submit_bio(WRITE_BARRIER, bi);
+			submit_bio(WRITE_FLUSH, bi);
 			rcu_read_lock();
 			rdev_dec_pending(rdev, mddev);
 		}
 	rcu_read_unlock();
 }
 
-static void md_submit_barrier(struct work_struct *ws)
+static void md_submit_flush_data(struct work_struct *ws)
 {
-	mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
-	struct bio *bio = mddev->barrier;
+	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
+	struct bio *bio = mddev->flush_bio;
 
 	atomic_set(&mddev->flush_pending, 1);
 
-	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
-		bio_endio(bio, -EOPNOTSUPP);
-	else if (bio->bi_size == 0)
+	if (bio->bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~REQ_HARDBARRIER;
+		bio->bi_rw &= ~REQ_FLUSH;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
-		mddev->barrier = POST_REQUEST_BARRIER;
-		submit_barriers(mddev);
 	}
 	if (atomic_dec_and_test(&mddev->flush_pending)) {
-		mddev->barrier = NULL;
+		mddev->flush_bio = NULL;
 		wake_up(&mddev->sb_wait);
 	}
 }
 
-void md_barrier_request(mddev_t *mddev, struct bio *bio)
+void md_flush_request(mddev_t *mddev, struct bio *bio)
 {
 	spin_lock_irq(&mddev->write_lock);
 	wait_event_lock_irq(mddev->sb_wait,
-			    !mddev->barrier,
+			    !mddev->flush_bio,
 			    mddev->write_lock, /*nothing*/);
-	mddev->barrier = bio;
+	mddev->flush_bio = bio;
 	spin_unlock_irq(&mddev->write_lock);
 
 	atomic_set(&mddev->flush_pending, 1);
-	INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
 
-	submit_barriers(mddev);
+	submit_flushes(mddev);
 
 	if (atomic_dec_and_test(&mddev->flush_pending))
-		schedule_work(&mddev->barrier_work);
+		schedule_work(&mddev->flush_work);
 }
-EXPORT_SYMBOL(md_barrier_request);
+EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
@@ -697,31 +682,6 @@ static void super_written(struct bio *bio, int error)
 	bio_put(bio);
 }
 
-static void super_written_barrier(struct bio *bio, int error)
-{
-	struct bio *bio2 = bio->bi_private;
-	mdk_rdev_t *rdev = bio2->bi_private;
-	mddev_t *mddev = rdev->mddev;
-
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
-	    error == -EOPNOTSUPP) {
-		unsigned long flags;
-		/* barriers don't appear to be supported :-( */
-		set_bit(BarriersNotsupp, &rdev->flags);
-		mddev->barriers_work = 0;
-		spin_lock_irqsave(&mddev->write_lock, flags);
-		bio2->bi_next = mddev->biolist;
-		mddev->biolist = bio2;
-		spin_unlock_irqrestore(&mddev->write_lock, flags);
-		wake_up(&mddev->sb_wait);
-		bio_put(bio);
-	} else {
-		bio_put(bio2);
-		bio->bi_private = rdev;
-		super_written(bio, error);
-	}
-}
-
 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 		   sector_t sector, int size, struct page *page)
 {
@@ -730,51 +690,28 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * and decrement it on completion, waking up sb_wait
 	 * if zero is reached.
 	 * If an error occurred, call md_error
-	 *
-	 * As we might need to resubmit the request if REQ_HARDBARRIER
-	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio->bi_rw = rw;
 
 	atomic_inc(&mddev->pending_writes);
-	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
-		struct bio *rbio;
-		rw |= REQ_HARDBARRIER;
-		rbio = bio_clone(bio, GFP_NOIO);
-		rbio->bi_private = bio;
-		rbio->bi_end_io = super_written_barrier;
-		submit_bio(rw, rbio);
-	} else
-		submit_bio(rw, bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
+		   bio);
 }
 
 void md_super_wait(mddev_t *mddev)
 {
-	/* wait for all superblock writes that were scheduled to complete.
-	 * if any had to be retried (due to BARRIER problems), retry them
-	 */
+	/* wait for all superblock writes that were scheduled to complete */
 	DEFINE_WAIT(wq);
 	for(;;) {
 		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
 		if (atomic_read(&mddev->pending_writes)==0)
 			break;
-		while (mddev->biolist) {
-			struct bio *bio;
-			spin_lock_irq(&mddev->write_lock);
-			bio = mddev->biolist;
-			mddev->biolist = bio->bi_next ;
-			bio->bi_next = NULL;
-			spin_unlock_irq(&mddev->write_lock);
-			submit_bio(bio->bi_rw, bio);
-		}
 		schedule();
 	}
 	finish_wait(&mddev->sb_wait, &wq);
@@ -1071,7 +1008,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		clear_bit(Faulty, &rdev->flags);
 		clear_bit(In_sync, &rdev->flags);
 		clear_bit(WriteMostly, &rdev->flags);
-		clear_bit(BarriersNotsupp, &rdev->flags);
 
 		if (mddev->raid_disks == 0) {
 			mddev->major_version = 0;
@@ -1486,7 +1422,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		clear_bit(Faulty, &rdev->flags);
 		clear_bit(In_sync, &rdev->flags);
 		clear_bit(WriteMostly, &rdev->flags);
-		clear_bit(BarriersNotsupp, &rdev->flags);
 
 		if (mddev->raid_disks == 0) {
 			mddev->major_version = 1;
@@ -4505,7 +4440,6 @@ int md_run(mddev_t *mddev)
 	/* may be over-ridden by personality */
 	mddev->resync_max_sectors = mddev->dev_sectors;
 
-	mddev->barriers_work = 1;
 	mddev->ok_start_degraded = start_dirty_degraded;
 
 	if (start_readonly && mddev->ro == 0)
@@ -4684,7 +4618,6 @@ static void md_clean(mddev_t *mddev)
 	mddev->recovery = 0;
 	mddev->in_sync = 0;
 	mddev->degraded = 0;
-	mddev->barriers_work = 0;
 	mddev->safemode = 0;
 	mddev->bitmap_info.offset = 0;
 	mddev->bitmap_info.default_offset = 0;
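For context, a sketch of how a RAID personality is expected to hand flushes to the new helper, assumed from the raid1 conversion in this series (not a verbatim hunk from any commit):

static int example_make_request(mddev_t *mddev, struct bio *bio)
{
	/* Flushes bypass the normal path: md_flush_request() records
	 * the bio, sends an empty WRITE_FLUSH to every member disk,
	 * then resubmits the data portion (if any) from flush_work. */
	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}
	/* ... normal read/write handling ... */
	return 0;
}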