Merge branch 'for-3.8/drivers' of git://git.kernel.dk/linux-block
Pull block driver update from Jens Axboe: "Now that the core bits are in, here are the driver bits for 3.8. The branch contains: - A huge pile of drbd bits that were dumped from the 3.7 merge window. Following that, it was both made perfectly clear that there is going to be no more over-the-wall pulls and how the situation on individual pulls can be improved. - A few cleanups from Akinobu Mita for drbd and cciss. - Queue improvement for loop from Lukas. This grew into adding a generic interface for waiting/checking an event with a specific lock, allowing this to be pulled out of md and now loop and drbd are also using it. - A few fixes for xen back/front block driver from Roger Pau Monne. - Partition improvements from Stephen Warren, allowing partition UUID to be used as an identifier." * 'for-3.8/drivers' of git://git.kernel.dk/linux-block: (609 commits) drbd: update Kconfig to match current dependencies drbd: Fix drbdsetup wait-connect, wait-sync etc... commands drbd: close race between drbd_set_role and drbd_connect drbd: respect no-md-barriers setting also when changed online via disk-options drbd: Remove obsolete check drbd: fixup after wait_even_lock_irq() addition to generic code loop: Limit the number of requests in the bio list wait: add wait_event_lock_irq() interface xen-blkfront: free allocated page xen-blkback: move free persistent grants code block: partition: msdos: provide UUIDs for partitions init: reduce PARTUUID min length to 1 from 36 block: store partition_meta_info.uuid as a string cciss: use check_signature() cciss: cleanup bitops usage drbd: use copy_highpage drbd: if the replication link breaks during handshake, keep retrying drbd: check return of kmalloc in receive_uuids drbd: Broadcast sync progress no more often than once per second drbd: don't try to clear bits once the disk has failed ...
This commit is contained in:
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
|
||||
spin_lock_irq(&mddev->write_lock);
|
||||
wait_event_lock_irq(mddev->sb_wait,
|
||||
!mddev->flush_bio,
|
||||
mddev->write_lock, /*nothing*/);
|
||||
mddev->write_lock);
|
||||
mddev->flush_bio = bio;
|
||||
spin_unlock_irq(&mddev->write_lock);
|
||||
|
||||
|
@@ -551,32 +551,6 @@ struct md_thread {
|
||||
|
||||
#define THREAD_WAKEUP 0
|
||||
|
||||
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
|
||||
do { \
|
||||
wait_queue_t __wait; \
|
||||
init_waitqueue_entry(&__wait, current); \
|
||||
\
|
||||
add_wait_queue(&wq, &__wait); \
|
||||
for (;;) { \
|
||||
set_current_state(TASK_UNINTERRUPTIBLE); \
|
||||
if (condition) \
|
||||
break; \
|
||||
spin_unlock_irq(&lock); \
|
||||
cmd; \
|
||||
schedule(); \
|
||||
spin_lock_irq(&lock); \
|
||||
} \
|
||||
current->state = TASK_RUNNING; \
|
||||
remove_wait_queue(&wq, &__wait); \
|
||||
} while (0)
|
||||
|
||||
#define wait_event_lock_irq(wq, condition, lock, cmd) \
|
||||
do { \
|
||||
if (condition) \
|
||||
break; \
|
||||
__wait_event_lock_irq(wq, condition, lock, cmd); \
|
||||
} while (0)
|
||||
|
||||
static inline void safe_put_page(struct page *p)
|
||||
{
|
||||
if (p) put_page(p);
|
||||
|
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
|
||||
|
||||
/* Wait until no block IO is waiting */
|
||||
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
|
||||
conf->resync_lock, );
|
||||
conf->resync_lock);
|
||||
|
||||
/* block any new IO from starting */
|
||||
conf->barrier++;
|
||||
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
|
||||
/* Now wait for all pending IO to complete */
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
|
||||
conf->resync_lock, );
|
||||
conf->resync_lock);
|
||||
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
}
|
||||
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
|
||||
(conf->nr_pending &&
|
||||
current->bio_list &&
|
||||
!bio_list_empty(current->bio_list)),
|
||||
conf->resync_lock,
|
||||
);
|
||||
conf->resync_lock);
|
||||
conf->nr_waiting--;
|
||||
}
|
||||
conf->nr_pending++;
|
||||
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
|
||||
spin_lock_irq(&conf->resync_lock);
|
||||
conf->barrier++;
|
||||
conf->nr_waiting++;
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
conf->nr_pending == conf->nr_queued+1,
|
||||
conf->resync_lock,
|
||||
flush_pending_writes(conf));
|
||||
wait_event_lock_irq_cmd(conf->wait_barrier,
|
||||
conf->nr_pending == conf->nr_queued+1,
|
||||
conf->resync_lock,
|
||||
flush_pending_writes(conf));
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
}
|
||||
static void unfreeze_array(struct r1conf *conf)
|
||||
|
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
|
||||
|
||||
/* Wait until no block IO is waiting (unless 'force') */
|
||||
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
|
||||
conf->resync_lock, );
|
||||
conf->resync_lock);
|
||||
|
||||
/* block any new IO from starting */
|
||||
conf->barrier++;
|
||||
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
|
||||
/* Now wait for all pending IO to complete */
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
|
||||
conf->resync_lock, );
|
||||
conf->resync_lock);
|
||||
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
}
|
||||
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
|
||||
(conf->nr_pending &&
|
||||
current->bio_list &&
|
||||
!bio_list_empty(current->bio_list)),
|
||||
conf->resync_lock,
|
||||
);
|
||||
conf->resync_lock);
|
||||
conf->nr_waiting--;
|
||||
}
|
||||
conf->nr_pending++;
|
||||
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
|
||||
spin_lock_irq(&conf->resync_lock);
|
||||
conf->barrier++;
|
||||
conf->nr_waiting++;
|
||||
wait_event_lock_irq(conf->wait_barrier,
|
||||
conf->nr_pending == conf->nr_queued+1,
|
||||
conf->resync_lock,
|
||||
flush_pending_writes(conf));
|
||||
wait_event_lock_irq_cmd(conf->wait_barrier,
|
||||
conf->nr_pending == conf->nr_queued+1,
|
||||
conf->resync_lock,
|
||||
flush_pending_writes(conf));
|
||||
|
||||
spin_unlock_irq(&conf->resync_lock);
|
||||
}
|
||||
|
@@ -466,7 +466,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
||||
do {
|
||||
wait_event_lock_irq(conf->wait_for_stripe,
|
||||
conf->quiesce == 0 || noquiesce,
|
||||
conf->device_lock, /* nothing */);
|
||||
conf->device_lock);
|
||||
sh = __find_stripe(conf, sector, conf->generation - previous);
|
||||
if (!sh) {
|
||||
if (!conf->inactive_blocked)
|
||||
@@ -480,8 +480,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
||||
(atomic_read(&conf->active_stripes)
|
||||
< (conf->max_nr_stripes *3/4)
|
||||
|| !conf->inactive_blocked),
|
||||
conf->device_lock,
|
||||
);
|
||||
conf->device_lock);
|
||||
conf->inactive_blocked = 0;
|
||||
} else
|
||||
init_stripe(sh, sector, previous);
|
||||
@@ -1646,8 +1645,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
wait_event_lock_irq(conf->wait_for_stripe,
|
||||
!list_empty(&conf->inactive_list),
|
||||
conf->device_lock,
|
||||
);
|
||||
conf->device_lock);
|
||||
osh = get_free_stripe(conf);
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
atomic_set(&nsh->count, 1);
|
||||
@@ -4003,7 +4001,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
wait_event_lock_irq(conf->wait_for_stripe,
|
||||
conf->quiesce == 0,
|
||||
conf->device_lock, /* nothing */);
|
||||
conf->device_lock);
|
||||
atomic_inc(&conf->active_aligned_reads);
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
|
||||
@@ -6095,7 +6093,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
|
||||
wait_event_lock_irq(conf->wait_for_stripe,
|
||||
atomic_read(&conf->active_stripes) == 0 &&
|
||||
atomic_read(&conf->active_aligned_reads) == 0,
|
||||
conf->device_lock, /* nothing */);
|
||||
conf->device_lock);
|
||||
conf->quiesce = 1;
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
/* allow reshape to continue */
|
||||
|
Reference in New Issue
Block a user