Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block

Pull block driver changes from Jens Axboe:

 - Making the plugging support for drivers a bit more sane from Neil.
   This supersedes the plugging change from Shaohua as well (a sketch of
   the new callback API follows the drivers/md/md.h diff below).

 - The usual round of drbd updates.

 - Using a tail add instead of a head add in the request completion for
   nbd, making us find the most completed request more quickly (a toy
   model follows this list).

 - A few floppy changes from Andi: getting rid of a duplicated flag and
   running the floppy init asynchronously, since it takes forever in
   boot terms (the async pattern is sketched after this list).
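
For illustration only, a toy model (not the nbd driver code) of why the
tail add helps: requests are queued in submission order and replies tend
to arrive oldest-first, so a tail add keeps the request most likely to
complete next at the front of the list, while a head add forces the
completion scan to walk past every newer request.  All names in this
sketch are made up:

#include <stdio.h>

struct req {
	int handle;		/* identifies an in-flight request */
	struct req *next;
};

/* Head add (old behaviour): the newest request ends up first. */
static void add_head(struct req **q, struct req *r)
{
	r->next = *q;
	*q = r;
}

/* Tail add (new behaviour): the oldest request stays first. */
static void add_tail(struct req **q, struct req *r)
{
	while (*q)
		q = &(*q)->next;
	r->next = NULL;
	*q = r;
}

/* A completion scans the queue for the reply's handle. */
static int steps_to_find(struct req *q, int handle)
{
	int steps = 1;

	for (; q; q = q->next, steps++)
		if (q->handle == handle)
			return steps;
	return -1;
}

int main(void)
{
	struct req a[4], b[4];
	struct req *head_q = NULL, *tail_q = NULL;
	int i;

	/* Submit handles 0..3, oldest first, to both queues. */
	for (i = 0; i < 4; i++) {
		a[i].handle = b[i].handle = i;
		add_head(&head_q, &a[i]);
		add_tail(&tail_q, &b[i]);
	}

	/* Handle 0 is the oldest and completes first. */
	printf("head add: %d steps\n", steps_to_find(head_q, 0)); /* 4 */
	printf("tail add: %d steps\n", steps_to_find(tail_q, 0)); /* 1 */
	return 0;
}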

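The floppy init change uses the kernel's async infrastructure
(linux/async.h) to move the slow hardware probe off the synchronous
boot path.  A minimal sketch of that pattern, assuming the circa-3.6
async_schedule() callback signature; this shows the shape of the
change, not the exact patch:

#include <linux/async.h>
#include <linux/init.h>
#include <linux/module.h>

/* Runs the (slow) probe from the async thread pool; nothing on the
 * boot path has to wait for the floppy hardware to answer. */
static void floppy_async_init(void *data, async_cookie_t cookie)
{
	floppy_init();
}

static int __init floppy_module_init(void)
{
	/* Returns immediately; floppy_init() runs in parallel with
	 * the rest of the boot. */
	async_schedule(floppy_async_init, NULL);
	return 0;
}
module_init(floppy_module_init);
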
* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
  floppy: remove duplicated flag FD_RAW_NEED_DISK
  blk: pass from_schedule to non-request unplug functions.
  block: stack unplug
  blk: centralize non-request unplug handling.
  md: remove plug_cnt feature of plugging.
  block/nbd: micro-optimization in nbd request completion
  drbd: announce FLUSH/FUA capability to upper layers
  drbd: fix max_bio_size to be unsigned
  drbd: flush drbd work queue before invalidate/invalidate remote
  drbd: fix potential access after free
  drbd: call local-io-error handler early
  drbd: do not reset rs_pending_cnt too early
  drbd: reset congestion information before reporting it in /proc/drbd
  drbd: report congestion if we are waiting for some userland callback
  drbd: differentiate between normal and forced detach
  drbd: cleanup, remove two unused global flags
  floppy: Run floppy initialization asynchronous
Linus Torvalds
2012-08-01 09:06:47 -07:00
parents 8cf1a3fce0 10af8138eb
commit eff0d13f38
19 changed files with 238 additions and 177 deletions

drivers/md/md.c

@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on.  This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
-	struct blk_plug_cb cb;
-	struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
-	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
-		md_wakeup_thread(mdcb->mddev->thread);
-	kfree(mdcb);
+	struct mddev *mddev = cb->data;
+	md_wakeup_thread(mddev->thread);
+	kfree(cb);
 }
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
-	struct blk_plug *plug = current->plug;
-	struct md_plug_cb *mdcb;
-
-	if (!plug)
-		return 0;
-
-	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
-		if (mdcb->cb.callback == plugger_unplug &&
-		    mdcb->mddev == mddev) {
-			/* Already on the list, move to top */
-			if (mdcb != list_first_entry(&plug->cb_list,
-						    struct md_plug_cb,
-						    cb.list))
-				list_move(&mdcb->cb.list, &plug->cb_list);
-			return 1;
-		}
-	}
-	/* Not currently on the callback list */
-	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
-	if (!mdcb)
-		return 0;
-
-	mdcb->mddev = mddev;
-	mdcb->cb.callback = plugger_unplug;
-	atomic_inc(&mddev->plug_cnt);
-	list_add(&mdcb->cb.list, &plug->cb_list);
-	return 1;
-}
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
 
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
-	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);

drivers/md/md.h

@@ -266,9 +266,6 @@ struct mddev {
 	int				new_chunk_sectors;
 	int				reshape_backwards;
 
-	atomic_t			plug_cnt;	/* If device is expecting
-							 * more bios soon.
-							 */
 	struct md_thread		*thread;	/* management thread */
 	struct md_thread		*sync_thread;	/* doing resync or reconstruct */
 	sector_t			curr_resync;	/* last block scheduled */
@@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 				   struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   struct mddev *mddev);
-extern int mddev_check_plugged(struct mddev *mddev);
 extern void md_trim_bio(struct bio *bio, int offset, int size);
 
+extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+static inline int mddev_check_plugged(struct mddev *mddev)
+{
+	return !!blk_check_plugged(md_unplug, mddev,
+				   sizeof(struct blk_plug_cb));
+}
 #endif /* _MD_MD_H */
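
The md.h hunk above is the consumer side of the new centralized
plugging: blk_check_plugged() (from "blk: centralize non-request unplug
handling") either finds the callback already armed on the current
task's plug list or allocates a blk_plug_cb of the requested size,
stores the data pointer in cb->data, and returns NULL when the task has
no active plug, which is why md reduces the result to a boolean with !!.
The callback later runs at unplug time, with from_schedule saying
whether the unplug came from the scheduler.  A hedged sketch of a
driver using this API, modeled on the md code above; the mydev names
are hypothetical:

#include <linux/blkdev.h>
#include <linux/slab.h>

struct mydev;					/* hypothetical driver state */
void mydev_kick_queue(struct mydev *dev);	/* hypothetical: start queued work */

/* Unplug callback: cb->data is whatever was passed to
 * blk_check_plugged(); the cb itself must be freed here. */
static void mydev_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mydev *dev = cb->data;

	mydev_kick_queue(dev);
	kfree(cb);
}

/* True if an unplug callback is now armed for this device, i.e. a
 * wakeup will arrive when the current task unplugs. */
static bool mydev_check_plugged(struct mydev *dev)
{
	return blk_check_plugged(mydev_unplug, dev,
				 sizeof(struct blk_plug_cb)) != NULL;
}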

drivers/md/raid1.c

@@ -2247,8 +2247,7 @@ static void raid1d(struct mddev *mddev)
 	blk_start_plug(&plug);
 	for (;;) {
 
-		if (atomic_read(&mddev->plug_cnt) == 0)
-			flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {

drivers/md/raid10.c

@@ -2680,8 +2680,7 @@ static void raid10d(struct mddev *mddev)
 	blk_start_plug(&plug);
 	for (;;) {
 
-		if (atomic_read(&mddev->plug_cnt) == 0)
-			flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {

drivers/md/raid5.c

@@ -4562,7 +4562,7 @@ static void raid5d(struct mddev *mddev)
 	while (1) {
 		struct bio *bio;
 
-		if (atomic_read(&mddev->plug_cnt) == 0 &&
+		if (
 		    !list_empty(&conf->bitmap_list)) {
 			/* Now is a good time to flush some bitmap updates */
 			conf->seq_flush++;
@@ -4572,8 +4572,7 @@ static void raid5d(struct mddev *mddev)
 			conf->seq_write = conf->seq_flush;
 			activate_bit_delay(conf);
 		}
-		if (atomic_read(&mddev->plug_cnt) == 0)
-			raid5_activate_delayed(conf);
+		raid5_activate_delayed(conf);
 
 		while ((bio = remove_bio_from_retry(conf))) {
 			int ok;