Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:

 - Major bit is Kent's prep work for immutable bio vecs.

 - Stable candidate fix for a scheduling-while-atomic in the queue
   bypass operation.

 - Fix for the hang on exceeded rq->datalen 32-bit unsigned when
   merging discard bios.

 - Tejun's changes to convert the writeback thread pool to the generic
   workqueue mechanism (a sketch of the resulting pattern follows
   below).

 - Runtime PM framework; SCSI patches exist on top of these in James'
   tree.

 - A few random fixes.

* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
  relay: move remove_buf_file inside relay_close_buf
  partitions/efi.c: replace useless kzalloc's by kmalloc's
  fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
  block: fix max discard sectors limit
  blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
  Documentation: cfq-iosched: update documentation help for cfq tunables
  writeback: expose the bdi_wq workqueue
  writeback: replace custom worker pool implementation with unbound workqueue
  writeback: remove unused bdi_pending_list
  aoe: Fix unitialized var usage
  bio-integrity: Add explicit field for owner of bip_buf
  block: Add an explicit bio flag for bios that own their bvec
  block: Add bio_alloc_pages()
  block: Convert some code to bio_for_each_segment_all()
  block: Add bio_for_each_segment_all()
  bounce: Refactor __blk_queue_bounce to not use bi_io_vec
  raid1: use bio_copy_data()
  pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
  pktcdvd: use bio_copy_data()
  block: Add bio_copy_data()
  ...
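As context for the writeback conversion in the diff below, here is a minimal
sketch of the pattern the series moves to: one shared unbound workqueue plus a
per-device delayed_work item, so waking the flusher becomes a single
mod_delayed_work() call. The bdi_wq name, the dwork field, and the
alloc_workqueue() flags mirror what appears in the diff; the example_* struct
and functions are simplified stand-ins invented for illustration, not the
kernel's real bdi_writeback code.

#include <linux/workqueue.h>

static struct workqueue_struct *bdi_wq;	/* shared by all devices */

/* Simplified stand-in for struct bdi_writeback. */
struct example_wb {
	struct delayed_work dwork;
};

static void example_workfn(struct work_struct *work)
{
	struct example_wb *wb = container_of(to_delayed_work(work),
					     struct example_wb, dwork);
	/* ... write back dirty data, requeue wb->dwork if more remains ... */
	(void)wb;
}

static int example_setup(struct example_wb *wb)
{
	/*
	 * Unbound (not tied to a CPU), usable during memory reclaim,
	 * freezable across suspend, tunable via sysfs; these are the
	 * flags the series uses when creating bdi_wq.
	 */
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
				 WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&wb->dwork, example_workfn);
	return 0;
}

static void example_kick_flusher(struct example_wb *wb)
{
	/*
	 * Runs the work as soon as possible, pulling forward any timer
	 * already pending; this single call replaces the old
	 * wake_up_process()/forker-thread logic deleted below.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
}

Periodic kupdated-style flushing then falls out of queue_delayed_work() with a
timeout, rather than a kthread sleeping in schedule_timeout(), which is what
the last hunk of the diff implements.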
@@ -22,7 +22,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
@@ -88,20 +87,6 @@ static inline struct inode *wb_inode(struct list_head *head)
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
-static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
-{
-	if (bdi->wb.task) {
-		wake_up_process(bdi->wb.task);
-	} else {
-		/*
-		 * The bdi thread isn't there, wake up the forker thread which
-		 * will create and run it.
-		 */
-		wake_up_process(default_backing_dev_info.wb.task);
-	}
-}
-
 static void bdi_queue_work(struct backing_dev_info *bdi,
 			   struct wb_writeback_work *work)
 {
@@ -109,10 +94,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 	spin_lock_bh(&bdi->wb_lock);
 	list_add_tail(&work->list, &bdi->work_list);
-	if (!bdi->wb.task)
-		trace_writeback_nothread(bdi, work);
-	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
+
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 static void
@@ -127,10 +111,8 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	 */
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work) {
-		if (bdi->wb.task) {
-			trace_writeback_nowork(bdi);
-			wake_up_process(bdi->wb.task);
-		}
+		trace_writeback_nowork(bdi);
+		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 		return;
 	}
 
@@ -177,9 +159,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
 	 * writeback as soon as there is no other work to do.
 	 */
	trace_writeback_wake_background(bdi);
-	spin_lock_bh(&bdi->wb_lock);
-	bdi_wakeup_flusher(bdi);
-	spin_unlock_bh(&bdi->wb_lock);
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 /*
@@ -1020,67 +1000,49 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 
 /*
  * Handle writeback of dirty data for the device backed by this bdi. Also
- * wakes up periodically and does kupdated style flushing.
+ * reschedules periodically and does kupdated style flushing.
  */
-int bdi_writeback_thread(void *data)
+void bdi_writeback_workfn(struct work_struct *work)
 {
-	struct bdi_writeback *wb = data;
+	struct bdi_writeback *wb = container_of(to_delayed_work(work),
+						struct bdi_writeback, dwork);
 	struct backing_dev_info *bdi = wb->bdi;
 	long pages_written;
 
+	set_worker_desc("flush-%s", dev_name(bdi->dev));
 	current->flags |= PF_SWAPWRITE;
-	set_freezable();
-	wb->last_active = jiffies;
-
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(current, 0);
-
-	trace_writeback_thread_start(bdi);
 
-	while (!kthread_freezable_should_stop(NULL)) {
+	if (likely(!current_is_workqueue_rescuer() ||
+		   list_empty(&bdi->bdi_list))) {
 		/*
-		 * Remove own delayed wake-up timer, since we are already awake
-		 * and we'll take care of the periodic write-back.
+		 * The normal path. Keep writing back @bdi until its
+		 * work_list is empty. Note that this path is also taken
+		 * if @bdi is shutting down even when we're running off the
+		 * rescuer as work_list needs to be drained.
 		 */
-		del_timer(&wb->wakeup_timer);
-
-		pages_written = wb_do_writeback(wb, 0);
-
+		do {
+			pages_written = wb_do_writeback(wb, 0);
+			trace_writeback_pages_written(pages_written);
+		} while (!list_empty(&bdi->work_list));
+	} else {
+		/*
+		 * bdi_wq can't get enough workers and we're running off
+		 * the emergency worker. Don't hog it. Hopefully, 1024 is
+		 * enough for efficient IO.
+		 */
+		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
+						    WB_REASON_FORKER_THREAD);
 		trace_writeback_pages_written(pages_written);
-
-		if (pages_written)
-			wb->last_active = jiffies;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
-			continue;
-		}
-
-		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
-			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-		else {
-			/*
-			 * We have nothing to do, so can go sleep without any
-			 * timeout and save power. When a work is queued or
-			 * something is made dirty - we will be woken up.
-			 */
-			schedule();
-		}
 	}
 
-	/* Flush any work that raced with us exiting */
-	if (!list_empty(&bdi->work_list))
-		wb_do_writeback(wb, 1);
+	if (!list_empty(&bdi->work_list) ||
+	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
+		queue_delayed_work(bdi_wq, &wb->dwork,
+				   msecs_to_jiffies(dirty_writeback_interval * 10));
 
-	trace_writeback_thread_stop(bdi);
-	return 0;
+	current->flags &= ~PF_SWAPWRITE;
 }
 
 /*
  * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
  * the whole world.