Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar: - Move the nohz kick code out of the scheduler tick to a dedicated IPI, from Frederic Weisbecker. This necessitated quite some background infrastructure rework, including: * Clean up some irq-work internals * Implement remote irq-work * Implement nohz kick on top of remote irq-work * Move full dynticks timer enqueue notification to new kick * Move multi-task notification to new kick * Remove unnecessary barriers on multi-task notification - Remove proliferation of wait_on_bit() action functions and allow wait_on_bit_action() functions to support a timeout. (Neil Brown) - Another round of sched/numa improvements, cleanups and fixes. (Rik van Riel) - Implement fast idling of CPUs when the system is partially loaded, for better scalability. (Tim Chen) - Restructure and fix the CPU hotplug handling code that may leave cfs_rq and rt_rq's throttled when tasks are migrated away from a dead cpu. (Kirill Tkhai) - Robustify the sched topology setup code. (Peter Zijlstra) - Improve sched_feat() handling wrt. static_keys (Jason Baron) - Misc fixes. 
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits) sched/fair: Fix 'make xmldocs' warning caused by missing description sched: Use macro for magic number of -1 for setparam sched: Robustify topology setup sched: Fix sched_setparam() policy == -1 logic sched: Allow wait_on_bit_action() functions to support a timeout sched: Remove proliferation of wait_on_bit() action functions sched/numa: Revert "Use effective_load() to balance NUMA loads" sched: Fix static_key race with sched_feat() sched: Remove extra static_key*() function indirection sched/rt: Fix replenish_dl_entity() comments to match the current upstream code sched: Transform resched_task() into resched_curr() sched/deadline: Kill task_struct->pi_top_task sched: Rework check_for_tasks() sched/rt: Enqueue just unthrottled rt_rq back on the stack in __disable_runtime() sched/fair: Disable runtime_enabled on dying rq sched/numa: Change scan period code to match intent sched/numa: Rework best node setting in task_numa_migrate() sched/numa: Examine a task move when examining a task swap sched/numa: Simplify task_numa_compare() sched/numa: Use effective_load() to balance NUMA loads ...
Tento commit je obsažen v:
@@ -614,16 +614,6 @@ static void write_endio(struct bio *bio, int error)
|
||||
wake_up_bit(&b->state, B_WRITING);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is called when wait_on_bit is actually waiting.
|
||||
*/
|
||||
static int do_io_schedule(void *word)
|
||||
{
|
||||
io_schedule();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initiate a write on a dirty buffer, but don't wait for it.
|
||||
*
|
||||
@@ -640,8 +630,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
|
||||
return;
|
||||
|
||||
clear_bit(B_DIRTY, &b->state);
|
||||
wait_on_bit_lock(&b->state, B_WRITING,
|
||||
do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
|
||||
|
||||
if (!write_list)
|
||||
submit_io(b, WRITE, b->block, write_endio);
|
||||
@@ -675,9 +664,9 @@ static void __make_buffer_clean(struct dm_buffer *b)
|
||||
if (!b->state) /* fast case */
|
||||
return;
|
||||
|
||||
wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
|
||||
__write_dirty_buffer(b, NULL);
|
||||
wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1030,7 +1019,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
|
||||
if (need_submit)
|
||||
submit_io(b, READ, b->block, read_endio);
|
||||
|
||||
wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
|
||||
|
||||
if (b->read_error) {
|
||||
int error = b->read_error;
|
||||
@@ -1209,15 +1198,13 @@ again:
|
||||
dropped_lock = 1;
|
||||
b->hold_count++;
|
||||
dm_bufio_unlock(c);
|
||||
wait_on_bit(&b->state, B_WRITING,
|
||||
do_io_schedule,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_WRITING,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
dm_bufio_lock(c);
|
||||
b->hold_count--;
|
||||
} else
|
||||
wait_on_bit(&b->state, B_WRITING,
|
||||
do_io_schedule,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_WRITING,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
|
||||
if (!test_bit(B_DIRTY, &b->state) &&
|
||||
@@ -1321,15 +1308,15 @@ retry:
|
||||
|
||||
__write_dirty_buffer(b, NULL);
|
||||
if (b->hold_count == 1) {
|
||||
wait_on_bit(&b->state, B_WRITING,
|
||||
do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_WRITING,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
set_bit(B_DIRTY, &b->state);
|
||||
__unlink_buffer(b);
|
||||
__link_buffer(b, new_block, LIST_DIRTY);
|
||||
} else {
|
||||
sector_t old_block;
|
||||
wait_on_bit_lock(&b->state, B_WRITING,
|
||||
do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_lock_io(&b->state, B_WRITING,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
/*
|
||||
* Relink buffer to "new_block" so that write_callback
|
||||
* sees "new_block" as a block number.
|
||||
@@ -1341,8 +1328,8 @@ retry:
|
||||
__unlink_buffer(b);
|
||||
__link_buffer(b, new_block, b->list_mode);
|
||||
submit_io(b, WRITE, new_block, write_endio);
|
||||
wait_on_bit(&b->state, B_WRITING,
|
||||
do_io_schedule, TASK_UNINTERRUPTIBLE);
|
||||
wait_on_bit_io(&b->state, B_WRITING,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
__unlink_buffer(b);
|
||||
__link_buffer(b, old_block, b->list_mode);
|
||||
}
|
||||
|
Odkázat v novém úkolu
Zablokovat Uživatele