mm: dirty balancing for tasks
Based on ideas of Andrew: http://marc.info/?l=linux-kernel&m=102912915020543&w=2 Scale the bdi dirty limit inversely with the tasks dirty rate. This makes heavy writers have a lower dirty limit than the occasional writer. Andrea proposed something similar: http://lwn.net/Articles/152277/ The main disadvantage to his patch is that he uses an unrelated quantity to measure time, which leaves him with a workload-dependent tunable. Other than that the two approaches appear quite similar. [akpm@linux-foundation.org: fix warning] Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

committed by
Linus Torvalds

parent
04fbfdc14e
commit
3e26c149c3
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
|
||||
|
||||
void free_task(struct task_struct *tsk)
|
||||
{
|
||||
prop_local_destroy_single(&tsk->dirties);
|
||||
free_thread_info(tsk->stack);
|
||||
rt_mutex_debug_task_free(tsk);
|
||||
free_task_struct(tsk);
|
||||
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
struct thread_info *ti;
|
||||
int err;
|
||||
|
||||
prepare_to_copy(orig);
|
||||
|
||||
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
||||
|
||||
*tsk = *orig;
|
||||
tsk->stack = ti;
|
||||
|
||||
err = prop_local_init_single(&tsk->dirties);
|
||||
if (err) {
|
||||
free_thread_info(ti);
|
||||
free_task_struct(tsk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
setup_thread_stack(tsk, orig);
|
||||
|
||||
#ifdef CONFIG_CC_STACKPROTECTOR
|
||||
|
Reference in New Issue
Block a user