memcg: add inactive_anon_is_low()
inactive_anon_is_low() is a key component of the active/inactive anon balancing at reclaim time. However, the current inactive_anon_is_low() only considers global reclaim, which forces the following ugly scan_global_lru() condition:

	if (lru == LRU_ACTIVE_ANON &&
	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}

It causes memcg reclaim to always deactivate pages when shrink_list() is called. Add mem_cgroup_inactive_anon_is_low() to improve the active/inactive anon balancing of memcg.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: "Pekka Enberg" <penberg@cs.helsinki.fi>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds

parent 549927620b
commit 14797e2363
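With the new helper, memcg (non-global) reclaim can apply the same "is the inactive anon list too small?" policy as global reclaim instead of unconditionally deactivating pages. The vmscan.c side of the change is not shown on this page; the sketch below only illustrates how a caller could dispatch between the two cases, reusing the scan_global_lru(sc) predicate and sc->mem_cgroup pointer from the condition quoted in the changelog (inactive_anon_is_low_global() stands for the old global-only check and is named here only for illustration):

	/* Sketch only: dispatch between the global and per-memcg checks. */
	static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
	{
		if (scan_global_lru(sc))
			/* global reclaim: old path based on zone->inactive_ratio */
			return inactive_anon_is_low_global(zone);

		/* memcg reclaim: per-memcg statistics and per-memcg ratio */
		return mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone);
	}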
@@ -156,6 +156,9 @@ struct mem_cgroup {
 	unsigned long last_oom_jiffies;
 	int obsolete;
 	atomic_t refcnt;
+
+	unsigned int inactive_ratio;
+
 	/*
 	 * statistics. This must be placed at the end of memcg.
 	 */
@@ -431,6 +434,20 @@ long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
 	return (nr_pages >> priority);
 }
 
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+{
+	unsigned long active;
+	unsigned long inactive;
+
+	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+
+	if (inactive * memcg->inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
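As a worked example of the check above (the numbers are purely illustrative): with inactive_ratio = 3, the value a 1GB limit yields in the next hunk, a memcg whose anon LRU holds 300 active and 90 inactive pages returns 1, because 90 * 3 = 270 < 300, so reclaim will move some active anon pages to the inactive list; with 120 inactive pages it returns 0, since 120 * 3 = 360 >= 300.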
@@ -1360,6 +1377,29 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 	return 0;
 }
 
+/*
+ * The inactive anon list should be small enough that the VM never has to
+ * do too much work, but large enough that each inactive page has a chance
+ * to be referenced again before it is swapped out.
+ *
+ * this calculation is straightforward porting from
+ * page_alloc.c::setup_per_zone_inactive_ratio().
+ * it describe more detail.
+ */
+static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg)
+{
+	unsigned int gb, ratio;
+
+	gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30;
+	if (gb)
+		ratio = int_sqrt(10 * gb);
+	else
+		ratio = 1;
+
+	memcg->inactive_ratio = ratio;
+
+}
+
 static DEFINE_MUTEX(set_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
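The ratio grows with the square root of the memcg limit, following page_alloc.c::setup_per_zone_inactive_ratio(), which the comment above points to: a limit below 1GB gives gb = 0 and ratio = 1, 1GB gives int_sqrt(10) = 3, 10GB gives 10, 100GB gives 31, and 1TB gives 101. A throwaway userspace mock of the same arithmetic (illustration only; sqrt() stands in for the kernel's int_sqrt()):

	/* Illustration only: userspace mock of mem_cgroup_set_inactive_ratio()'s
	 * arithmetic. Build with: cc ratio.c -lm */
	#include <math.h>
	#include <stdio.h>

	static unsigned int inactive_ratio(unsigned long long limit_bytes)
	{
		unsigned int gb = limit_bytes >> 30;	/* whole GB of the memcg limit */

		return gb ? (unsigned int)sqrt(10.0 * gb) : 1;
	}

	int main(void)
	{
		unsigned long long limits[] = {
			512ULL << 20,	/* 512MB -> gb = 0        -> ratio 1   */
			1ULL   << 30,	/*   1GB -> int_sqrt(10)   -> ratio 3   */
			10ULL  << 30,	/*  10GB -> int_sqrt(100)  -> ratio 10  */
			100ULL << 30,	/* 100GB -> int_sqrt(1000) -> ratio 31  */
			1ULL   << 40,	/*   1TB -> int_sqrt(10240)-> ratio 101 */
		};

		for (unsigned i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
			printf("limit %llu bytes -> inactive_ratio %u\n",
			       limits[i], inactive_ratio(limits[i]));
		return 0;
	}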
@@ -1398,6 +1438,10 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 						GFP_KERNEL, false);
 		if (!progress)			retry_count--;
 	}
+
+	if (!ret)
+		mem_cgroup_set_inactive_ratio(memcg);
+
 	return ret;
 }
 
@@ -1982,7 +2026,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		res_counter_init(&mem->res, NULL);
 		res_counter_init(&mem->memsw, NULL);
 	}
-
+	mem_cgroup_set_inactive_ratio(mem);
 	mem->last_scanned_child = NULL;
 
 	return &mem->css;
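Both call sites matter because inactive_ratio is derived only from the memcg's RES_LIMIT: it is initialized when the cgroup is created (above) and refreshed whenever mem_cgroup_resize_limit() successfully changes the limit (previous hunk), so the deactivation threshold always tracks the current limit.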