memcg: better migration handling
This patch changes page migration under the memory controller to use a
different algorithm.  (Thanks to Christoph for the new idea.)

Before:
 - page_cgroup is migrated from the old page to the new page.
After:
 - the new page is accounted; there is no reuse of page_cgroup.

Pros:
 - We can avoid complicated lock dependencies and races in migration.
Cons:
 - a new parameter to mem_cgroup_charge_common().
 - mem_cgroup_getref() is added for handling the ref_cnt ping-pong.

This version simplifies the complicated lock dependency in page migration
under the memory resource controller.

The new refcnt sequence is as follows, for a mapped page:

  prepare_migration()  ..... +1 to the NEW page
  try_to_unmap()       ..... all refs to the OLD page are gone.
  move_pages()         ..... +1 to the NEW page if it is page cache.
  remap ...            ..... all refs from the *map* are added to the NEW one.
  end_migration()      ..... -1 to the NEW page.

  In total, the page's mapcount + (page_is_cache) refs are added to the NEW one.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e8589cc189
parent 508b7be0a5
committed by Linus Torvalds
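For illustration, here is a minimal C sketch of how a migration path might call the reworked memcg hooks in the order described above. It is not the actual mm/migrate.c code: unmap_old_page() and move_to_new_page_stub() are hypothetical stand-ins for the real unmap/move steps, and only the mem_cgroup_* calls reflect the interface changed by this patch.

/*
 * Illustrative sketch only -- not the kernel's mm/migrate.c.  It walks the
 * refcount sequence from the commit message using the reworked memcg hooks.
 * unmap_old_page() and move_to_new_page_stub() are hypothetical helpers.
 */
static int migrate_one_page_sketch(struct page *page, struct page *newpage)
{
	int rc;

	/* prepare_migration(): the NEW page is charged/referenced up front. */
	rc = mem_cgroup_prepare_migration(page, newpage);
	if (rc)
		return rc;

	/* try_to_unmap(): all mapped references to the OLD page go away. */
	unmap_old_page(page);			/* hypothetical helper */

	/* move_pages(): one more ref to the NEW page if it is page cache. */
	move_to_new_page_stub(page, newpage);	/* hypothetical helper */

	/*
	 * remap: mapcount (+1 if page cache) references now point at the
	 * NEW page.  end_migration() drops the reference taken up front.
	 */
	mem_cgroup_end_migration(newpage);
	return 0;
}

Charging the new page up front means the old page's page_cgroup never has to be handed over to the new page, which is where the complicated lock dependencies came from in the previous scheme.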
include/linux/memcontrol.h
@@ -50,9 +50,10 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 #define mm_match_cgroup(mm, cgroup)	\
 	((cgroup) == mem_cgroup_from_task((mm)->owner))
 
-extern int mem_cgroup_prepare_migration(struct page *page);
+extern int
+mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
 extern void mem_cgroup_end_migration(struct page *page);
-extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
+extern int mem_cgroup_getref(struct page *page);
 
 /*
  * For memory reclaim.
@@ -112,7 +113,8 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 	return 1;
 }
 
-static inline int mem_cgroup_prepare_migration(struct page *page)
+static inline int
+mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 {
 	return 0;
 }
@@ -121,8 +123,7 @@ static inline void mem_cgroup_end_migration(struct page *page)
 {
 }
 
-static inline void
-mem_cgroup_page_migration(struct page *page, struct page *newpage)
+static inline void mem_cgroup_getref(struct page *page)
 {
 }