ANDROID: zram: allow zram to allocate CMA pages
Though zram pages are movable, they are not allowed to enter MIGRATE_CMA pageblocks. zram is not observed to pin pages for long durations, so allowing it to use CMA pages should not cause an issue. Moreover, letting zram pick CMA pages can help in cases where zram order-0 allocations fail while plenty of free CMA pages exist, leaving kswapd or direct reclaim unable to make enough progress.

Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Bug: 158645321
Link: https://lore.kernel.org/linux-mm/4c77bb100706b714213ff840d827a48e40ac9177.1604282969.git.cgoldswo@codeaurora.org/
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I406f92a4175367caec38ef8b8eaca7020ae09917
This commit is contained in:

committed by
Suren Baghdasaryan

parent
cf8f7947f2
commit
4f9d16a68d
@@ -347,7 +347,7 @@ static void destroy_cache(struct zs_pool *pool)
|
||||
/*
 * Allocate a handle slot from the pool's handle kmem cache.
 *
 * Handles are internal bookkeeping objects, so they must not be placed in
 * highmem, movable, or CMA pageblocks; those flags are stripped from the
 * caller-supplied gfp mask before the slab allocation.
 */
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	/* Mask out placement flags that are invalid for slab-backed handles. */
	gfp_t handle_gfp = gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA);

	return (unsigned long)kmem_cache_alloc(pool->handle_cachep, handle_gfp);
}
|
||||
|
||||
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
|
||||
@@ -358,7 +358,7 @@ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
|
||||
/*
 * Allocate a zspage descriptor from the pool's zspage kmem cache.
 *
 * The descriptor is a kernel-internal structure, so highmem, movable, and
 * CMA placement flags are cleared from the caller's gfp mask before
 * delegating to the slab allocator.
 */
static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	/* Descriptors must live in regular, non-movable kernel memory. */
	gfp_t zspage_gfp = flags & ~(__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA);

	return kmem_cache_alloc(pool->zspage_cachep, zspage_gfp);
}
|
||||
|
||||
static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
|
||||
|
Reference in New Issue
Block a user