ANDROID: mm: lru_cache_disable skips lru cache draining

lru_cache_disable is not a trivial operation since it has to run work
on every core in the system. Thus, calling it repeatedly for each
alloc_contig_range call in CMA's allocation loop is expensive.

This patch makes lru_cache_disable smarter: it skips
__lru_add_drain_all when it knows the cache has already been
disabled by someone else.
With that, users of alloc_contig_range can disable the lru cache in
advance in their own context, so that subsequent alloc_contig_range
calls for their operation avoid the costly drain.

This patch moves the lru_cache APIs from swap.h to swap.c and
exports them for vendor users.
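
A minimal sketch of the intended caller pattern follows (the wrapper
function, retry count, migratetype and gfp flags are hypothetical
illustrations; alloc_contig_range(), lru_cache_disable() and
lru_cache_enable() are the existing kernel APIs):

#include <linux/gfp.h>
#include <linux/swap.h>

/*
 * Hypothetical CMA-style caller: disable the lru cache once up front,
 * retry alloc_contig_range() as needed, then re-enable the cache.
 * Only the first lru_cache_disable() pays for the all-CPU drain.
 */
static int contig_alloc_with_retries(unsigned long start, unsigned long end)
{
	int ret = -EBUSY;
	int tries;

	lru_cache_disable();
	for (tries = 0; tries < 5 && ret == -EBUSY; tries++)
		ret = alloc_contig_range(start, end, MIGRATE_CMA, GFP_KERNEL);
	lru_cache_enable();

	return ret;
}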

Bug: 192475091
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I23da8599c55db49dc80226285972e4cd80dedcff
commit c8578a3e90
parent c01ce3b5ef
Author:    Minchan Kim
Date:      2021-07-13 12:24:14 -07:00
Committer: Todd Kjos
2 changed files with 22 additions and 14 deletions

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -342,19 +342,9 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			struct lruvec *lruvec, struct list_head *head);
 extern void mark_page_accessed(struct page *);
 
-extern atomic_t lru_disable_count;
-
-static inline bool lru_cache_disabled(void)
-{
-	return atomic_read(&lru_disable_count);
-}
-
-static inline void lru_cache_enable(void)
-{
-	atomic_dec(&lru_disable_count);
-}
-
+extern bool lru_cache_disabled(void);
 extern void lru_cache_disable(void);
+extern void lru_cache_enable(void);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);

--- a/mm/swap.c
+++ b/mm/swap.c

@@ -924,7 +924,18 @@ void lru_add_drain_all(void)
 }
 #endif /* CONFIG_SMP */
 
-atomic_t lru_disable_count = ATOMIC_INIT(0);
+static atomic_t lru_disable_count = ATOMIC_INIT(0);
+
+bool lru_cache_disabled(void)
+{
+	return atomic_read(&lru_disable_count) != 0;
+}
+
+void lru_cache_enable(void)
+{
+	atomic_dec(&lru_disable_count);
+}
+EXPORT_SYMBOL_GPL(lru_cache_enable);
 
 /*
  * lru_cache_disable() needs to be called before we start compiling
@@ -936,7 +947,12 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
  */
 void lru_cache_disable(void)
 {
-	atomic_inc(&lru_disable_count);
+	/*
+	 * If someone has already disabled the lru cache, just return
+	 * after bumping lru_disable_count.
+	 */
+	if (atomic_inc_not_zero(&lru_disable_count))
+		return;
 #ifdef CONFIG_SMP
 	/*
 	 * lru_add_drain_all in the force mode will schedule draining on
@@ -950,7 +966,9 @@ void lru_cache_disable(void)
 #else
 	lru_add_drain();
 #endif
+	atomic_inc(&lru_disable_count);
 }
+EXPORT_SYMBOL_GPL(lru_cache_disable);
 
 /**
  * release_pages - batched put_page()
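
For reference, a sketch of the resulting nesting semantics (the wrapper
function below is a hypothetical illustration; only the first disabler
performs the all-CPU drain, later callers just bump the count):

#include <linux/bug.h>
#include <linux/swap.h>

static void lru_disable_nesting_sketch(void)
{
	lru_cache_disable();		/* count 0 -> 1: drains pagevecs on every CPU */
	WARN_ON(!lru_cache_disabled());	/* exported query now reports "disabled" */

	lru_cache_disable();		/* count 1 -> 2: no drain, just a counter bump */
	lru_cache_enable();		/* count 2 -> 1: still disabled */
	lru_cache_enable();		/* count 1 -> 0: lru caching resumes */
}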