ANDROID: mm: add cma allocation statistics

alloc_contig_range is the core worker function for CMA allocation,
so it has all the information needed to understand allocation
latency: for example, how many pages were migrated, how many times
unmapping was needed to migrate pages, and how many times it ran
into errors for various reasons.

This patch adds such statistics to alloc_contig_range and returns
them to the caller so the caller can use the information to analyze
latency. cma_alloc is the first user of the statistics, and exports
them as a new trace event (i.e., cma_alloc_info).

This was really useful for optimizing CMA allocation work.
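
As an illustration only (not part of this patch), a caller could
consume the statistics roughly as in the sketch below; the helper
name and the pr_debug logging are hypothetical:

	#include <linux/gfp.h>
	#include <linux/printk.h>

	/*
	 * Hypothetical caller sketch: allocate a movable range and log
	 * the statistics that alloc_contig_range() filled in.
	 */
	static int alloc_range_with_stats(unsigned long start_pfn,
					  unsigned long nr_pages)
	{
		struct acr_info info = {0};
		int ret;

		ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
					 MIGRATE_MOVABLE, GFP_KERNEL, &info);
		pr_debug("acr: ret=%d migrated=%lu reclaimed=%lu mapped=%lu err=%#x\n",
			 ret, info.nr_migrated, info.nr_reclaimed,
			 info.nr_mapped, info.err);
		return ret;
	}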

Bug: 192475091
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I7be43cc89d11078e2a324d2d06aada6d8e9e1cc9
Author:    Minchan Kim <minchan@google.com>
Date:      2021-06-29 11:28:13 -07:00
Committer: Todd Kjos
Commit:    675e504598
Parent:    b1e4543c27

6 files changed, 105 insertions(+), 8 deletions(-)

diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c

@@ -1307,11 +1307,12 @@ static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
 	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
 	unsigned long start_pfn;
 	int rc;
+	struct acr_info dummy;
 
 	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
 			     sb_id * vm->subblock_size);
 	rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
-				MIGRATE_MOVABLE, GFP_KERNEL);
+				MIGRATE_MOVABLE, GFP_KERNEL, &dummy);
 	if (rc == -ENOMEM)
 		/* whoops, out of memory */
 		return rc;

diff --git a/include/linux/cma.h b/include/linux/cma.h

@@ -22,6 +22,15 @@
 struct cma;
 
+struct cma_alloc_info {
+	unsigned long nr_migrated;
+	unsigned long nr_reclaimed;
+	unsigned long nr_mapped;
+	unsigned int nr_isolate_fail;
+	unsigned int nr_migrate_fail;
+	unsigned int nr_test_fail;
+};
+
 extern unsigned long totalcma_pages;
 extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);

diff --git a/include/linux/gfp.h b/include/linux/gfp.h

@@ -642,9 +642,21 @@ static inline bool pm_suspended_storage(void)
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_CONTIG_ALLOC
+#define ACR_ERR_ISOLATE	(1 << 0)
+#define ACR_ERR_MIGRATE	(1 << 1)
+#define ACR_ERR_TEST	(1 << 2)
+
+struct acr_info {
+	unsigned long nr_mapped;
+	unsigned long nr_migrated;
+	unsigned long nr_reclaimed;
+	unsigned int err;
+};
+
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
-			      unsigned migratetype, gfp_t gfp_mask);
+			      unsigned migratetype, gfp_t gfp_mask,
+			      struct acr_info *info);
 extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 					int nid, nodemask_t *nodemask);
 #endif

diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h

@@ -91,6 +91,51 @@ TRACE_EVENT(cma_alloc_start,
 		  __entry->align)
 );
 
+TRACE_EVENT(cma_alloc_info,
+
+	TP_PROTO(const char *name, const struct page *page, unsigned int count, unsigned int align, struct cma_alloc_info *info),
+
+	TP_ARGS(name, page, count, align, info),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(unsigned long, pfn)
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+		__field(unsigned long, nr_migrated)
+		__field(unsigned long, nr_reclaimed)
+		__field(unsigned long, nr_mapped)
+		__field(unsigned int, err_iso)
+		__field(unsigned int, err_mig)
+		__field(unsigned int, err_test)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->pfn = page ? page_to_pfn(page) : -1;
+		__entry->count = count;
+		__entry->align = align;
+		__entry->nr_migrated = info->nr_migrated;
+		__entry->nr_reclaimed = info->nr_reclaimed;
+		__entry->nr_mapped = info->nr_mapped;
+		__entry->err_iso = info->nr_isolate_fail;
+		__entry->err_mig = info->nr_migrate_fail;
+		__entry->err_test = info->nr_test_fail;
+	),
+
+	TP_printk("name=%s pfn=0x%lx count=%u align=%u nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu err_iso=%u err_mig=%u err_test=%u",
+		  __get_str(name),
+		  __entry->pfn,
+		  __entry->count,
+		  __entry->align,
+		  __entry->nr_migrated,
+		  __entry->nr_reclaimed,
+		  __entry->nr_mapped,
+		  __entry->err_iso,
+		  __entry->err_mig,
+		  __entry->err_test)
+);
+
 DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
 
 	TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
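
With this event enabled (under a standard tracefs layout it would
appear as events/cma/cma_alloc_info), cma_alloc() emits one line per
call, accumulated over all allocation attempts, following the
TP_printk format above. A purely illustrative example of such a line
(all values hypothetical):

	cma_alloc_info: name=reserved pfn=0x80000 count=64 align=4 nr_migrated=3 nr_reclaimed=1 nr_mapped=2 err_iso=0 err_mig=1 err_test=0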

diff --git a/mm/cma.c b/mm/cma.c

@@ -443,6 +443,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	int num_attempts = 0;
 	int max_retries = 5;
 	s64 ts;
+	struct cma_alloc_info cma_info = {0};
 
 	trace_android_vh_cma_alloc_start(&ts);
@@ -466,6 +467,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		goto out;
 
 	for (;;) {
+		struct acr_info info = {0};
+
 		mutex_lock(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
@@ -503,7 +506,18 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);
+		cma_info.nr_migrated += info.nr_migrated;
+		cma_info.nr_reclaimed += info.nr_reclaimed;
+		cma_info.nr_mapped += info.nr_mapped;
+		if (info.err) {
+			if (info.err & ACR_ERR_ISOLATE)
+				cma_info.nr_isolate_fail++;
+			if (info.err & ACR_ERR_MIGRATE)
+				cma_info.nr_migrate_fail++;
+			if (info.err & ACR_ERR_TEST)
+				cma_info.nr_test_fail++;
+		}
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
@@ -523,6 +537,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	}
 
 	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
+	trace_cma_alloc_info(cma->name, page, count, align, &cma_info);
 
 	/*
 	 * CMA can allocate multiple page blocks, which results in different

diff --git a/mm/page_alloc.c b/mm/page_alloc.c

@@ -8586,7 +8586,8 @@ static inline void alloc_contig_dump_pages(struct list_head *page_list)
 /* [start, end) must belong to a single zone. */
 static int __alloc_contig_migrate_range(struct compact_control *cc,
-					unsigned long start, unsigned long end)
+					unsigned long start, unsigned long end,
+					struct acr_info *info)
 {
 	/* This function is based on compact_zone() from compaction.c. */
 	unsigned int nr_reclaimed;
@@ -8594,6 +8595,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	unsigned int tries = 0;
 	unsigned int max_tries = 5;
 	int ret = 0;
+	struct page *page;
 	struct migration_target_control mtc = {
 		.nid = zone_to_nid(cc->zone),
 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
@@ -8625,10 +8627,16 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
 							&cc->migratepages);
+		info->nr_reclaimed += nr_reclaimed;
 		cc->nr_migratepages -= nr_reclaimed;
+		list_for_each_entry(page, &cc->migratepages, lru)
+			info->nr_mapped += page_mapcount(page);
 
 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
 				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
+		if (!ret)
+			info->nr_migrated += cc->nr_migratepages;
 	}
 
 	lru_cache_enable();
@@ -8637,7 +8645,9 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 			alloc_contig_dump_pages(&cc->migratepages);
 			page_pinner_mark_migration_failed_pages(&cc->migratepages);
 		}
 		putback_movable_pages(&cc->migratepages);
+		info->err |= ACR_ERR_MIGRATE;
 		return ret;
 	}
 	return 0;
@@ -8665,7 +8675,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  * need to be freed with free_contig_range().
  */
 int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype, gfp_t gfp_mask)
+		       unsigned migratetype, gfp_t gfp_mask,
+		       struct acr_info *info)
 {
 	unsigned long outer_start, outer_end;
 	unsigned int order;
@@ -8709,8 +8720,10 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype, 0);
-	if (ret)
+	if (ret) {
+		info->err |= ACR_ERR_ISOLATE;
 		return ret;
+	}
 
 	drain_all_pages(cc.zone);
@@ -8724,7 +8737,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 * allocated.  So, if we fall through be sure to clear ret so that
 	 * -EBUSY is not accidentally used or returned to caller.
 	 */
-	ret = __alloc_contig_migrate_range(&cc, start, end);
+	ret = __alloc_contig_migrate_range(&cc, start, end, info);
 	if (ret && ret != -EBUSY)
 		goto done;
 	ret = 0;
@@ -8774,6 +8787,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
 			__func__, outer_start, end);
 		ret = -EBUSY;
+		info->err |= ACR_ERR_TEST;
 		goto done;
 	}
@@ -8800,10 +8814,11 @@ EXPORT_SYMBOL(alloc_contig_range);
 static int __alloc_contig_pages(unsigned long start_pfn,
 				unsigned long nr_pages, gfp_t gfp_mask)
 {
+	struct acr_info dummy;
 	unsigned long end_pfn = start_pfn + nr_pages;
 
 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-				  gfp_mask);
+				  gfp_mask, &dummy);
 }
 
 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,