
alloc_contig_range is the core worker function for CMA allocation, so it has all the information needed to understand allocation latency: for example, how many pages were migrated, how many times unmapping was needed to migrate pages, and how many times it encountered errors for various reasons. This patch adds such statistics to alloc_contig_range and returns them to the user, so the user can use that information to analyze latency. cma_alloc is the first user of the statistics; it exports them as a new trace event (i.e., cma_alloc_info). It was really useful for optimizing CMA allocation work. Bug: 192475091 Signed-off-by: Minchan Kim <minchan@google.com> Change-Id: I7be43cc89d11078e2a324d2d06aada6d8e9e1cc9
62 lines
1.8 KiB
C
62 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __CMA_H__
|
|
#define __CMA_H__
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/types.h>
|
|
#include <linux/numa.h>
|
|
|
|
/*
|
|
* There is always at least global CMA area and a few optional
|
|
* areas configured in kernel .config.
|
|
*/
|
|
#ifdef CONFIG_CMA_AREAS
/* One extra slot for the always-present global CMA area (see comment above) */
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
#else
#define MAX_CMA_AREAS (0)
#endif

/* Maximum length of a CMA area name */
#define CMA_MAX_NAME 64
|
|
|
|
struct cma;
|
|
|
|
/*
 * Per-call statistics gathered by alloc_contig_range() during a CMA
 * allocation and reported back to the caller; cma_alloc() exports them
 * via the cma_alloc_info trace event to help analyze allocation latency.
 */
struct cma_alloc_info {
	unsigned long nr_migrated;	/* pages successfully migrated */
	unsigned long nr_reclaimed;	/* presumably pages reclaimed during the allocation — confirm in mm/page_alloc.c */
	unsigned long nr_mapped;	/* pages that needed unmapping to migrate — TODO confirm */
	unsigned int nr_isolate_fail;	/* page-isolation failures */
	unsigned int nr_migrate_fail;	/* page-migration failures */
	unsigned int nr_test_fail;	/* presumably test_pages_isolated() failures — confirm */
};
|
|
|
|
/* Total number of pages reserved for CMA across all areas */
extern unsigned long totalcma_pages;
/* Accessors for a CMA area's base physical address, size, and name */
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

/*
 * Reserve a contiguous memory area for CMA on a specific NUMA node
 * (NUMA_NO_NODE for no preference); on success *res_cma presumably points
 * to the new area.
 * NOTE(review): exact semantics of @fixed, @limit and @order_per_bit live
 * in the kernel-doc in mm/cma.c — confirm there.
 */
extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
|
|
/*
 * cma_declare_contiguous - reserve a CMA area without a NUMA placement
 * constraint.
 *
 * Thin convenience wrapper: forwards every argument unchanged to
 * cma_declare_contiguous_nid() with @nid fixed to NUMA_NO_NODE, and
 * returns its result.
 */
static inline int __init cma_declare_contiguous(phys_addr_t base,
						phys_addr_t size,
						phys_addr_t limit,
						phys_addr_t alignment,
						unsigned int order_per_bit,
						bool fixed, const char *name,
						struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
					  order_per_bit, fixed, name,
					  res_cma, NUMA_NO_NODE);
}
|
|
/* Create a CMA area from an already-reserved physical memory range */
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
/*
 * Allocate/release contiguous pages from a CMA area.
 * NOTE(review): @count looks like a page count and @align an order —
 * confirm against the kernel-doc in mm/cma.c.
 */
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
			      gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);

/*
 * Invoke @it on each registered CMA area; presumably stops early on the
 * first non-zero return — TODO confirm in mm/cma.c.
 */
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
|
|
#endif
|