
alloc_contig_range() is supposed to work on a range aligned to max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) granularity. When it fails on some page and returns an error, the caller has no idea which page caused the failure, so it keeps retrying with new ranges that still include that page and keeps hitting the same error until the search finally moves past the granularity block containing it. Instead, let's make CMA aware of which pfn failed in the previous attempt and continue from the next pageblock past the failed page, so it doesn't run into the same error over and over. For now this behavior is enabled only for the __GFP_NORETRY case, to stay safe for existing CMA users.

Bug: 192475091
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I0959c9df3d4b36408a68920abbb4d52d31026079
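
To make the mechanism concrete, here is a minimal sketch of the skip-ahead retry described above. It is illustrative only: cma_alloc_skip_ahead() and the failed_pfn out-parameter on __alloc_contig_range() are hypothetical stand-ins for the CMA-side plumbing, which is not part of the header hunk below.

/*
 * Hypothetical sketch, not the actual patch: a caller-side retry loop
 * that uses the pfn reported on failure to jump past the pageblock
 * containing the unmovable page, instead of retrying a range that
 * still includes it.
 */
static int cma_alloc_skip_ahead(unsigned long base, unsigned long limit,
				unsigned long count, gfp_t gfp)
{
	unsigned long pfn = base;
	unsigned long failed_pfn = 0;
	int ret = -EBUSY;

	while (pfn + count <= limit) {
		/* __alloc_contig_range() with failed_pfn is assumed here. */
		ret = __alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					   gfp, &failed_pfn);
		if (!ret)
			return 0;
		if ((gfp & __GFP_NORETRY) && failed_pfn)
			/* Resume from the pageblock after the blocker. */
			pfn = ALIGN(failed_pfn + 1, pageblock_nr_pages);
		else
			/* Old behavior: step by the minimum alignment. */
			pfn += pageblock_nr_pages;
		failed_pfn = 0;
	}
	return ret;
}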
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif

#define MEMORY_OFFLINE	0x1
#define REPORT_FAILURE	0x2

struct page *has_unmovable_pages(struct zone *zone, struct page *page,
				 int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype, int *num_movable);

/*
 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, int flags,
			 unsigned long *failed_pfn);

/*
 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 * target range is [start_pfn, end_pfn)
 */
void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Test all pages in [start_pfn, end_pfn) are isolated or not.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags, unsigned long *failed_pfn);

struct page *alloc_migrate_target(struct page *page, unsigned long private);

#endif
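
As a usage sketch of the new failed_pfn out-parameter (the helper below and its caller are assumptions; only the declarations above come from this file): an allocator can isolate a candidate range and, when isolation or the isolation test fails, read back which pfn was the blocker so the next candidate starts beyond its pageblock.

/* Hypothetical probe helper; partial isolation on the failure path is
 * rolled back by start_isolate_page_range() itself. */
static int try_isolate_candidate(unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned long *next_pfn)
{
	unsigned long failed_pfn = 0;
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, 0, &failed_pfn);
	if (ret) {
		/* Skip the whole pageblock holding the blocking page. */
		*next_pfn = ALIGN(failed_pfn + 1, pageblock_nr_pages);
		return ret;
	}

	ret = test_pages_isolated(start_pfn, end_pfn, 0, &failed_pfn);
	if (ret)
		*next_pfn = ALIGN(failed_pfn + 1, pageblock_nr_pages);

	/* Probe only: release the isolation before returning. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}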