Merge 5.5-rc7 into android-mainline

Linux 5.5-rc7

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ibda9b40265c1a8e76cb8eb58107312438ecf687b
Greg Kroah-Hartman
2020-01-20 11:57:16 +01:00
291 changed files with 2317 additions and 2228 deletions

mm/page_alloc.c

@@ -706,34 +706,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-        bool enable = false;
-
-        if (kstrtobool(buf, &enable))
-                return -EINVAL;
-
-        if (enable)
-                static_branch_enable(&_debug_pagealloc_enabled);
-
-        return 0;
+        return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
         if (!debug_pagealloc_enabled())
                 return;
 
+        static_branch_enable(&_debug_pagealloc_enabled);
+
         if (!debug_guardpage_minorder())
                 return;
 
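
The hunk above splits the debug_pagealloc state in two: a plain bool
(_debug_pagealloc_enabled_early) that the early_param handler can set before
the jump-label infrastructure is ready, plus the existing static key, which
the new init_debug_pagealloc() enables later in boot. For reference, the
matching helpers in include/linux/mm.h from the same upstream commit look
roughly like this (a sketch quoted for context, not part of this hunk):

extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

/* Safe at any time: reads a plain bool, needs no patched jump labels. */
static inline bool debug_pagealloc_enabled(void)
{
        return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                _debug_pagealloc_enabled_early;
}

/* For fast paths that only run after init_debug_pagealloc(). */
static inline bool debug_pagealloc_enabled_static(void)
{
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;

        return static_branch_unlikely(&_debug_pagealloc_enabled);
}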
@@ -1198,7 +1191,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
          */
         arch_free_page(page, order);
 
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 kernel_map_pages(page, 1 << order, 0);
 
         kasan_free_nondeferred_pages(page, order);
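
This and the remaining debug_pagealloc hunks below are the same mechanical
substitution: freeing and allocating pages are hot paths, so they now test
the static key, which the jump-label machinery patches to a plain
fall-through while debug_pagealloc is off. A minimal sketch of that pattern
(example_key, hot_path and late_init are hypothetical names, not from this
commit):

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(example_key);

void hot_path(void)
{
        /* Compiles to a patched jump: near-zero cost while disabled. */
        if (static_branch_unlikely(&example_key))
                pr_info("debug-only work\n");
}

void __init late_init(void)
{
        static_branch_enable(&example_key);     /* rewrites the branch site */
}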
@@ -1219,7 +1212,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return free_pages_check(page);
         else
                 return false;
@@ -1233,7 +1226,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return free_pages_prepare(page, 0, true);
         else
                 return free_pages_prepare(page, 0, false);
@@ -1985,10 +1978,6 @@ void __init page_alloc_init_late(void)
 
         for_each_populated_zone(zone)
                 set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
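
The init_debug_guardpage() call can go away here because initialization moves
much earlier: the same upstream commit invokes the new init_debug_pagealloc()
from mm_init() in init/main.c, before the first pages are handed to the
allocator, and the guard-page setup now runs at the end of
init_debug_pagealloc() itself (see the first hunk). Roughly (reconstructed
from the upstream change; the exact placement is not shown in this diff):

static void __init mm_init(void)
{
        page_ext_init_flatmem();
        init_debug_pagealloc();         /* enables the static key early */
        report_meminit();
        mem_init();
        /* ... */
}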
@@ -2118,7 +2107,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return check_new_page(page);
         else
                 return false;
@@ -2140,7 +2129,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return check_new_page(page);
         else
                 return false;
@@ -2167,7 +2156,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
         set_page_refcounted(page);
 
         arch_alloc_page(page, order);
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 kernel_map_pages(page, 1 << order, 1);
         kasan_alloc_pages(page, order);
         kernel_poison_pages(page, 1 << order, 1);
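
For context on the kernel_map_pages() calls these checks guard: with
CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the kernel direct map when
freed and mapped back in when allocated, so any use-after-free through a
stale pointer faults immediately. A sketch of the two guarded call sites
written as helpers (hypothetical wrapper names; the page-table work happens
in the arch's __kernel_map_pages()):

static void debug_unmap_on_free(struct page *page, unsigned int order)
{
        if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0);  /* 0 = unmap */
}

static void debug_map_on_alloc(struct page *page, unsigned int order)
{
        if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1);  /* 1 = map */
}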
@@ -4488,8 +4477,11 @@ retry_cpuset:
                 if (page)
                         goto got_pg;
 
-                if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
-                    !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
+                /*
+                 * Checks for costly allocations with __GFP_NORETRY, which
+                 * includes some THP page fault allocations
+                 */
+                if (costly_order && (gfp_mask & __GFP_NORETRY)) {
                         /*
                          * If allocating entire pageblock(s) and compaction
                          * failed because all zones are below low watermarks
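
The rewritten condition broadens the early bailout. For reference,
costly_order is computed near the top of __alloc_pages_slowpath() (visible
in the full file, not in this hunk):

        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;

Since PAGE_ALLOC_COSTLY_ORDER is 3, the nopage exit now applies to any
order-4-or-larger __GFP_NORETRY allocation whose compaction attempt was
skipped or deferred, instead of only to full-pageblock __GFP_IO requests.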
@@ -4510,23 +4502,6 @@ retry_cpuset:
                         if (compact_result == COMPACT_SKIPPED ||
                             compact_result == COMPACT_DEFERRED)
                                 goto nopage;
-                }
-
-                /*
-                 * Checks for costly allocations with __GFP_NORETRY, which
-                 * includes THP page fault allocations
-                 */
-                if (costly_order && (gfp_mask & __GFP_NORETRY)) {
-                        /*
-                         * If compaction is deferred for high-order allocations,
-                         * it is because sync compaction recently failed. If
-                         * this is the case and the caller requested a THP
-                         * allocation, we do not want to heavily disrupt the
-                         * system, so we fail the allocation instead of entering
-                         * direct reclaim.
-                         */
-                        if (compact_result == COMPACT_DEFERRED)
-                                goto nopage;
 
                         /*
                          * Looks like reclaim/compaction is worth trying, but
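
Net effect of the last two hunks, as the resulting code would read in 5.5
(a reconstructed sketch; the tail of the final hunk is truncated above):

if (costly_order && (gfp_mask & __GFP_NORETRY)) {
        /* Compaction was skipped or deferred: reclaim is unlikely to help. */
        if (compact_result == COMPACT_SKIPPED ||
            compact_result == COMPACT_DEFERRED)
                goto nopage;

        /* Worth trying further, but stick to cheaper async compaction. */
        compact_priority = INIT_COMPACT_PRIORITY;
}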