Merge 5.4-rc1-prerelease into android-mainline

To make the 5.4-rc1 merge easier, merge at a prerelease point in time
before the final release happens.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If613d657fd0abf9910c5bf3435a745f01b89765e
This commit is contained in:
Greg Kroah-Hartman
2019-10-02 17:58:47 +02:00
702 changed files with 16343 additions and 10116 deletions

View File

@@ -682,6 +682,7 @@ out:
/*
 * Free a compound (multi-order) page: uncharge it from its memory
 * cgroup, then hand it back to the buddy allocator at its compound
 * order via __free_pages_ok().
 *
 * NOTE(review): this span is a rendered diff hunk from the 5.4-rc1
 * merge; the mem_cgroup_uncharge() call appears to be the newly
 * added line — confirm against the upstream commit.
 */
void free_compound_page(struct page *page)
{
/* Release the memcg charge before the page returns to the allocator. */
mem_cgroup_uncharge(page);
__free_pages_ok(page, compound_order(page));
}
@@ -3967,16 +3968,24 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
goto check_priority;
/*
* make sure the compaction wasn't deferred or didn't bail out early
* due to locks contention before we declare that we should give up.
* But do not retry if the given zonelist is not suitable for
* compaction.
* compaction was skipped because there are not enough order-0 pages
* to work with, so we retry only if it looks like reclaim can help.
*/
if (compaction_withdrawn(compact_result)) {
if (compaction_needs_reclaim(compact_result)) {
ret = compaction_zonelist_suitable(ac, order, alloc_flags);
goto out;
}
/*
* make sure the compaction wasn't deferred or didn't bail out early
* due to locks contention before we declare that we should give up.
* But the next retry should use a higher priority if allowed, so
* we don't just keep bailing out endlessly.
*/
if (compaction_withdrawn(compact_result)) {
goto check_priority;
}
/*
* !costly requests are much more important than __GFP_RETRY_MAYFAIL
* costly ones because they are de facto nofail and invoke OOM
@@ -6650,9 +6659,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Initialize the per-node deferred-split queue used by THP
 * (CONFIG_TRANSPARENT_HUGEPAGE): its spinlock, list head, and length
 * counter.
 *
 * NOTE(review): this span is a rendered diff with the +/- markers
 * lost — it contains BOTH the pre-merge body (direct
 * pgdat->split_queue_* fields, first three statements) and the
 * post-merge body (via struct deferred_split, last four lines)
 * concatenated. Only the deferred_split-based lines should survive
 * the merge; as written this would double-initialize. Verify against
 * the upstream 5.4-rc1 tree before applying.
 */
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
/* Old body (pre-merge): fields lived directly on pglist_data. */
spin_lock_init(&pgdat->split_queue_lock);
INIT_LIST_HEAD(&pgdat->split_queue);
pgdat->split_queue_len = 0;
/* New body (post-merge): fields moved into pgdat->deferred_split_queue. */
struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
spin_lock_init(&ds_queue->split_queue_lock);
INIT_LIST_HEAD(&ds_queue->split_queue);
ds_queue->split_queue_len = 0;
}
#else
/* No-op stub when CONFIG_TRANSPARENT_HUGEPAGE is disabled. */
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
@@ -8213,7 +8224,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
if (!hugepage_migration_supported(page_hstate(head)))
goto unmovable;
skip_pages = (1 << compound_order(head)) - (page - head);
skip_pages = compound_nr(head) - (page - head);
iter += skip_pages - 1;
continue;
}