Merge a45ad71e89 ("Merge tag 'rproc-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc") into android-mainline

Another "small" merge point to handle conflicts in a sane way.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5dc2f5f11275b29f3c9b5b8d4dd59864ceb6faf9
Author: Greg Kroah-Hartman
Date:   2020-02-08 13:32:37 +01:00

1376 changed files with 59912 additions and 18856 deletions

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -5866,6 +5866,23 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 	return false;
 }
 
+#ifdef CONFIG_SPARSEMEM
+/* Skip PFNs that belong to non-present sections */
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+	const unsigned long section_nr = pfn_to_section_nr(++pfn);
+
+	if (present_section_nr(section_nr))
+		return pfn;
+	return section_nr_to_pfn(next_present_section_nr(section_nr));
+}
+#else
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+	return pfn++;
+}
+#endif
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
@@ -5899,16 +5916,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	}
 #endif
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+	for (pfn = start_pfn; pfn < end_pfn; ) {
 		/*
 		 * There can be holes in boot-time mem_map[]s handed to this
 		 * function. They do not exist on hotplugged memory.
 		 */
 		if (context == MEMMAP_EARLY) {
-			if (!early_pfn_valid(pfn))
+			if (!early_pfn_valid(pfn)) {
+				pfn = next_pfn(pfn);
 				continue;
-			if (!early_pfn_in_nid(pfn, nid))
+			}
+			if (!early_pfn_in_nid(pfn, nid)) {
+				pfn++;
 				continue;
+			}
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
 			if (defer_init(nid, pfn, end_pfn))
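
For orientation, here is a minimal userspace sketch of what the reworked loop does: pfn is no longer advanced in the for-statement, and PFNs in non-present sections are skipped in one jump instead of being tested one by one. All *_mock names and the tiny section size are illustrative stand-ins, not kernel code.

/*
 * Build with: cc -o next_pfn_demo next_pfn_demo.c
 * Sections 0 and 2 are "present", section 1 is a hole; the loop visits
 * PFNs 0-7, jumps the hole, then visits 16-23.
 */
#include <stdbool.h>
#include <stdio.h>

#define SECTION_PAGES 8UL	/* mock PAGES_PER_SECTION, shrunk for output */

static bool present_section_nr_mock(unsigned long section_nr)
{
	return section_nr != 1;
}

static unsigned long next_present_section_nr_mock(unsigned long section_nr)
{
	do {
		section_nr++;
	} while (!present_section_nr_mock(section_nr));
	return section_nr;
}

/* Mirrors the CONFIG_SPARSEMEM variant of next_pfn() added above. */
static unsigned long next_pfn_mock(unsigned long pfn)
{
	const unsigned long section_nr = ++pfn / SECTION_PAGES;

	if (present_section_nr_mock(section_nr))
		return pfn;
	return next_present_section_nr_mock(section_nr) * SECTION_PAGES;
}

int main(void)
{
	unsigned long pfn = 0, end_pfn = 3 * SECTION_PAGES;

	while (pfn < end_pfn) {
		if (!present_section_nr_mock(pfn / SECTION_PAGES)) {
			pfn = next_pfn_mock(pfn);	/* jump over the hole */
			continue;
		}
		printf("init pfn %lu\n", pfn);
		pfn++;		/* advancement moved into the loop body */
	}
	return 0;
}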
@@ -5936,16 +5957,17 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 			cond_resched();
 		}
+		pfn++;
 	}
 }
 
 #ifdef CONFIG_ZONE_DEVICE
 void __ref memmap_init_zone_device(struct zone *zone,
 				   unsigned long start_pfn,
-				   unsigned long size,
+				   unsigned long nr_pages,
 				   struct dev_pagemap *pgmap)
 {
-	unsigned long pfn, end_pfn = start_pfn + size;
+	unsigned long pfn, end_pfn = start_pfn + nr_pages;
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
 	unsigned long zone_idx = zone_idx(zone);
@@ -5962,7 +5984,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 	 */
 	if (altmap) {
 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
-		size = end_pfn - start_pfn;
+		nr_pages = end_pfn - start_pfn;
 	}
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
@@ -6009,7 +6031,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
 	}
 
 	pr_info("%s initialised %lu pages in %ums\n", __func__,
-		size, jiffies_to_msecs(jiffies - start));
+		nr_pages, jiffies_to_msecs(jiffies - start));
 }
 
 #endif
@@ -6908,10 +6930,10 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 
 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
- * Zero all valid struct pages in range [spfn, epfn), return number of struct
- * pages zeroed
+ * Initialize all valid struct pages in the range [spfn, epfn) and mark them
+ * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 {
 	unsigned long pfn;
 	u64 pgcnt = 0;
@@ -6922,7 +6944,13 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		mm_zero_struct_page(pfn_to_page(pfn));
+		/*
+		 * Use a fake node/zone (0) for now. Some of these pages
+		 * (in memblock.reserved but not in memblock.memory) will
+		 * get re-initialized via reserve_bootmem_region() later.
+		 */
+		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
 
@@ -6934,14 +6962,15 @@ static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
  * initialized by going through __init_single_page(). But, there are some
  * struct pages which are reserved in memblock allocator and their fields
  * may be accessed (for example page_to_pfn() on some configuration accesses
- * flags). We must explicitly zero those struct pages.
+ * flags). We must explicitly initialize those struct pages.
  *
  * This function also addresses a similar issue where struct pages are left
  * uninitialized because the physical address range is not covered by
  * memblock.memory or memblock.reserved. That could happen when memblock
- * layout is manually configured via memmap=.
+ * layout is manually configured via memmap=, or when the highest physical
+ * address (max_pfn) does not end on a section boundary.
  */
-void __init zero_resv_unavail(void)
+static void __init init_unavailable_mem(void)
 {
 	phys_addr_t start, end;
 	u64 i, pgcnt;
@@ -6954,10 +6983,20 @@ void __init zero_resv_unavail(void)
 	for_each_mem_range(i, &memblock.memory, NULL,
 			NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
 		if (next < start)
-			pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
+			pgcnt += init_unavailable_range(PFN_DOWN(next),
+							PFN_UP(start));
 		next = end;
 	}
-	pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
+
+	/*
+	 * Early sections always have a fully populated memmap for the whole
+	 * section - see pfn_valid(). If the last section has holes at the
+	 * end and that section is marked "online", the memmap will be
+	 * considered initialized. Make sure that memmap has a well defined
+	 * state.
+	 */
+	pgcnt += init_unavailable_range(PFN_DOWN(next),
+					round_up(max_pfn, PAGES_PER_SECTION));
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
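
The effect of the new upper bound is easy to check in isolation. A small sketch, assuming x86-64-like sections of 32768 pages (128 MiB with 4 KiB pages); round_up_mock() reimplements the kernel's power-of-two round_up():

#include <stdio.h>

#define SECTION_PAGES 32768UL	/* mock PAGES_PER_SECTION */

static unsigned long round_up_mock(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Example max_pfn that ends in the middle of a section. */
	unsigned long max_pfn = 0x23f000;

	printf("old bound: %#lx\n", max_pfn);
	printf("new bound: %#lx\n", round_up_mock(max_pfn, SECTION_PAGES));
	/* PFNs in [old bound, new bound) now get a well-defined memmap. */
	return 0;
}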
@@ -6966,6 +7005,10 @@ void __init zero_resv_unavail(void)
 	if (pgcnt)
 		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
 }
+#else
+static inline void __init init_unavailable_mem(void)
+{
+}
 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -7395,7 +7438,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
 	setup_nr_node_ids();
-	zero_resv_unavail();
+	init_unavailable_mem();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		free_area_init_node(nid, NULL,
@@ -7590,7 +7633,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	zero_resv_unavail();
+	init_unavailable_mem();
 	free_area_init_node(0, zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
@@ -8177,20 +8220,22 @@ void *__init alloc_large_system_hash(const char *tablename,
 
 /*
  * This function checks whether pageblock includes unmovable pages or not.
- * If @count is not zero, it is okay to include less @count unmovable pages
  *
  * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
  * check without lock_page also may miss some movable non-lru pages at
  * race condition. So you can't expect this function should be exact.
+ *
+ * Returns a page without holding a reference. If the caller wants to
+ * dereference that page (e.g., dumping), it has to make sure that it
+ * cannot get removed (e.g., via memory unplug) concurrently.
+ *
  */
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, int flags)
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+				 int migratetype, int flags)
 {
-	unsigned long found;
 	unsigned long iter = 0;
 	unsigned long pfn = page_to_pfn(page);
-	const char *reason = "unmovable page";
 
 	/*
 	 * TODO we could make this much more efficient by not checking every
@@ -8207,22 +8252,19 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * so consider them movable here.
 		 */
 		if (is_migrate_cma(migratetype))
-			return false;
+			return NULL;
 
-		reason = "CMA page";
-		goto unmovable;
+		return page;
 	}
 
-	for (found = 0; iter < pageblock_nr_pages; iter++) {
-		unsigned long check = pfn + iter;
-
-		if (!pfn_valid_within(check))
+	for (; iter < pageblock_nr_pages; iter++) {
+		if (!pfn_valid_within(pfn + iter))
 			continue;
 
-		page = pfn_to_page(check);
+		page = pfn_to_page(pfn + iter);
 
 		if (PageReserved(page))
-			goto unmovable;
+			return page;
 
 		/*
 		 * If the zone is movable and we have ruled out all reserved
@@ -8242,7 +8284,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			unsigned int skip_pages;
 
 			if (!hugepage_migration_supported(page_hstate(head)))
-				goto unmovable;
+				return page;
 
 			skip_pages = compound_nr(head) - (page - head);
 			iter += skip_pages - 1;
@@ -8268,11 +8310,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
 			continue;
 
-		if (__PageMovable(page))
+		if (__PageMovable(page) || PageLRU(page))
 			continue;
 
-		if (!PageLRU(page))
-			found++;
 		/*
 		 * If there are RECLAIMABLE pages, we need to check
 		 * it. But now, memory offline itself doesn't call
@@ -8286,15 +8326,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * is set to both of a memory hole page and a _used_ kernel
 		 * page at boot.
 		 */
-		if (found > count)
-			goto unmovable;
+		return page;
 	}
-	return false;
-unmovable:
-	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-	if (flags & REPORT_FAILURE)
-		dump_page(pfn_to_page(pfn + iter), reason);
-	return true;
+	return NULL;
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
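
The switch from bool to struct page * exists so that the caller can report the offending page outside of zone->lock: the old code called dump_page() (a printk) while holding the spinlock. A runnable userspace sketch of the intended caller pattern, loosely modeled on set_migratetype_isolate() in mm/page_isolation.c; every *_mock name is a stand-in, not the kernel's API:

#include <stdio.h>

struct page_mock { unsigned long pfn; };

static struct page_mock pageblock_mock[4] = { {100}, {101}, {102}, {103} };

static void zone_lock_mock(void)   { /* spin_lock_irqsave() in the kernel */ }
static void zone_unlock_mock(void) { /* spin_unlock_irqrestore() */ }

/* Pretend pfn 102 is unmovable; report the page itself, not a bool. */
static struct page_mock *has_unmovable_pages_mock(void)
{
	for (unsigned int i = 0; i < 4; i++)
		if (pageblock_mock[i].pfn == 102)
			return &pageblock_mock[i];
	return NULL;
}

static void dump_page_mock(struct page_mock *page, const char *reason)
{
	printf("pfn %lu: %s\n", page->pfn, reason);
}

int main(void)
{
	struct page_mock *unmovable;

	zone_lock_mock();
	unmovable = has_unmovable_pages_mock();
	zone_unlock_mock();

	/* Only now, with the "lock" dropped, is the report printed. */
	if (unmovable)
		dump_page_mock(unmovable, "unmovable page");
	return 0;
}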
@@ -8698,10 +8732,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		BUG_ON(!PageBuddy(page));
 		order = page_order(page);
 		offlined_pages += 1 << order;
-#ifdef CONFIG_DEBUG_VM
-		pr_info("remove from free list %lx %d %lx\n",
-			pfn, 1 << order, end_pfn);
-#endif
 		del_page_from_free_area(page, &zone->free_area[order]);
 		pfn += (1 << order);
 	}