mm: pass the vmem_altmap to vmemmap_free

We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Author:    Christoph Hellwig
Date:      2017-12-29 08:53:56 +01:00
Committer: Dan Williams
Commit:    24b6d41643 (parent da024512a1)

10 changed files with 68 additions and 51 deletions
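For context, the lookup this series eliminates is the one behind to_vmem_altmap(): a device-pagemap radix-tree walk performed a few frames below a caller that already had the altmap in hand. The sketch below only illustrates that before/after shape; the helper name in the "before" half is hypothetical, not quoted from the patched tree.

/*
 * Before (sketch): deep in the hot-remove path, the altmap is rediscovered
 * via to_vmem_altmap(), i.e. the unlocked radix-tree lookup the commit
 * message complains about.  free_memmap_pages() is a hypothetical helper.
 */
static void free_memmap_pages(struct page *page, unsigned int order)
{
	struct vmem_altmap *altmap;

	altmap = to_vmem_altmap((unsigned long)page_address(page));
	if (altmap) {
		vmem_altmap_free(altmap, 1UL << order);
		return;
	}
	/* ... otherwise free as ordinary pages ... */
}

/*
 * After (this patch): the caller that already knows the altmap simply
 * passes it down, e.g. vmemmap_free(start, end, altmap).
 */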

--- a/mm/sparse.c
+++ b/mm/sparse.c

@@ -685,12 +685,13 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, altmap);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static void free_map_bootmem(struct page *memmap)
@@ -698,7 +699,7 @@ static void free_map_bootmem(struct page *memmap)
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, NULL);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
@@ -729,7 +730,8 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
@@ -798,7 +800,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 		return -ENOMEM;
 	}
 
@@ -820,7 +822,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 	}
 	return ret;
 }
@@ -847,7 +849,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+		struct vmem_altmap *altmap)
 {
 	struct page *usemap_page;
 
@@ -861,7 +864,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap);
+			__kfree_section_memmap(memmap, altmap);
 		return;
 	}
 
@@ -875,7 +878,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
 	unsigned long *usemap = NULL, flags;
@@ -893,7 +896,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
 
-	free_section_usemap(memmap, usemap);
+	free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
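
For reference, the interfaces the hunks above call into change shape accordingly. The declarations below are a sketch of the post-patch prototypes as implied by the calls in this file; the excerpt only shows mm/sparse.c, so the exact headers they live in are an assumption.

/*
 * Arch hook (sketch): implementations now receive the altmap instead of
 * looking it up again on their own.
 */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);

/*
 * Hot-remove entry point as changed above (sketch): callers supply the
 * altmap that was used when the section's memmap was populated, or NULL
 * for an ordinary memmap.
 */
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);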