mm: pass the vmem_altmap to arch_remove_memory and __remove_pages
We can just pass this on instead of having to do a radix tree lookup without proper locking 2 levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Tento commit je obsažen v:

odevzdal
Dan Williams

rodič
7b73d978a5
revize
da024512a1
@@ -663,7 +663,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size)
+int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -671,7 +671,7 @@ int arch_remove_memory(u64 start, u64 size)
 	int ret;
 
 	zone = page_zone(pfn_to_page(start_pfn));
-	ret = __remove_pages(zone, start_pfn, nr_pages);
+	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
 	if (ret)
 		pr_warn("%s: Problem encountered in __remove_pages() as"
 			" ret=%d\n", __func__, ret);
Odkázat v novém úkolu
Zablokovat Uživatele