Merge branch 'memblock-kill-early_node_map' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/memblock

This commit is contained in:
Ingo Molnar
2011-12-20 12:14:26 +01:00
parents 511585a28e 7bd0b0f0da
commit 45aa0663cc
75 changed files with 1073 additions and 1788 deletions

arch/x86/mm/Makefile

@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)	+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_HAVE_MEMBLOCK)	+= memblock.o
-
 obj-$(CONFIG_MEMTEST)		+= memtest.o

arch/x86/mm/init.c

@@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 	good_end = max_pfn_mapped << PAGE_SHIFT;
 
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
-	if (base == MEMBLOCK_ERROR)
+	if (!base)
 		panic("Cannot find space for the kernel page tables");
 
 	pgt_buf_start = base >> PAGE_SHIFT;
@@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-	memblock_x86_reserve_range(start, end, "PGTABLE");
+	memblock_reserve(start, end - start);
 }
 
 struct map_range {
@@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
 	 * so that they can be reused for other purposes.
 	 *
-	 * On native it just means calling memblock_x86_reserve_range, on Xen it
-	 * also means marking RW the pagetable pages that we allocated before
+	 * On native it just means calling memblock_reserve, on Xen it also
+	 * means marking RW the pagetable pages that we allocated before
 	 * but that haven't been used.
 	 *
 	 * In fact on xen we mark RO the whole range pgt_buf_start -
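
These init.c hunks show the whole shape of the conversion this merge performs: the removed x86 wrappers took an exclusive [start, end) pair plus a debug label and signalled failure with the MEMBLOCK_ERROR sentinel, while the generic memblock calls take (base, size) and return 0 when nothing is found. A minimal sketch of the new convention follows; the helper name and parameters are illustrative, not taken from the commit.

	/* Hypothetical helper: find-then-reserve with generic memblock. */
	static phys_addr_t __init example_reserve(phys_addr_t start, phys_addr_t end,
						  phys_addr_t size)
	{
		/* The search window is still [start, end)... */
		phys_addr_t base = memblock_find_in_range(start, end, size, PAGE_SIZE);

		/* ...but failure is now 0 instead of MEMBLOCK_ERROR. */
		if (!base)
			return 0;

		/* Reservations take (base, size), not [start, end) plus a name. */
		memblock_reserve(base, size);
		return base;
	}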

arch/x86/mm/init_32.c

@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page)
 void __init add_highpages_with_active_regions(int nid,
 			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct range *range;
-	int nr_range;
-	int i;
-
-	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
-	for (i = 0; i < nr_range; i++) {
-		struct page *page;
-		int node_pfn;
-
-		for (node_pfn = range[i].start; node_pfn < range[i].end;
-		     node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page);
-		}
-	}
+	phys_addr_t start, end;
+	u64 i;
+
+	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+					    start_pfn, end_pfn);
+		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+					      start_pfn, end_pfn);
+		for ( ; pfn < e_pfn; pfn++)
+			if (pfn_valid(pfn))
+				add_one_highpage_init(pfn_to_page(pfn));
+	}
 }
 #else
@@ -650,18 +644,18 @@ void __init initmem_init(void)
 	highstart_pfn = highend_pfn = max_pfn;
 	if (max_pfn > max_low_pfn)
 		highstart_pfn = max_low_pfn;
-	memblock_x86_register_active_regions(0, 0, highend_pfn);
-	sparse_memory_present_with_active_regions(0);
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
 	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	memblock_x86_register_active_regions(0, 0, max_low_pfn);
-	sparse_memory_present_with_active_regions(0);
 	num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
+
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+	sparse_memory_present_with_active_regions(0);
+
 #ifdef CONFIG_FLATMEM
 	max_mapnr = num_physpages;
 #endif
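
Both branches of initmem_init() now funnel into the same two calls: with CONFIG_HAVE_MEMBLOCK_NODE_MAP, node membership is stored in the memblock regions themselves, so registering early_node_map[] entries reduces to tagging everything with node 0. A sketch of the resulting !NUMA pattern; the wrapper name is made up for illustration.

	/* Hypothetical !NUMA sketch: hand all memory to node 0. */
	static void __init example_initmem_init(void)
	{
		/* (phys_addr_t)ULLONG_MAX deliberately covers every region. */
		memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);

		/* Zone/section setup then reads node data out of memblock. */
		sparse_memory_present_with_active_regions(0);
	}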

arch/x86/mm/init_64.c

@@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-	memblock_x86_register_active_regions(0, 0, max_pfn);
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 }
 #endif

arch/x86/mm/memblock.c

@@ -1,348 +0,0 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>
/* Check for already reserved areas */
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
struct memblock_region *r;
u64 addr = *addrp, last;
u64 size = *sizep;
bool changed = false;
again:
last = addr + size;
for_each_memblock(reserved, r) {
if (last > r->base && addr < r->base) {
size = r->base - addr;
changed = true;
goto again;
}
if (last > (r->base + r->size) && addr < (r->base + r->size)) {
addr = round_up(r->base + r->size, align);
size = last - addr;
changed = true;
goto again;
}
if (last <= (r->base + r->size) && addr >= r->base) {
*sizep = 0;
return false;
}
}
if (changed) {
*addrp = addr;
*sizep = size;
}
return changed;
}
/*
* Find next free range after start, and size is returned in *sizep
*/
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
struct memblock_region *r;
for_each_memblock(memory, r) {
u64 ei_start = r->base;
u64 ei_last = ei_start + r->size;
u64 addr;
addr = round_up(ei_start, align);
if (addr < start)
addr = round_up(start, align);
if (addr >= ei_last)
continue;
*sizep = ei_last - addr;
while (memblock_x86_check_reserved_size(&addr, sizep, align))
;
if (*sizep)
return addr;
}
return MEMBLOCK_ERROR;
}
static __init struct range *find_range_array(int count)
{
u64 end, size, mem;
struct range *range;
size = sizeof(struct range) * count;
end = memblock.current_limit;
mem = memblock_find_in_range(0, end, size, sizeof(struct range));
if (mem == MEMBLOCK_ERROR)
panic("can not find more space for range array");
/*
* This range is tempoaray, so don't reserve it, it will not be
* overlapped because We will not alloccate new buffer before
* We discard this one
*/
range = __va(mem);
memset(range, 0, size);
return range;
}
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
u64 final_start, final_end;
struct memblock_region *r;
/* Take out region array itself at first*/
memblock_free_reserved_regions();
memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
for_each_memblock(reserved, r) {
memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
final_start = PFN_DOWN(r->base);
final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
subtract_range(range, az, final_start, final_end);
}
/* Put region array back ? */
memblock_reserve_reserved_regions();
}
struct count_data {
int nr;
};
static int __init count_work_fn(unsigned long start_pfn,
unsigned long end_pfn, void *datax)
{
struct count_data *data = datax;
data->nr++;
return 0;
}
static int __init count_early_node_map(int nodeid)
{
struct count_data data;
data.nr = 0;
work_with_active_regions(nodeid, count_work_fn, &data);
return data.nr;
}
int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
unsigned long start_pfn, unsigned long end_pfn)
{
int count;
struct range *range;
int nr_range;
count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
range = find_range_array(count);
nr_range = 0;
/*
* Use early_node_map[] and memblock.reserved.region to get range array
* at first
*/
nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
subtract_range(range, count, 0, start_pfn);
subtract_range(range, count, end_pfn, -1ULL);
memblock_x86_subtract_reserved(range, count);
nr_range = clean_sort_range(range, count);
*rangep = range;
return nr_range;
}
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
unsigned long end_pfn = -1UL;
#ifdef CONFIG_X86_32
end_pfn = max_low_pfn;
#endif
return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
int i, count;
struct range *range;
int nr_range;
u64 final_start, final_end;
u64 free_size;
struct memblock_region *r;
count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
range = find_range_array(count);
nr_range = 0;
addr = PFN_UP(addr);
limit = PFN_DOWN(limit);
for_each_memblock(memory, r) {
final_start = PFN_UP(r->base);
final_end = PFN_DOWN(r->base + r->size);
if (final_start >= final_end)
continue;
if (final_start >= limit || final_end <= addr)
continue;
nr_range = add_range(range, count, nr_range, final_start, final_end);
}
subtract_range(range, count, 0, addr);
subtract_range(range, count, limit, -1ULL);
/* Subtract memblock.reserved.region in range ? */
if (!get_free)
goto sort_and_count_them;
for_each_memblock(reserved, r) {
final_start = PFN_DOWN(r->base);
final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
if (final_start >= limit || final_end <= addr)
continue;
subtract_range(range, count, final_start, final_end);
}
sort_and_count_them:
nr_range = clean_sort_range(range, count);
free_size = 0;
for (i = 0; i < nr_range; i++)
free_size += range[i].end - range[i].start;
return free_size << PAGE_SHIFT;
}
u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
return __memblock_x86_memory_in_range(addr, limit, true);
}
u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
return __memblock_x86_memory_in_range(addr, limit, false);
}
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
if (start == end)
return;
if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
return;
memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
memblock_reserve(start, end - start);
}
void __init memblock_x86_free_range(u64 start, u64 end)
{
if (start == end)
return;
if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
return;
memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
memblock_free(start, end - start);
}
/*
* Need to call this function after memblock_x86_register_active_regions,
* so early_node_map[] is filled already.
*/
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
u64 addr;
addr = find_memory_core_early(nid, size, align, start, end);
if (addr != MEMBLOCK_ERROR)
return addr;
/* Fallback, should already have start end within node range */
return memblock_find_in_range(start, end, size, align);
}
/*
* Finds an active region in the address range from start_pfn to last_pfn and
* returns its range in ei_startpfn and ei_endpfn for the memblock entry.
*/
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
unsigned long start_pfn,
unsigned long last_pfn,
unsigned long *ei_startpfn,
unsigned long *ei_endpfn)
{
u64 align = PAGE_SIZE;
*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
/* Skip map entries smaller than a page */
if (*ei_startpfn >= *ei_endpfn)
return 0;
/* Skip if map is outside the node */
if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
return 0;
/* Check for overlaps */
if (*ei_startpfn < start_pfn)
*ei_startpfn = start_pfn;
if (*ei_endpfn > last_pfn)
*ei_endpfn = last_pfn;
return 1;
}
/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn)
{
unsigned long ei_startpfn;
unsigned long ei_endpfn;
struct memblock_region *r;
for_each_memblock(memory, r)
if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
&ei_startpfn, &ei_endpfn))
add_active_range(nid, ei_startpfn, ei_endpfn);
}
/*
* Find the hole size (in bytes) in the memory range.
* @start: starting address of the memory range to scan
* @end: ending address of the memory range to scan
*/
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long last_pfn = end >> PAGE_SHIFT;
unsigned long ei_startpfn, ei_endpfn, ram = 0;
struct memblock_region *r;
for_each_memblock(memory, r)
if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
&ei_startpfn, &ei_endpfn))
ram += ei_endpfn - ei_startpfn;
return end - start - ((u64)ram << PAGE_SHIFT);
}
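
Nothing in this deleted file needs an arch-specific replacement: the find/reserve pair is covered by memblock_find_in_range() and memblock_reserve(), the range-array walks by for_each_free_mem_range() (see init_32.c and memtest.c), and memblock_x86_hole_size() by the generic absent-pages accounting. A hedged sketch of that last equivalence, mirroring the mem_hole_size() helper this merge adds to numa_emulation.c further down; the function name here is illustrative.

	/* Sketch: bytes in [start, end) not backed by RAM, via the node map. */
	static u64 __init example_hole_size(u64 start, u64 end)
	{
		unsigned long start_pfn = PFN_UP(start);	/* first whole page */
		unsigned long end_pfn = PFN_DOWN(end);		/* last whole page */

		if (start_pfn < end_pfn)
			return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
		return 0;
	}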

arch/x86/mm/memtest.c

@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 			       (unsigned long long) pattern,
 			       (unsigned long long) start_bad,
 			       (unsigned long long) end_bad);
-	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+	memblock_reserve(start_bad, end_bad - start_bad);
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -70,24 +70,19 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
 static void __init do_one_pass(u64 pattern, u64 start, u64 end)
 {
-	u64 size = 0;
+	u64 i;
+	phys_addr_t this_start, this_end;
 
-	while (start < end) {
-		start = memblock_x86_find_in_range_size(start, &size, 1);
-
-		/* done ? */
-		if (start >= end)
-			break;
-		if (start + size > end)
-			size = end - start;
-
-		printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
-		       (unsigned long long) start,
-		       (unsigned long long) start + size,
-		       (unsigned long long) cpu_to_be64(pattern));
-		memtest(pattern, start, size);
-
-		start += size;
+	for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+		this_start = clamp_t(phys_addr_t, this_start, start, end);
+		this_end = clamp_t(phys_addr_t, this_end, start, end);
+		if (this_start < this_end) {
+			printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
+			       (unsigned long long)this_start,
+			       (unsigned long long)this_end,
+			       (unsigned long long)cpu_to_be64(pattern));
+			memtest(pattern, this_start, this_end - this_start);
+		}
 	}
 }
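
The rewritten do_one_pass() has the same shape as add_highpages_with_active_regions() in init_32.c: walk every free range and clamp it to the window of interest. The idiom, lifted out as a standalone sketch with a placeholder body; the function name is illustrative.

	/* Sketch of the clamp-and-visit idiom shared by both call sites. */
	static void __init example_walk_free(phys_addr_t start, phys_addr_t end)
	{
		phys_addr_t this_start, this_end;
		u64 i;

		/* MAX_NUMNODES means "any node"; NULL skips the nid output. */
		for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
			this_start = clamp_t(phys_addr_t, this_start, start, end);
			this_end = clamp_t(phys_addr_t, this_end, start, end);
			if (this_start < this_end)
				; /* visit [this_start, this_end) here */
		}
	}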

arch/x86/mm/numa.c

@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start, u64 end)
 {
-	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
-	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	bool remapped = false;
 	u64 nd_pa;
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 		nd_pa = __pa(nd);
 		remapped = true;
 	} else {
-		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR)
-			nd_pa = memblock_find_in_range(nd_low, nd_high,
-						       nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR) {
+		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+		if (!nd_pa) {
 			pr_err("Cannot find %zu bytes in node %d\n",
 			       nd_size, nid);
 			return;
 		}
-		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
 		nd = __va(nd_pa);
 	}
@@ -371,8 +364,7 @@ void __init numa_reset_distance(void)
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_x86_free_range(__pa(numa_distance),
-					__pa(numa_distance) + size);
+		memblock_free(__pa(numa_distance), size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }
@@ -395,13 +387,13 @@ static int __init numa_alloc_distance(void)
 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 				      size, PAGE_SIZE);
-	if (phys == MEMBLOCK_ERROR) {
+	if (!phys) {
 		pr_warning("NUMA: Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+	memblock_reserve(phys, size);
 
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
@@ -482,8 +474,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 		numaram = 0;
 	}
 
-	e820ram = max_pfn - (memblock_x86_hole_size(0,
-					PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
 		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
@@ -505,13 +497,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
-	for (i = 0; i < mi->nr_blks; i++)
-		memblock_x86_register_active_regions(mi->blk[i].nid,
-					mi->blk[i].start >> PAGE_SHIFT,
-					mi->blk[i].end >> PAGE_SHIFT);
-
-	/* for out of order entries */
-	sort_node_map();
+	for (i = 0; i < mi->nr_blks; i++) {
+		struct numa_memblk *mb = &mi->blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+	}
 
 	/*
 	 * If sections array is gonna be used for pfn -> nid mapping, check
@@ -545,6 +534,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 		setup_node_data(nid, start, end);
 	}
 
+	/* Dump memblock with node info and return. */
+	memblock_dump_all();
 	return 0;
 }
@@ -582,7 +573,7 @@ static int __init numa_init(int (*init_func)(void))
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	remove_all_active_ranges();
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
 	numa_reset_distance();
 
 	ret = init_func();
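
In setup_node_data() the three-step dance (search near the node, fall back to any range, then reserve by hand) collapses into one call: memblock_alloc_nid() prefers the requested node, handles the fallback itself, and returns the block already reserved. A condensed sketch of the new path; the wrapper name is made up, the error message is the one from the hunk above.

	/* Sketch: per-node allocation with the generic allocator. */
	static u64 __init example_alloc_node_data(int nid, size_t nd_size)
	{
		u64 nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);

		if (!nd_pa) {		/* 0 again signals failure */
			pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
			return 0;
		}
		return nd_pa;		/* already reserved; ready for __va() */
	}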

arch/x86/mm/numa_32.c

@@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
 	/* allocate node memory and the lowmem remap area */
 	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
-	if (node_pa == MEMBLOCK_ERROR) {
+	if (!node_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
 		return;
 	}
-	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+	memblock_reserve(node_pa, size);
 
 	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
 					  max_low_pfn << PAGE_SHIFT,
 					  size, LARGE_PAGE_BYTES);
-	if (remap_pa == MEMBLOCK_ERROR) {
+	if (!remap_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
 			   size, nid);
-		memblock_x86_free_range(node_pa, node_pa + size);
+		memblock_free(node_pa, size);
 		return;
 	}
-	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+	memblock_reserve(remap_pa, size);
 	remap_va = phys_to_virt(remap_pa);
 
 	/* perform actual remap */

arch/x86/mm/numa_64.c

@@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void)
 	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
 
-	pages += free_all_memory_core_early(MAX_NUMNODES);
+	pages += free_low_memory_core_early(MAX_NUMNODES);
 
 	return pages;
 }

arch/x86/mm/numa_emulation.c

@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
 	return -ENOENT;
 }
 
+static u64 mem_hole_size(u64 start, u64 end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = PFN_DOWN(end);
+
+	if (start_pfn < end_pfn)
+		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+	return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
 	 * the division in ulong number of pages and convert back.
 	 */
-	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+	size = max_addr - addr - mem_hole_size(addr, max_addr);
 	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
 	/*
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * Continue to add memory to this fake node if its
 		 * non-reserved memory is less than the per-node size.
 		 */
-		while (end - start -
-		       memblock_x86_hole_size(start, end) < size) {
+		while (end - start - mem_hole_size(start, end) < size) {
 			end += FAKE_NODE_MIN_SIZE;
 			if (end > limit) {
 				end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
 	u64 end = start + size;
 
-	while (end - start - memblock_x86_hole_size(start, end) < size) {
+	while (end - start - mem_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-						MAX_NUMNODES;
+	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
 		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 				      phys_size, PAGE_SIZE);
-	if (phys == MEMBLOCK_ERROR) {
+	if (!phys) {
 		pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 		goto no_emu;
 	}
-	memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+	memblock_reserve(phys, phys_size);
 	phys_dist = __va(phys);
 
 	for (i = 0; i < numa_dist_cnt; i++)
@@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	/* free the copied physical distance table */
 	if (phys_dist)
-		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+		memblock_free(__pa(phys_dist), phys_size);
 	return;
 
 no_emu: