memblock, x86: Replace memblock_x86_find_in_range_node() with generic memblock calls
With the previous changes, the generic NUMA-aware memblock API has feature parity with memblock_x86_find_in_range_node(). There currently are two users - x86 setup_node_data() and __alloc_memory_core_early() in nobootmem.c.

This patch converts the former to use memblock_alloc_nid() and the latter memblock_find_in_range_node(), and kills memblock_x86_find_in_range_node() and related functions, including find_memory_core_early() in page_alloc.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-9-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
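For illustration only, the nobootmem.c side of this conversion might take roughly the shape sketched below. This is a minimal sketch rather than the patch itself: the memblock_find_in_range_node() argument order, the use of memblock_reserve(), and the surrounding error handling are assumptions about the memblock API of that era, not code taken from this commit.

/*
 * Sketch: __alloc_memory_core_early() asking the generic, NUMA-aware
 * memblock API for a range instead of going through the removed
 * x86-specific find_memory_core_early()/early_node_map walk.
 * Argument order of memblock_find_in_range_node() is assumed here.
 */
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					       u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	/* top-down, node-affine search over memblock's memory regions */
	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
	if (!addr)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_reserve(addr, size);	/* assumed reservation call */
	return ptr;
}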
committed by H. Peter Anvin
parent e64980405c
commit eb40c4c27f
@@ -3779,73 +3779,6 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 }
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = nr_nodemap_entries - 1; i >= 0; i--)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index - 1; index >= 0; index--)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
-	for (i = last_active_region_index_in_nid(nid); i != -1; \
-		i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	int i;
-
-	/* Need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid_reverse(i, nid) {
-		u64 addr;
-		u64 ei_start, ei_last;
-		u64 final_start, final_end;
-
-		ei_last = early_node_map[i].end_pfn;
-		ei_last <<= PAGE_SHIFT;
-		ei_start = early_node_map[i].start_pfn;
-		ei_start <<= PAGE_SHIFT;
-
-		final_start = max(ei_start, goal);
-		final_end = min(ei_last, limit);
-
-		if (final_start >= final_end)
-			continue;
-
-		addr = memblock_find_in_range(final_start, final_end, size, align);
-
-		if (!addr)
-			continue;
-
-		return addr;
-	}
-
-	return 0;
-}
-#endif
-
 int __init add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid)
 {