bootmem: Replace work_with_active_regions() with for_each_mem_pfn_range()
Callback-based iteration is cumbersome and much less useful than a for_each_*() iterator. This patch implements for_each_mem_pfn_range(), which replaces work_with_active_regions(). All current users of work_with_active_regions() are converted.

This simplifies walking over early_node_map and will allow converting the internal logic in page_alloc to use the iterator instead of walking early_node_map directly, which in turn will enable moving node information to memblock.

The powerpc change is only compile tested.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714074610.GD3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
commit 5dfe8660a3
parent fc769a8e70
committed by H. Peter Anvin
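For context before the diff: __next_mem_pfn_range(), added below, is meant to be driven by a companion for_each_mem_pfn_range() macro that this patch adds to include/linux/mm.h; that hunk is not shown here. A minimal sketch of its shape, assuming the usual for_each_*() convention of seeding the index with -1 and stopping once the helper resets it:

/*
 * Sketch: walk every registered PFN range on @nid (or all nodes when
 * nid == MAX_NUMNODES).  i starts at -1; each call to
 * __next_mem_pfn_range() advances to the next matching early_node_map
 * entry and sets i back to -1 after the last one, ending the loop.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))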
mm/page_alloc.c
@@ -3903,18 +3903,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -4421,6 +4409,34 @@ static inline void setup_nr_node_ids(void)
 }
 #endif
 
+/*
+ * Common iterator interface used to define for_each_mem_pfn_range().
+ */
+void __meminit __next_mem_pfn_range(int *idx, int nid,
+				    unsigned long *out_start_pfn,
+				    unsigned long *out_end_pfn, int *out_nid)
+{
+	struct node_active_region *r = NULL;
+
+	while (++*idx < nr_nodemap_entries) {
+		if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
+			r = &early_node_map[*idx];
+			break;
+		}
+	}
+	if (!r) {
+		*idx = -1;
+		return;
+	}
+
+	if (out_start_pfn)
+		*out_start_pfn = r->start_pfn;
+	if (out_end_pfn)
+		*out_end_pfn = r->end_pfn;
+	if (out_nid)
+		*out_nid = r->nid;
+}
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
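To illustrate the call-site conversion this patch performs (the names below are hypothetical, not taken from the patch): a walk that previously needed a callback plus a void *data cookie, e.g.

/* Before: callback-based walk over a node's active PFN ranges. */
static int __init count_pages_cb(unsigned long start_pfn,
				 unsigned long end_pfn, void *data)
{
	*(unsigned long *)data += end_pfn - start_pfn;
	return 0;	/* a non-zero return stops the walk */
}

	work_with_active_regions(nid, count_pages_cb, &nr_pages);

becomes a direct loop over ordinary local variables, with no callback or void * plumbing (passing NULL for the out_nid slot is fine since __next_mem_pfn_range() checks each out pointer):

	unsigned long start_pfn, end_pfn, nr_pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		nr_pages += end_pfn - start_pfn;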