[PATCH] NUMA: Add zone_to_nid function
There are many places where we need to determine the node of a zone. Currently we use a difficult-to-read sequence of pointer dereferencing. Put that into an inline function and use it throughout the VM. Maybe we can find a way to optimize the lookup in the future.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 89fa30242f
parent 4415cc8df6
committed by Linus Torvalds
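The hunk that introduces the helper itself is not shown in this excerpt. A minimal sketch of what it presumably looks like, inferred from the expressions it replaces in the hunks below (the exact header it lives in and the unsigned long return type are assumptions here, suggested only by the %ld format used in show_node):

/* Sketch only: wraps the pgdat dereference the call sites below open-code. */
static inline unsigned long zone_to_nid(struct zone *zone)
{
	return zone->zone_pgdat->node_id;
}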
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
-		nid = (*z)->zone_pgdat->node_id;
+		nid = zone_to_nid(*z);
 		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
 		    !list_empty(&hugepage_freelists[nid]))
 			break;
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
 	switch (p->policy) {
 	case MPOL_BIND:
 		for (i = 0; p->v.zonelist->zones[i]; i++)
-			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
+			node_set(zone_to_nid(p->v.zonelist->zones[i]),
 				*nodes);
 		break;
 	case MPOL_DEFAULT:
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *policy)
 		 * Follow bind policy behavior and start allocation at the
 		 * first node.
 		 */
-		return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+		return zone_to_nid(policy->v.zonelist->zones[0]);
 
 	case MPOL_PREFERRED:
 		if (policy->v.preferred_node >= 0)
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 
 		nodes_clear(nodes);
 		for (z = pol->v.zonelist->zones; *z; z++)
-			node_set((*z)->zone_pgdat->node_id, nodes);
+			node_set(zone_to_nid(*z), nodes);
 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
 		nodes = tmp;
 
@@ -177,8 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	for (z = zonelist->zones; *z; z++)
 		if (cpuset_zone_allowed(*z, gfp_mask))
-			node_clear((*z)->zone_pgdat->node_id,
-					nodes);
+			node_clear(zone_to_nid(*z), nodes);
 		else
 			return CONSTRAINT_CPUSET;
 
@@ -1217,7 +1217,7 @@ unsigned int nr_free_pagecache_pages(void)
 #ifdef CONFIG_NUMA
 static void show_node(struct zone *zone)
 {
-	printk("Node %d ", zone->zone_pgdat->node_id);
+	printk("Node %ld ", zone_to_nid(zone));
 }
 #else
 #define show_node(zone) do { } while (0)
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * over remote processors and spread off node memory allocations
 	 * as wide as possible.
 	 */
-	node_id = zone->zone_pgdat->node_id;
+	node_id = zone_to_nid(zone);
 	mask = node_to_cpumask(node_id);
 	if (!cpus_empty(mask) && node_id != numa_node_id())
 		return 0;