Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     2
-rw-r--r--  mm/mempolicy.c   6
-rw-r--r--  mm/oom_kill.c    3
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/vmscan.c      2
5 files changed, 7 insertions, 8 deletions
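The zone_to_nid() helper that replaces the open-coded zone->zone_pgdat->node_id chain in the hunks below is introduced outside mm/ (the diffstat above is limited to 'mm'), so its definition does not appear in this diff. A minimal sketch, assuming the helper is an inline accessor that simply wraps the existing pgdat dereference:

    /* Sketch of the accessor assumed by the hunks below; the real
     * definition lives in the shared headers, not under mm/. */
    static inline int zone_to_nid(struct zone *zone)
    {
            return zone->zone_pgdat->node_id;  /* NUMA node owning this zone */
    }

With an int return type, callers such as show_node() can keep printing the node id with a plain %d conversion.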
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3aceadce1a7..7c7d03dbf73 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
struct zone **z;
for (z = zonelist->zones; *z; z++) {
- nid = (*z)->zone_pgdat->node_id;
+ nid = zone_to_nid(*z);
if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
!list_empty(&hugepage_freelists[nid]))
break;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8002e1faccd..38f89650bc8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
switch (p->policy) {
case MPOL_BIND:
for (i = 0; p->v.zonelist->zones[i]; i++)
- node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
+ node_set(zone_to_nid(p->v.zonelist->zones[i]),
*nodes);
break;
case MPOL_DEFAULT:
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *policy)
* Follow bind policy behavior and start allocation at the
* first node.
*/
- return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+ return zone_to_nid(policy->v.zonelist->zones[0]);
case MPOL_PREFERRED:
if (policy->v.preferred_node >= 0)
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
nodes_clear(nodes);
for (z = pol->v.zonelist->zones; *z; z++)
- node_set((*z)->zone_pgdat->node_id, nodes);
+ node_set(zone_to_nid(*z), nodes);
nodes_remap(tmp, nodes, *mpolmask, *newmask);
nodes = tmp;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f1c0ef1fd21..bada3d03119 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -177,8 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
for (z = zonelist->zones; *z; z++)
if (cpuset_zone_allowed(*z, gfp_mask))
- node_clear((*z)->zone_pgdat->node_id,
- nodes);
+ node_clear(zone_to_nid(*z), nodes);
else
return CONSTRAINT_CPUSET;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cf913bdd433..51070b6d593 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1217,7 +1217,7 @@ unsigned int nr_free_pagecache_pages(void)
#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
- printk("Node %d ", zone->zone_pgdat->node_id);
+ printk("Node %ld ", zone_to_nid(zone));
}
#else
#define show_node(zone) do { } while (0)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b950f193816..87779dda4ec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* over remote processors and spread off node memory allocations
* as wide as possible.
*/
- node_id = zone->zone_pgdat->node_id;
+ node_id = zone_to_nid(zone);
mask = node_to_cpumask(node_id);
if (!cpus_empty(mask) && node_id != numa_node_id())
return 0;