author     Tejun Heo <tj@kernel.org>    2011-07-14 11:44:23 +0200
committer  H. Peter Anvin <hpa@linux.intel.com>    2011-07-14 11:47:43 -0700
commit     0608f70c78a384c2f225f2de226ca057a196f108 (patch)
tree       a5ffb473cc55ca8bb538b9869cb61d2982d99f5e /arch/x86/mm/numa.c
parent     7c0caeb866b0f648d91bb75b8bc6f86af95bb033 (diff)
x86: Use HAVE_MEMBLOCK_NODE_MAP
From 5732e1247898d67cbf837585150fe9f68974671d Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 14 Jul 2011 11:22:16 +0200

Convert x86 to HAVE_MEMBLOCK_NODE_MAP. The only difference in memory
handling is that allocations can no longer cross node boundaries,
whether they're node affine or not, which shouldn't matter at all.

This conversion will enable further simplification of boot memory
handling.

-v2: Fix build failure on !NUMA configurations discovered by hpa.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714094423.GG3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
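For readers unfamiliar with the new interface, the heart of the conversion is the loop in numa_register_memblks() shown in the diff below: each struct numa_memblk range is handed to memblock_set_node(), so memblock itself records which node a range belongs to and the separate active-region map plus sort_node_map() become unnecessary. What follows is a minimal, self-contained userspace sketch of that loop; the struct definitions and the memblock_set_node() stub are simplified stand-ins for illustration, not the kernel implementation.

#include <stdio.h>

typedef unsigned long long u64;

/* Simplified stand-ins for the kernel's NUMA meminfo structures. */
struct numa_memblk {
	u64 start;
	u64 end;
	int nid;
};

struct numa_meminfo {
	int nr_blks;
	struct numa_memblk blk[8];
};

/*
 * Stub for illustration only: the real memblock_set_node() tags the
 * matching memblock region(s) with a node id so later allocations and
 * the pfn -> nid map can use it.
 */
static int memblock_set_node(u64 base, u64 size, int nid)
{
	printf("memblock [%#llx-%#llx) -> node %d\n", base, base + size, nid);
	return 0;
}

int main(void)
{
	struct numa_meminfo mi = {
		.nr_blks = 2,
		.blk = {
			{ .start = 0x000000000ULL, .end = 0x080000000ULL, .nid = 0 },
			{ .start = 0x080000000ULL, .end = 0x100000000ULL, .nid = 1 },
		},
	};

	/* Same loop shape as the patched numa_register_memblks(). */
	for (int i = 0; i < mi.nr_blks; i++) {
		struct numa_memblk *mb = &mi.blk[i];
		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
	}
	return 0;
}

Because the node ids now live in memblock itself, numa_init() can wipe any stale associations simply by re-tagging the whole address range with MAX_NUMNODES, which is what the WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES)) line in the last hunk does in place of remove_all_active_ranges().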
Diffstat (limited to 'arch/x86/mm/numa.c')
-rw-r--r--    arch/x86/mm/numa.c    15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 824efadc5741..f4a40bdb2e4e 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -498,13 +498,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
-	for (i = 0; i < mi->nr_blks; i++)
-		memblock_x86_register_active_regions(mi->blk[i].nid,
-					mi->blk[i].start >> PAGE_SHIFT,
-					mi->blk[i].end >> PAGE_SHIFT);
-
-	/* for out of order entries */
-	sort_node_map();
+	for (i = 0; i < mi->nr_blks; i++) {
+		struct numa_memblk *mb = &mi->blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+	}
 
 	/*
 	 * If sections array is gonna be used for pfn -> nid mapping, check
@@ -538,6 +535,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 		setup_node_data(nid, start, end);
 	}
 
+	/* Dump memblock with node info and return. */
+	memblock_dump_all();
 	return 0;
 }
 
@@ -575,7 +574,7 @@ static int __init numa_init(int (*init_func)(void))
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	remove_all_active_ranges();
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
 	numa_reset_distance();
 
 	ret = init_func();