Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/fault.c          |   2
 arch/powerpc/mm/hash_low_32.S    |   2
 arch/powerpc/mm/hash_native_64.c |   7
 arch/powerpc/mm/hash_utils_64.c  |  35
 arch/powerpc/mm/hugetlbpage.c    |  15
 arch/powerpc/mm/init_32.c        |   4
 arch/powerpc/mm/init_64.c        |  52
 arch/powerpc/mm/lmb.c            |  16
 arch/powerpc/mm/mem.c            |   9
 arch/powerpc/mm/mmap.c           |   2
 arch/powerpc/mm/numa.c           | 160
 arch/powerpc/mm/pgtable_32.c     |   5
 arch/powerpc/mm/slb_low.S        |   2
 arch/powerpc/mm/stab.c           |   4
 arch/powerpc/mm/tlb_64.c         |   2

 15 files changed, 128 insertions(+), 189 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a4815d316722..ec4adcb4bc28 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -1,6 +1,4 @@
/*
- * arch/ppc/mm/fault.c
- *
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 12ccd7155bac..ea469eefa146 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -1,6 +1,4 @@
/*
- * arch/ppc/kernel/hashtable.S
- *
* $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
*
* PowerPC version
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d96bcfe4c6f6..33654d1b1b43 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -403,12 +403,17 @@ static void native_hpte_clear(void)
*/
hpte_v = hptep->v;
+ /*
+ * Call __tlbie() here rather than tlbie() since we
+ * already hold the native_tlbie_lock.
+ */
if (hpte_v & HPTE_V_VALID) {
hptep->v = 0;
- tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K, 0);
+ __tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
}
}
+ asm volatile("eieio; tlbsync; ptesync":::"memory");
spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
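
The rewritten loop batches the invalidations: tlbie() takes native_tlbie_lock itself, so calling it here, with the lock already held, would deadlock. A minimal sketch of the resulting pattern (names as in the file above, loop variables illustrative):

    /*
     * Sketch: invalidate a batch of hash PTEs under one lock.
     * __tlbie() is the bare invalidate; the single
     * eieio/tlbsync/ptesync sequence completes the whole batch,
     * instead of once per entry as tlbie() would do.
     */
    spin_lock(&native_tlbie_lock);
    for (slot = 0; slot < slots; slot++) {
            unsigned long hpte_v = hptep[slot].v;
            if (hpte_v & HPTE_V_VALID) {
                    hptep[slot].v = 0;
                    __tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
            }
    }
    asm volatile("eieio; tlbsync; ptesync" : : : "memory");
    spin_unlock(&native_tlbie_lock);
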
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 149351a84b94..89b35c181314 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -88,6 +88,7 @@ static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
hpte_t *htab_address;
+unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
@@ -168,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
#ifdef CONFIG_PPC_ISERIES
if (_machine == PLATFORM_ISERIES_LPAR)
ret = iSeries_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
@@ -177,7 +178,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
#ifdef CONFIG_PPC_PSERIES
if (_machine & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode,
HPTE_V_BOLTED,
psize);
@@ -185,7 +186,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
ret = native_hpte_insert(hpteg, va,
- virt_to_abs(paddr),
+ paddr,
tmp_mode, HPTE_V_BOLTED,
psize);
#endif
@@ -391,7 +392,7 @@ static unsigned long __init htab_get_table_size(void)
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
- BUG_ON(htab_bolt_mapping(start, end, start,
+ BUG_ON(htab_bolt_mapping(start, end, __pa(start),
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
mmu_linear_psize));
}
@@ -399,7 +400,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
void __init htab_initialize(void)
{
- unsigned long table, htab_size_bytes;
+ unsigned long table;
unsigned long pteg_count;
unsigned long mode_rw;
unsigned long base = 0, size = 0;
@@ -421,7 +422,7 @@ void __init htab_initialize(void)
htab_hash_mask = pteg_count - 1;
- if (platform_is_lpar()) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
@@ -430,7 +431,6 @@ void __init htab_initialize(void)
* the absolute address space.
*/
table = lmb_alloc(htab_size_bytes, htab_size_bytes);
- BUG_ON(table == 0);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
@@ -473,21 +473,22 @@ void __init htab_initialize(void)
if (dart_tablebase != 0 && dart_tablebase >= base
&& dart_tablebase < (base + size)) {
+ unsigned long dart_table_end = dart_tablebase + 16 * MB;

if (base != dart_tablebase)
BUG_ON(htab_bolt_mapping(base, dart_tablebase,
- base, mode_rw,
- mmu_linear_psize));
- if ((base + size) > (dart_tablebase + 16*MB))
+ __pa(base), mode_rw,
+ mmu_linear_psize));
+ if ((base + size) > dart_table_end)
BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
- base + size,
- dart_tablebase+16*MB,
+ base + size,
+ __pa(dart_table_end),
mode_rw,
mmu_linear_psize));
continue;
}
#endif /* CONFIG_U3_DART */
- BUG_ON(htab_bolt_mapping(base, base + size, base,
- mode_rw, mmu_linear_psize));
+ BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
+ mode_rw, mmu_linear_psize));
}
/*
@@ -504,8 +505,8 @@ void __init htab_initialize(void)
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
- BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
- tce_alloc_start, mode_rw,
+ BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+ __pa(tce_alloc_start), mode_rw,
mmu_linear_psize));
}
@@ -516,7 +517,7 @@ void __init htab_initialize(void)
void htab_initialize_secondary(void)
{
- if (!platform_is_lpar())
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
mtspr(SPRN_SDR1, _SDR1);
}
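
Two themes run through this file: htab_bolt_mapping() now takes a physical address, so callers wrap the virtual start in __pa() instead of relying on virt_to_abs() inside the callee; and LPAR detection goes through firmware_has_feature(FW_FEATURE_LPAR) rather than platform_is_lpar(). The dropped BUG_ON(table == 0) is safe because lmb_alloc() now panics internally on failure (see the lmb.c hunk below). A sketch of the feature test, assuming the era's asm/firmware.h API:

    #include <asm/firmware.h>

    /* Sketch: FW_FEATURE_LPAR is set during early boot when the
     * kernel finds itself running under a hypervisor, so this works
     * without the old _machine/platform_is_lpar() checks.
     */
    if (firmware_has_feature(FW_FEATURE_LPAR)) {
            /* hypervisor owns the hash table; nothing to program */
    } else {
            mtspr(SPRN_SDR1, _SDR1);  /* bare metal: point the MMU at our htab */
    }
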
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b51bb28c054b..7370f9f33e29 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -133,21 +133,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return __pte(old);
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- if (! (within_hugepage_low_range(addr, len)
- || within_hugepage_high_range(addr, len)) )
- return -EINVAL;
- return 0;
-}
-
struct slb_flush_info {
struct mm_struct *mm;
u16 newareas;
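
The removal suggests is_aligned_hugepage_range() was consolidated outside this file; the alignment part of the check is trivial. A hedged sketch of that portion (within_hugepage_low_range()/within_hugepage_high_range() are the powerpc segment-range helpers the removed code used):

    /* Sketch: both the address and the length must be multiples of
     * the huge page size; masking with ~HPAGE_MASK exposes any
     * low-order bits that would make the range misaligned.
     */
    static int hugepage_range_aligned(unsigned long addr, unsigned long len)
    {
            if ((addr | len) & ~HPAGE_MASK)
                    return -EINVAL;
            return 0;
    }
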
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 7d0d75c11848..b57fb3a2b7bb 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -216,7 +216,7 @@ static void free_sec(unsigned long start, unsigned long end, const char *name)
while (start < end) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
cnt++;
start += PAGE_SIZE;
@@ -248,7 +248,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
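
set_page_count() was being removed from code outside mm/; init_page_count() is the sanctioned way to give a freshly reclaimed page the refcount of 1 that free_page() expects to drop. Roughly, a sketch of the include/linux/mm.h helper from this era:

    static inline void init_page_count(struct page *page)
    {
            atomic_set(&page->_count, 1);
    }
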
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 81cfb0c2ec58..babebd15bdc4 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -84,54 +84,6 @@
/* max amount of RAM to use */
unsigned long __max_memory;
-/* info on what we think the IO hole is */
-unsigned long io_hole_start;
-unsigned long io_hole_size;
-
-/*
- * Do very early mm setup.
- */
-void __init mm_init_ppc64(void)
-{
-#ifndef CONFIG_PPC_ISERIES
- unsigned long i;
-#endif
-
- ppc64_boot_msg(0x100, "MM Init");
-
- /* This is the story of the IO hole... please, keep seated,
- * unfortunately, we are out of oxygen masks at the moment.
- * So we need some rough way to tell where your big IO hole
- * is. On pmac, it's between 2G and 4G, on POWER3, it's around
- * that area as well, on POWER4 we don't have one, etc...
- * We need that as a "hint" when sizing the TCE table on POWER3
- * So far, the simplest way that seem work well enough for us it
- * to just assume that the first discontinuity in our physical
- * RAM layout is the IO hole. That may not be correct in the future
- * (and isn't on iSeries but then we don't care ;)
- */
-
-#ifndef CONFIG_PPC_ISERIES
- for (i = 1; i < lmb.memory.cnt; i++) {
- unsigned long base, prevbase, prevsize;
-
- prevbase = lmb.memory.region[i-1].base;
- prevsize = lmb.memory.region[i-1].size;
- base = lmb.memory.region[i].base;
- if (base > (prevbase + prevsize)) {
- io_hole_start = prevbase + prevsize;
- io_hole_size = base - (prevbase + prevsize);
- break;
- }
- }
-#endif /* CONFIG_PPC_ISERIES */
- if (io_hole_start)
- printk("IO Hole assumed to be %lx -> %lx\n",
- io_hole_start, io_hole_start + io_hole_size - 1);
-
- ppc64_boot_msg(0x100, "MM Init Done");
-}
-
void free_initmem(void)
{
unsigned long addr;
@@ -140,7 +92,7 @@ void free_initmem(void)
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
memset((void *)addr, 0xcc, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
@@ -155,7 +107,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
+ init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
}
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index bbe3eac918e8..417d58518558 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -31,6 +31,8 @@
#define DBG(fmt...)
#endif
+#define LMB_ALLOC_ANYWHERE 0
+
struct lmb lmb;
void lmb_dump_all(void)
@@ -226,6 +228,20 @@ unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
unsigned long max_addr)
{
+ unsigned long alloc;
+
+ alloc = __lmb_alloc_base(size, align, max_addr);
+
+ if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+ size, max_addr);
+
+ return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+ unsigned long max_addr)
+{
long i, j;
unsigned long base = 0;
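
lmb_alloc_base() keeps its signature but now panics on failure, with the old non-panicking body split out as __lmb_alloc_base(). Boot-critical callers (htab_initialize(), do_init_bootmem(), stabs_alloc() in this series) can therefore drop their BUG_ON/panic checks, while callers with a fallback, such as careful_allocation() in numa.c below, switch to the __ variant. A sketch of the two calling styles (variable names illustrative):

    /* Boot-critical: failure panics inside the call, so the return
     * value needs no check.
     */
    table = lmb_alloc_base(size, align, limit);

    /* Best-effort with fallback: use the non-panicking variant and
     * test for 0 (nothing found below the limit).
     */
    ret = __lmb_alloc_base(size, align, node_limit);
    if (!ret)
            ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
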
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 550517c2dd42..badac10d700c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -108,8 +108,8 @@ EXPORT_SYMBOL(phys_mem_access_prot);
void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 0);
- free_cold_page(page);
+ init_page_count(page);
+ __free_page(page);
totalram_pages++;
num_physpages++;
}
@@ -125,7 +125,7 @@ int __devinit add_memory(u64 start, u64 size)
nid = hot_add_scn_to_nid(start);
pgdata = NODE_DATA(nid);
- start = __va(start);
+ start = (unsigned long)__va(start);
create_section_mapping(start, start + size);
/* this should work for most non-highmem platforms */
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
bootmap_pages = bootmem_bootmap_pages(total_pages);
start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
- BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
@@ -376,7 +375,7 @@ void __init mem_init(void)
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalhigh_pages++;
}
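
online_page() now follows the same convention as the boot-time free loops: init_page_count() puts the page at refcount 1 and __free_page() drops that reference, handing the page to the buddy allocator; the old set_page_count(page, 0) + free_cold_page() pair predates that convention. The (unsigned long) cast on __va() silences a pointer-to-integer warning, since add_memory() carries the address in an integer. A sketch of the refcount contract (comments illustrative, not kernel text):

    init_page_count(page);  /* page->_count = 1 */
    __free_page(page);      /* drops the count 1 -> 0, page goes to the allocator */
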
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index fe65f522aff3..972a8e884b9a 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/ppc64/mm/mmap.c
- *
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2863a912bcd0..e89b22aa539e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -129,10 +129,12 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
*start_pfn = 0;
}
-static inline void map_cpu_to_node(int cpu, int node)
+static void __cpuinit map_cpu_to_node(int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+ dbg("adding cpu %d to node %d\n", cpu, node);
+
if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
cpu_set(cpu, numa_cpumask_lookup_table[node]);
}
@@ -153,7 +155,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static struct device_node *find_cpu_node(unsigned int cpu)
+static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
struct device_node *cpu_node = NULL;
@@ -189,23 +191,29 @@ static int *of_get_associativity(struct device_node *dev)
return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}
-static int of_node_numa_domain(struct device_node *device)
+/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
+ * info is found.
+ */
+static int of_node_to_nid(struct device_node *device)
{
- int numa_domain;
+ int nid = -1;
unsigned int *tmp;
if (min_common_depth == -1)
- return 0;
+ goto out;
tmp = of_get_associativity(device);
- if (tmp && (tmp[0] >= min_common_depth)) {
- numa_domain = tmp[min_common_depth];
- } else {
- dbg("WARNING: no NUMA information for %s\n",
- device->full_name);
- numa_domain = 0;
- }
- return numa_domain;
+ if (!tmp)
+ goto out;
+
+ if (tmp[0] >= min_common_depth)
+ nid = tmp[min_common_depth];
+
+ /* POWER4 LPAR uses 0xffff as invalid node */
+ if (nid == 0xffff || nid >= MAX_NUMNODES)
+ nid = -1;
+out:
+ return nid;
}
/*
@@ -246,8 +254,7 @@ static int __init find_min_common_depth(void)
if ((len >= 1) && ref_points) {
depth = ref_points[1];
} else {
- dbg("WARNING: could not find NUMA "
- "associativity reference point\n");
+ dbg("NUMA: ibm,associativity-reference-points not found.\n");
depth = -1;
}
of_node_put(rtas_root);
@@ -283,9 +290,9 @@ static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
-static int numa_setup_cpu(unsigned long lcpu)
+static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
- int numa_domain = 0;
+ int nid = 0;
struct device_node *cpu = find_cpu_node(lcpu);
if (!cpu) {
@@ -293,27 +300,16 @@ static int numa_setup_cpu(unsigned long lcpu)
goto out;
}
- numa_domain = of_node_numa_domain(cpu);
+ nid = of_node_to_nid(cpu);
- if (numa_domain >= num_online_nodes()) {
- /*
- * POWER4 LPAR uses 0xffff as invalid node,
- * dont warn in this case.
- */
- if (numa_domain != 0xffff)
- printk(KERN_ERR "WARNING: cpu %ld "
- "maps to invalid NUMA node %d\n",
- lcpu, numa_domain);
- numa_domain = 0;
- }
+ if (nid < 0 || !node_online(nid))
+ nid = any_online_node(NODE_MASK_ALL);
out:
- node_set_online(numa_domain);
-
- map_cpu_to_node(lcpu, numa_domain);
+ map_cpu_to_node(lcpu, nid);
of_node_put(cpu);
- return numa_domain;
+ return nid;
}
static int cpu_numa_callback(struct notifier_block *nfb,
@@ -325,10 +321,7 @@ static int cpu_numa_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
- if (min_common_depth == -1 || !numa_enabled)
- map_cpu_to_node(lcpu, 0);
- else
- numa_setup_cpu(lcpu);
+ numa_setup_cpu(lcpu);
ret = NOTIFY_OK;
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -375,7 +368,7 @@ static int __init parse_numa_properties(void)
{
struct device_node *cpu = NULL;
struct device_node *memory = NULL;
- int max_domain;
+ int default_nid = 0;
unsigned long i;
if (numa_enabled == 0) {
@@ -385,32 +378,32 @@ static int __init parse_numa_properties(void)
min_common_depth = find_min_common_depth();
- dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
if (min_common_depth < 0)
return min_common_depth;
- max_domain = numa_setup_cpu(boot_cpuid);
+ dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
/*
- * Even though we connect cpus to numa domains later in SMP init,
- * we need to know the maximum node id now. This is because each
- * node id must have NODE_DATA etc backing it.
- * As a result of hotplug we could still have cpus appear later on
- * with larger node ids. In that case we force the cpu into node 0.
+ * Even though we connect cpus to numa domains later in SMP
+ * init, we need to know the node ids now. This is because
+ * each node to be onlined must have NODE_DATA etc backing it.
*/
- for_each_cpu(i) {
- int numa_domain;
+ for_each_present_cpu(i) {
+ int nid;
cpu = find_cpu_node(i);
+ BUG_ON(!cpu);
+ nid = of_node_to_nid(cpu);
+ of_node_put(cpu);
- if (cpu) {
- numa_domain = of_node_numa_domain(cpu);
- of_node_put(cpu);
-
- if (numa_domain < MAX_NUMNODES &&
- max_domain < numa_domain)
- max_domain = numa_domain;
- }
+ /*
+ * Don't fall back to default_nid yet -- we will plug
+ * cpus into nodes once the memory scan has discovered
+ * the topology.
+ */
+ if (nid < 0)
+ continue;
+ node_set_online(nid);
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
@@ -418,7 +411,7 @@ static int __init parse_numa_properties(void)
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start;
unsigned long size;
- int numa_domain;
+ int nid;
int ranges;
unsigned int *memcell_buf;
unsigned int len;
@@ -439,18 +432,15 @@ new_range:
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
- numa_domain = of_node_numa_domain(memory);
-
- if (numa_domain >= MAX_NUMNODES) {
- if (numa_domain != 0xffff)
- printk(KERN_ERR "WARNING: memory at %lx maps "
- "to invalid NUMA node %d\n", start,
- numa_domain);
- numa_domain = 0;
- }
-
- if (max_domain < numa_domain)
- max_domain = numa_domain;
+ /*
+ * Assumption: either all memory nodes or none will
+ * have associativity properties. If none, then
+ * everything goes to default_nid.
+ */
+ nid = of_node_to_nid(memory);
+ if (nid < 0)
+ nid = default_nid;
+ node_set_online(nid);
if (!(size = numa_enforce_memory_limit(start, size))) {
if (--ranges)
@@ -459,16 +449,13 @@ new_range:
continue;
}
- add_region(numa_domain, start >> PAGE_SHIFT,
+ add_region(nid, start >> PAGE_SHIFT,
size >> PAGE_SHIFT);
if (--ranges)
goto new_range;
}
- for (i = 0; i <= max_domain; i++)
- node_set_online(i);
-
return 0;
}
@@ -483,7 +470,6 @@ static void __init setup_nonnuma(void)
printk(KERN_INFO "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
- map_cpu_to_node(boot_cpuid, 0);
for (i = 0; i < lmb.memory.cnt; ++i)
add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
lmb_size_pages(&lmb.memory, i));
@@ -570,11 +556,11 @@ static void __init *careful_allocation(int nid, unsigned long size,
unsigned long end_pfn)
{
int new_nid;
- unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+ unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
if (!ret)
- ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+ ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
if (!ret)
panic("numa.c: cannot allocate %lu bytes on node %d",
@@ -620,6 +606,8 @@ void __init do_init_bootmem(void)
dump_numa_memory_topology();
register_cpu_notifier(&ppc64_numa_nb);
+ cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+ (void *)(unsigned long)boot_cpuid);
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn, pages_present;
@@ -767,10 +755,10 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory = NULL;
nodemask_t nodes;
- int numa_domain = 0;
+ int default_nid = any_online_node(NODE_MASK_ALL);
if (!numa_enabled || (min_common_depth < 0))
- return numa_domain;
+ return default_nid;
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start, size;
@@ -787,15 +775,15 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
ha_new_range:
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
- numa_domain = of_node_numa_domain(memory);
+ nid = of_node_to_nid(memory);
/* Domains not present at boot default to 0 */
- if (!node_online(numa_domain))
- numa_domain = any_online_node(NODE_MASK_ALL);
+ if (nid < 0 || !node_online(nid))
+ nid = default_nid;
if ((scn_addr >= start) && (scn_addr < (start + size))) {
of_node_put(memory);
- goto got_numa_domain;
+ goto got_nid;
}
if (--ranges) /* process all ranges in cell */
@@ -804,12 +792,12 @@ ha_new_range:
BUG(); /* section address should be found above */
/* Temporary code to ensure that returned node is not empty */
-got_numa_domain:
+got_nid:
nodes_setall(nodes);
- while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
- node_clear(numa_domain, nodes);
- numa_domain = any_online_node(nodes);
+ while (NODE_DATA(nid)->node_spanned_pages == 0) {
+ node_clear(nid, nodes);
+ nid = any_online_node(nodes);
}
- return numa_domain;
+ return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
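
The recurring rename in this file replaces "numa_domain" with "nid" and centralizes validation in of_node_to_nid(), which returns -1 rather than silently mapping unknown nodes to 0; each caller then chooses its own fallback. A sketch of the sentinel pattern (names from the file above):

    int nid = of_node_to_nid(cpu_node);
    if (nid < 0 || !node_online(nid))
            nid = any_online_node(NODE_MASK_ALL);  /* caller-chosen fallback */
    map_cpu_to_node(cpu, nid);

The direct cpu_numa_callback(..., CPU_UP_PREPARE, ...) call in do_init_bootmem() wires up the boot cpu through the same path hotplugged cpus take, which is why the explicit map_cpu_to_node(boot_cpuid, 0) calls could go.
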
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f4e5ac122615..d296eb6b4545 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -37,6 +37,7 @@
unsigned long ioremap_base;
unsigned long ioremap_bot;
+EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
int io_bat_index;
#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
@@ -153,6 +154,7 @@ ioremap64(unsigned long long addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
+EXPORT_SYMBOL(ioremap64);
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
@@ -162,6 +164,7 @@ ioremap(phys_addr_t addr, unsigned long size)
return ioremap64(addr64, size);
}
#endif /* CONFIG_PHYS_64BIT */
+EXPORT_SYMBOL(ioremap);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -247,6 +250,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
out:
return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
+EXPORT_SYMBOL(__ioremap);
void iounmap(volatile void __iomem *addr)
{
@@ -259,6 +263,7 @@ void iounmap(volatile void __iomem *addr)
if (addr > high_memory && (unsigned long) addr < ioremap_bot)
vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
+EXPORT_SYMBOL(iounmap);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
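
The new EXPORT_SYMBOL lines make the 32-bit ioremap family available to modules. A sketch of module-side usage (the address and size are placeholders, not values from this patch):

    void __iomem *regs = ioremap(0xf0000000, 0x1000);  /* placeholder phys addr/size */
    if (regs) {
            /* ... readl()/writel() accesses ... */
            iounmap(regs);
    }
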
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index d1acee38f163..abfaabf667bf 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -1,6 +1,4 @@
/*
- * arch/ppc64/mm/slb_low.S
- *
* Low-level SLB routines
*
* Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 82e4951826bc..91d25fb27f89 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
1<<SID_SHIFT);
- if (! newstab)
- panic("Unable to allocate segment table for CPU %d.\n",
- cpu);
-
newstab = (unsigned long)__va(newstab);
memset((void *)newstab, 0, HW_PAGE_SIZE);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index bb3afb6e6317..f734b11566c2 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -36,7 +36,7 @@
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
/* This is declared as we are using the more or less generic
- * include/asm-ppc64/tlb.h file -- tgall
+ * include/asm-powerpc/tlb.h file -- tgall
*/
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);