author	Anton Blanchard <anton@samba.org>	2010-05-10 18:59:18 +0000
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-03-14 14:29:56 -0700
commit	17b08650ffabf7d025ceb608748dc12d4dc2928d (patch)
tree	a60b7d4108fd7f4be91a6e1684d979935b04af9f
parent	9644dbc889ae788130d967d4f4e7f95253440be6 (diff)
powerpc: Use more accurate limit for first segment memory allocations
commit 095c7965f4dc870ed2b65143b1e2610de653416c upstream.

Author: Milton Miller <miltonm@bga.com>

On large machines we are running out of room below 256MB. In some cases
we only need to ensure the allocation is in the first segment, which may
be 256MB or 1TB.

Add slb0_limit and use it to specify the upper limit for the irqstack
and emergency stacks.

On a large ppc64 box, this fixes a panic at boot when the crashkernel=
option is specified (previously we would run out of memory below 256MB).

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
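For reference, a minimal standalone sketch of the limit calculation this patch introduces. It is not the kernel code: the SID_SHIFT values (28 for 256MB segments, 40 for 1TB segments) are assumed from the powerpc MMU headers, and the CPU feature check is reduced to a plain flag for illustration.

    /* Standalone illustration of the first-segment upper bound.
     * SID_SHIFT / SID_SHIFT_1T values assumed (28 and 40). */
    #include <stdio.h>
    #include <stdbool.h>

    #define SID_SHIFT    28   /* 256MB segments */
    #define SID_SHIFT_1T 40   /* 1TB segments */

    static unsigned long long slb0_limit(bool has_1t_segments)
    {
    	/* Top of the first SLB segment: 1TB if the CPU supports
    	 * 1T segments, otherwise 256MB. */
    	if (has_1t_segments)
    		return 1ULL << SID_SHIFT_1T;
    	return 1ULL << SID_SHIFT;
    }

    int main(void)
    {
    	printf("256MB-segment limit: 0x%llx\n", slb0_limit(false));
    	printf("1TB-segment limit:   0x%llx\n", slb0_limit(true));
    	return 0;
    }

On 1T-segment hardware the bound grows from 256MB to 1TB, which is why the hard-coded 0x10000000 in the allocation calls below can be replaced.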
-rw-r--r--	arch/powerpc/kernel/setup_64.c	17
1 files changed, 13 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04f638d82fb3..00d3b65eba4c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -432,9 +432,18 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
+static u64 slb0_limit(void)
+{
+ if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+ return 1UL << SID_SHIFT_1T;
+ }
+ return 1UL << SID_SHIFT;
+}
+
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
+ u64 limit = slb0_limit();
unsigned int i;
/*
@@ -444,10 +453,10 @@ static void __init irqstack_early_init(void)
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
}
}
#else
@@ -478,7 +487,7 @@ static void __init exc_lvl_early_init(void)
*/
static void __init emergency_stack_init(void)
{
- unsigned long limit;
+ u64 limit;
unsigned int i;
/*
@@ -490,7 +499,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(0x10000000ULL, lmb.rmo_size);
+ limit = min(slb0_limit(), lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
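To make the emergency-stack change concrete, here is a standalone sketch of the bound computed after this patch: the allocation must sit below both the first-segment limit and the RMO size (the real-mode accessible region). The segment size and RMO size below are illustrative values, not taken from any particular machine.

    /* Sketch of the emergency-stack limit: min(first-segment size, RMO size).
     * Before the patch the cap was hard-coded to 0x10000000 (256MB). */
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
    	unsigned long long slb0 = 1ULL << 40;        /* assumed 1TB first segment */
    	unsigned long long rmo_size = 256ULL << 20;  /* hypothetical 256MB RMO */

    	unsigned long long limit = MIN(slb0, rmo_size);
    	printf("emergency stack limit: 0x%llx\n", limit);
    	return 0;
    }

The RMO term keeps the stacks reachable in real mode during early exception handling, while the segment term is what this patch relaxes on 1T-segment machines.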