author | Ingo Molnar <mingo@elte.hu> | 2011-03-09 10:38:55 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2011-03-09 10:38:59 +0100
commit | c8b44163b754612fc4769fe1c5df00e98fc9d3c6 (patch)
tree | 77706ff1f2a72ed294885b6cf0a7c0de0f92d6df /arch/arm/kernel/head.S
parent | ac23f25355ef53f3d14352fcff3c6817527a9749 (diff)
parent | a5abba989deceb731047425812d268daf7536575 (diff)
download | lwn-c8b44163b754612fc4769fe1c5df00e98fc9d3c6.tar.gz lwn-c8b44163b754612fc4769fe1c5df00e98fc9d3c6.zip
Merge commit 'v2.6.38-rc8' into x86/asm
Merge reason: Update with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/kernel/head.S')
-rw-r--r-- | arch/arm/kernel/head.S | 38
1 file changed, 26 insertions, 12 deletions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb21..f06ff9feb0db 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
 	and	r3, r9, #0x000f0000	@ architecture version
 	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -440,7 +430,31 @@ smp_on_up:
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
+#endif
 
 	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
+
 #include "head-common.S"