author	David S. Miller <davem@sunset.davemloft.net>	2006-02-09 22:57:21 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:12:14 -0800
commit	164c220fa3947abbada65329d168f421b461a2a7
tree	1a10418ccf896f1f9209c2206bedf87915b63bfd
parent	dedacf623283cb24933ec9f7d5bf539f19173cd4
[SPARC64]: Fix hypervisor call arg passing.

Function goes in %o5, args go in %o0 --> %o5.

Signed-off-by: David S. Miller <davem@davemloft.net>
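For reference, the corrected convention looks like the sketch below: the fast-trap API number is bound to %o5 and the call arguments to %o0 onward before trapping with "ta HV_FAST_TRAP". This is a minimal illustration modelled on the hypervisor_tlb_lock() hunk in arch/sparc64/mm/init.c, not code from the patch itself; the helper name example_hv_map_perm_addr() is invented for the example, and the HV_* constants are assumed to be the kernel's existing sun4v hypervisor definitions that the patch already uses.

	/*
	 * Illustrative only: issue a sun4v hypervisor fast trap with the
	 * corrected register usage.  The function number goes in %o5 and
	 * the arguments in %o0..%o3, as established by this patch.
	 */
	static void example_hv_map_perm_addr(unsigned long vaddr,
					     unsigned long pte,
					     unsigned long mmu)
	{
		register unsigned long func asm("%o5");
		register unsigned long arg0 asm("%o0");
		register unsigned long arg1 asm("%o1");
		register unsigned long arg2 asm("%o2");
		register unsigned long arg3 asm("%o3");

		func = HV_FAST_MMU_MAP_PERM_ADDR;	/* API call number in %o5 */
		arg0 = vaddr;				/* arguments in %o0 onward */
		arg1 = 0;				/* context 0 (kernel)      */
		arg2 = pte;
		arg3 = mmu;
		__asm__ __volatile__("ta	%5"
				     : "=&r" (func), "=&r" (arg0), "=&r" (arg1),
				       "=&r" (arg2), "=&r" (arg3)
				     : "i" (HV_FAST_TRAP),
				       "0" (func), "1" (arg0), "2" (arg1),
				       "3" (arg2), "4" (arg3));
	}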
-rw-r--r--	arch/sparc64/kernel/irq.c	8
-rw-r--r--	arch/sparc64/kernel/smp.c	16
-rw-r--r--	arch/sparc64/kernel/trampoline.S	56
-rw-r--r--	arch/sparc64/kernel/tsb.S	6
-rw-r--r--	arch/sparc64/mm/init.c	20
5 files changed, 53 insertions(+), 53 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 1f6455503f24..c5dd6daf127f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -863,10 +863,10 @@ void init_irqwork_curcpu(void)
static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
{
- register unsigned long func __asm__("%o0");
- register unsigned long arg0 __asm__("%o1");
- register unsigned long arg1 __asm__("%o2");
- register unsigned long arg2 __asm__("%o3");
+ register unsigned long func __asm__("%o5");
+ register unsigned long arg0 __asm__("%o0");
+ register unsigned long arg1 __asm__("%o1");
+ register unsigned long arg2 __asm__("%o2");
unsigned long page = get_zeroed_page(GFP_ATOMIC);
if (!page) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c10a3a8639e8..f553264588d6 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
retries = 0;
cnt = init_cpu_list(cpu_list, mask);
do {
- register unsigned long func __asm__("%o0");
- register unsigned long arg0 __asm__("%o1");
- register unsigned long arg1 __asm__("%o2");
- register unsigned long arg2 __asm__("%o3");
+ register unsigned long func __asm__("%o5");
+ register unsigned long arg0 __asm__("%o0");
+ register unsigned long arg1 __asm__("%o1");
+ register unsigned long arg2 __asm__("%o2");
func = HV_FAST_CPU_MONDO_SEND;
arg0 = cnt;
@@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
int retries = 0;
do {
- register unsigned long func __asm__("%o0");
- register unsigned long arg0 __asm__("%o1");
- register unsigned long arg1 __asm__("%o2");
- register unsigned long arg2 __asm__("%o3");
+ register unsigned long func __asm__("%o5");
+ register unsigned long arg0 __asm__("%o0");
+ register unsigned long arg1 __asm__("%o1");
+ register unsigned long arg2 __asm__("%o2");
cpu_list[0] = i;
func = HV_FAST_CPU_MONDO_SEND;
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index ffa8b79632cf..c476f5b321fb 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -265,20 +265,20 @@ do_unlock:
nop
niagara_lock_tlb:
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o0
- sethi %hi(KERNBASE), %o1
- clr %o2
- sethi %hi(kern_locked_tte_data), %o3
- ldx [%o3 + %lo(kern_locked_tte_data)], %o3
- mov HV_MMU_IMMU, %o4
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+ sethi %hi(KERNBASE), %o0
+ clr %o1
+ sethi %hi(kern_locked_tte_data), %o2
+ ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o0
- sethi %hi(KERNBASE), %o1
- clr %o2
- sethi %hi(kern_locked_tte_data), %o3
- ldx [%o3 + %lo(kern_locked_tte_data)], %o3
- mov HV_MMU_DMMU, %o4
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+ sethi %hi(KERNBASE), %o0
+ clr %o1
+ sethi %hi(kern_locked_tte_data), %o2
+ ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP
sethi %hi(bigkernel), %g2
@@ -286,24 +286,24 @@ niagara_lock_tlb:
brz,pt %g2, after_lock_tlb
nop
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o0
- sethi %hi(KERNBASE + 0x400000), %o1
- clr %o2
- sethi %hi(kern_locked_tte_data), %o3
- ldx [%o3 + %lo(kern_locked_tte_data)], %o3
- sethi %hi(0x400000), %o4
- add %o3, %o4, %o3
- mov HV_MMU_IMMU, %o4
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+ sethi %hi(KERNBASE + 0x400000), %o0
+ clr %o1
+ sethi %hi(kern_locked_tte_data), %o2
+ ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ sethi %hi(0x400000), %o3
+ add %o2, %o3, %o2
+ mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o0
- sethi %hi(KERNBASE + 0x400000), %o1
- clr %o2
- sethi %hi(kern_locked_tte_data), %o3
- ldx [%o3 + %lo(kern_locked_tte_data)], %o3
- sethi %hi(0x400000), %o4
- add %o3, %o4, %o3
- mov HV_MMU_DMMU, %o4
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+ sethi %hi(KERNBASE + 0x400000), %o0
+ clr %o1
+ sethi %hi(kern_locked_tte_data), %o2
+ ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ sethi %hi(0x400000), %o3
+ add %o2, %o3, %o2
+ mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP
after_lock_tlb:
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index a53ec6fb7697..8a9351258af8 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -266,9 +266,9 @@ __tsb_context_switch:
mov SCRATCHPAD_UTSBREG2, %g1
stxa %g2, [%g1] ASI_SCRATCHPAD
- mov HV_FAST_MMU_TSB_CTXNON0, %o0
- mov 1, %o1
- mov %o4, %o2
+ mov HV_FAST_MMU_TSB_CTXNON0, %o5
+ mov 1, %o0
+ mov %o4, %o1
ta HV_FAST_TRAP
ba,pt %xcc, 9f
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index e602b857071a..7faba33202a9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
unsigned long pte,
unsigned long mmu)
{
- register unsigned long func asm("%o0");
- register unsigned long arg0 asm("%o1");
- register unsigned long arg1 asm("%o2");
- register unsigned long arg2 asm("%o3");
- register unsigned long arg3 asm("%o4");
+ register unsigned long func asm("%o5");
+ register unsigned long arg0 asm("%o0");
+ register unsigned long arg1 asm("%o1");
+ register unsigned long arg2 asm("%o2");
+ register unsigned long arg3 asm("%o3");
func = HV_FAST_MMU_MAP_PERM_ADDR;
arg0 = vaddr;
@@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void)
/* Register this cpu's fault status area with the hypervisor. */
void __cpuinit sun4v_register_fault_status(void)
{
+ register unsigned long func asm("%o5");
register unsigned long arg0 asm("%o0");
- register unsigned long arg1 asm("%o1");
int cpu = hard_smp_processor_id();
struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long pa;
pa = kern_base + ((unsigned long) tb - KERNBASE);
- arg0 = HV_FAST_MMU_FAULT_AREA_CONF;
- arg1 = pa;
+ func = HV_FAST_MMU_FAULT_AREA_CONF;
+ arg0 = pa;
__asm__ __volatile__("ta %4"
- : "=&r" (arg0), "=&r" (arg1)
- : "0" (arg0), "1" (arg1),
+ : "=&r" (func), "=&r" (arg0)
+ : "0" (func), "1" (arg0),
"i" (HV_FAST_TRAP));
}