author | Alexander Graf <agraf@suse.de> | 2010-01-08 02:58:06 +0100 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-03-01 12:35:49 -0300 |
commit | 021ec9c69f8b7b20f46296cc76cc4cb341b25191 (patch) | |
tree | 304f086761e7c01fb412c8319b89ff8b6fb2dde7 /arch/powerpc/kvm/book3s_64_interrupts.S | |
parent | bc90923e27908ef65aa8aaad2f234e18b5273c78 (diff) | |
KVM: PPC: Call SLB patching code in interrupt safe manner
The transition from IR=1 to IR=0, from the module memory entry code into
the real mode SLB switching code, is currently racy.
To work around that, I took a look at the RTAS entry code, which faces
a similar problem, and did the same thing:
A small helper in linear mapped memory that does mtmsr with IR=0 and
then RFIs into the actual handler.
Thanks to that trick we can safely take page faults in the entry code
and only need to be really careful from the SLB switching part onward.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_interrupts.S')
-rw-r--r-- | arch/powerpc/kvm/book3s_64_interrupts.S | 25 |
1 file changed, 7 insertions, 18 deletions
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 3c0ba5513077..33aef5345f6b 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -95,17 +95,14 @@ kvm_start_entry:
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
 	std	r3, PACA_KVM_VMHANDLER(r13)
 
-	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	std	r3, PACA_KVM_RMHANDLER(r13)
-
 kvm_start_lightweight:
 
 	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
 	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
 	/* Load some guest state in the respective registers */
-	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
-	mtctr	r3			/* CTR = r3 */
+	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
+					/* will be swapped in by rmcall */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
@@ -131,22 +128,14 @@ kvm_start_lightweight:
 
 no_dcbz32_on:
 
-	/* This sets the Magic value for the trampoline */
-
-	/* XXX this needs to move into a safe function, so we can
-	   be sure we don't get any interrupts */
-
-	li	r11, 1
-	stb	r11, PACA_KVM_IN_GUEST(r13)
-
-	ld	r3, PACA_KVM_RMHANDLER(r13)
-	mtsrr0	r3
+	ld	r6, VCPU_RMCALL(r4)
+	mtctr	r6
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
+	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
+	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
 	/* Jump to SLB patching handlder and into our guest */
-	RFI
+	bctr
 
 /*
  * This is the handler in module memory. It gets jumped at from the
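The diffstat is limited to book3s_64_interrupts.S, so the linear-mapped helper the commit message describes, which the entry code above now reaches via mtctr r6 / bctr, is not part of this view. The following is only a sketch of what such a helper looks like, with the register roles inferred from the call site in this diff; the label kvmppc_rmcall is an assumption based on the VCPU_RMCALL field, not necessarily the verbatim code added elsewhere in this commit.

```asm
/*
 * Illustrative sketch, assuming the register convention visible at the
 * call site above:
 *   r3  = real mode SLB switching handler (VCPU_TRAMPOLINE_ENTER)
 *   r4  = MSR_KERNEL & ~(MSR_IR | MSR_DR), i.e. translation disabled
 *   r5  = guest CTR value ("will be swapped in by rmcall")
 *   CTR = address of this helper (loaded from VCPU_RMCALL)
 */
#include <asm/ppc_asm.h>

_GLOBAL(kvmppc_rmcall)		/* name assumed from VCPU_RMCALL */
	mtmsrd	r4, 0		/* IR/DR off: no page fault can hit the
				   window between here and the RFI */
	mtctr	r5		/* restore the guest CTR clobbered by bctr */
	mtsrr0	r3		/* RFI target: the SLB patching handler */
	mtsrr1	r4		/* MSR the handler will run with */
	RFI			/* enter the real mode handler */
```

Because the helper sits in linear mapped memory, its instructions stay fetchable the moment IR is cleared, so the mtsrr/RFI sequence can no longer be interrupted by a page fault, which is the race the commit message describes.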