author		Marc Zyngier <marc.zyngier@arm.com>	2018-12-20 11:36:07 +0000
committer	Marc Zyngier <marc.zyngier@arm.com>	2019-02-07 11:44:13 +0000
commit		358b28f09f0ab074d781df72b8a671edb1547789 (patch)
tree		7e85610ff66698d60b528d8697e4d221a3ceb851 /arch/arm/kvm
parent		e761a927bc9a7ee6ceb7c4f63d5922dbced87f0d (diff)
arm/arm64: KVM: Allow a VCPU to fully reset itself
The current kvm_psci_vcpu_on implementation will directly try to
manipulate the state of the VCPU to reset it. However, since this is
not done on the thread that runs the VCPU, we can end up in a
strangely corrupted state when the source and target VCPUs are
running at the same time.

Fix this by factoring out all reset logic from the PSCI implementation
and forwarding the required information along with a request to the
target VCPU.

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
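To illustrate the shape of the mechanism the message describes, below is a
minimal, self-contained userspace model of the handoff: the source side
records the reset parameters and then publishes a "reset pending" flag, and
the target side consumes them on its own thread. All names and types here
are simplified stand-ins invented for illustration; the real patch uses a
vcpu_reset_state structure plus KVM's VCPU-request machinery
(kvm_make_request() and friends), most of which lives outside this diffstat.
C11 atomics stand in for the kernel's WRITE_ONCE()/READ_ONCE().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; invented for illustration. */
struct reset_state {
	uint64_t	pc;	/* entry point requested via PSCI CPU_ON */
	uint64_t	r0;	/* context ID, to be delivered in r0/x0 */
	bool		be;	/* caller was big-endian */
	atomic_bool	reset;	/* "a reset is pending" flag */
};

struct vcpu {
	struct reset_state reset_state;
	uint64_t pc, r0;	/* stand-ins for the guest register context */
	bool thumb, be;		/* stand-ins for CPSR/SCTLR state */
};

/*
 * Source-VCPU side: record what the reset should look like, then publish
 * the flag. The release store pairs with the acquire load below, modelling
 * the WRITE_ONCE()/READ_ONCE() pairing in the real patch. The real code
 * additionally raises a VCPU request and wakes the target up.
 */
static void psci_vcpu_on(struct vcpu *target, uint64_t pc, uint64_t r0, bool be)
{
	target->reset_state.pc = pc;
	target->reset_state.r0 = r0;
	target->reset_state.be = be;
	atomic_store_explicit(&target->reset_state.reset, true,
			      memory_order_release);
}

/* Target-VCPU side: runs on the target's own thread, never the source's. */
static void vcpu_reset(struct vcpu *vcpu)
{
	if (atomic_load_explicit(&vcpu->reset_state.reset,
				 memory_order_acquire)) {
		uint64_t target_pc = vcpu->reset_state.pc;

		/* Bit 0 of the entry point selects a Thumb2 entry. */
		if (target_pc & 1) {
			target_pc &= ~1ULL;
			vcpu->thumb = true;
		}

		vcpu->be = vcpu->reset_state.be;
		vcpu->pc = target_pc;
		vcpu->r0 = vcpu->reset_state.r0;
		atomic_store_explicit(&vcpu->reset_state.reset, false,
				      memory_order_relaxed);
	}
}

int main(void)
{
	struct vcpu target = { 0 };

	psci_vcpu_on(&target, 0x80001001, 42, false);
	vcpu_reset(&target);	/* in KVM this runs from the target's run loop */
	printf("pc=%#llx r0=%llu thumb=%d\n",
	       (unsigned long long)target.pc,
	       (unsigned long long)target.r0, target.thumb);
	return 0;
}

Publishing the flag last, with release/acquire ordering, is what guarantees
the target never observes the flag without also observing fully-written
pc/r0/be values; that is the race the commit is closing.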
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/reset.c	24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 5ed0c3ee33d6..e53327912adc 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -26,6 +26,7 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 
 #include <kvm/arm_arch_timer.h>
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset CP15 registers */
 	kvm_reset_coprocs(vcpu);
 
+	/*
+	 * Additional reset state handling that PSCI may have imposed on us.
+	 * Must be done after all the sys_reg reset.
+	 */
+	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
+		unsigned long target_pc = vcpu->arch.reset_state.pc;
+
+		/* Gracefully handle Thumb2 entry point */
+		if (target_pc & 1) {
+			target_pc &= ~1UL;
+			vcpu_set_thumb(vcpu);
+		}
+
+		/* Propagate caller endianness */
+		if (vcpu->arch.reset_state.be)
+			kvm_vcpu_set_be(vcpu);
+
+		*vcpu_pc(vcpu) = target_pc;
+		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+
+		vcpu->arch.reset_state.reset = false;
+	}
+
 	/* Reset arch_timer context */
 	return kvm_timer_vcpu_reset(vcpu);
 }
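The other half of the fix is not visible in this arch/arm/kvm diffstat: the
series adds a KVM_REQ_VCPU_RESET request, and the target's run loop consumes
it before re-entering the guest, so kvm_reset_vcpu() only ever runs on the
thread that owns the VCPU. Sketched from the rest of the series (paraphrased,
not a verbatim excerpt):

/* virt/kvm/arm/arm.c, run-loop request handling (paraphrased sketch) */
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);
	}
}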