author		Nicholas Piggin <npiggin@gmail.com>	2021-11-23 19:51:58 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-11-24 21:08:58 +1100
commit		2251fbe76395e4d89c31099984714c5f1135f052 (patch)
tree		c1971c38297e1a4f0672386bf59ff7b133d5bf1e /arch/powerpc/kvm/book3s_hv.c
parent		34e119c96b2b381278d1ddf6b1708678462daba4 (diff)
download	lwn-2251fbe76395e4d89c31099984714c5f1135f052.tar.gz
		lwn-2251fbe76395e4d89c31099984714c5f1135f052.zip
KVM: PPC: Book3S HV P9: Improve mtmsrd scheduling by delaying MSR[EE] disable
Moving the mtmsrd after the host SPRs are saved and before the guest SPRs start to be loaded can prevent an SPR scoreboard stall (because the mtmsrd is L=1 type, which does not cause context synchronisation).

This is also now more convenient to combine with the mtmsrd L=0 instruction to enable facilities just below, but that is not done yet.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-21-npiggin@gmail.com
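In effect, the hard-disable of MSR[EE] moves out of kvmhv_run_single_vcpu() and into kvmhv_p9_guest_entry(), between the host SPR save and the guest SPR loads. A condensed before/after sketch of that ordering, distilled from the hunks below (not the literal function bodies; surrounding code elided):

	/* before: EE cleared in the caller, ahead of any SPR traffic */
	local_irq_disable();
	hard_irq_disable();			/* mtmsrd clearing MSR[EE] */
	/* ... */
	save_p9_host_os_sprs(&host_os_sprs);	/* host SPR saves */
	/* guest SPR loads follow */

	/*
	 * after: EE cleared between the host SPR saves and the guest SPR
	 * loads, so the non-context-synchronising mtmsrd (L=1) can overlap
	 * with the SPR accesses instead of stalling the SPR scoreboard
	 */
	save_p9_host_os_sprs(&host_os_sprs);
	hard_irq_disable();
	if (lazy_irq_pending())
		return 0;			/* a masked interrupt arrived; abort the entry */
	/* guest SPR loads follow */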
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 541a023e25dd..6b0689589e13 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4169,6 +4169,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	save_p9_host_os_sprs(&host_os_sprs);
+	/*
+	 * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover unrecoverable
+	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
+	 *
+	 * Much more difficult and less worthwhile to combine with IR/DR
+	 * disable.
+	 */
+	hard_irq_disable();
+	if (lazy_irq_pending())
+		return 0;
+
 	/* MSR bits may have been cleared by context switch */
 	msr = 0;
 	if (IS_ENABLED(CONFIG_PPC_FPU))
@@ -4680,6 +4692,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	struct kvmppc_vcore *vc;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
+	unsigned long flags;
 	trace_kvmppc_run_vcpu_enter(vcpu);
@@ -4723,11 +4736,11 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (kvm_is_radix(kvm))
 		kvmppc_prepare_radix_vcpu(vcpu, pcpu);
-	local_irq_disable();
-	hard_irq_disable();
+	/* flags save not required, but irq_pmu has no disable/enable API */
+	powerpc_local_irq_pmu_save(flags);
 	if (signal_pending(current))
 		goto sigpend;
-	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+	if (need_resched() || !kvm->arch.mmu_ready)
 		goto out;
 	if (!nested) {
@@ -4795,7 +4808,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	}
 	vtime_account_guest_exit();
-	local_irq_enable();
+	powerpc_local_irq_pmu_restore(flags);
 	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
@@ -4853,7 +4866,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 	run->exit_reason = KVM_EXIT_INTR;
 	vcpu->arch.ret = -EINTR;
  out:
-	local_irq_enable();
+	powerpc_local_irq_pmu_restore(flags);
 	preempt_enable();
 	goto done;
 }
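For context, the resulting interrupt masking in kvmhv_run_single_vcpu() pairs up roughly as below (a condensed illustration drawn from the hunks above, with unrelated code elided; it is not the full function body):

	unsigned long flags;

	/*
	 * Mask interrupts, including PMU interrupts; flags are kept only
	 * because the irq_pmu helpers have no plain disable/enable variant.
	 */
	powerpc_local_irq_pmu_save(flags);
	if (signal_pending(current))
		goto sigpend;
	if (need_resched() || !kvm->arch.mmu_ready)
		goto out;
	/*
	 * ... guest entry; lazy_irq_pending() is now checked inside
	 * kvmhv_p9_guest_entry(), after the host SPRs have been saved ...
	 */
	powerpc_local_irq_pmu_restore(flags);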