author     Sean Christopherson <seanjc@google.com>   2022-06-11 00:57:52 +0000
committer  Paolo Bonzini <pbonzini@redhat.com>       2022-06-20 11:49:46 -0400
commit     545feb96c052809dab5ec04b95f976acca9f9364
tree       5fa98bf0e6b77569ded69b8f528c3b2b44213e9f /arch/x86/kvm/vmx/pmu_intel.c
parent     5d4283df5a0fc8299fba9443c33d219939eccc2d
Revert "KVM: x86: always allow host-initiated writes to PMU MSRs"
Revert the hack to allow host-initiated accesses to all "PMU" MSRs,
as intel_is_valid_msr() returns true for _all_ MSRs, regardless of whether
or not they have a snowball's chance in hell of actually being PMU MSRs.
That mostly gets papered over by the actual get/set helpers only handling
MSRs that they know about, except there's the minor detail that
kvm_pmu_{g,s}et_msr() eat reads and writes when the PMU is disabled.
I.e., KVM will happily allow reads and writes to _any_ MSR if the PMU is
disabled, either via module param or capability.
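
To make the failure mode concrete, the sketch below is a minimal userspace
model of the interaction, not the kernel code: intel_is_valid_msr() and
kvm_pmu_get_msr() mirror KVM's names but their bodies are simplified
stand-ins, host_get_msr() is a hypothetical driver for the host-initiated
(KVM_GET_MSRS-style) path, and 0x38f stands in for MSR_CORE_PERF_GLOBAL_CTRL.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pmu_enabled;	/* cleared via module param or capability */

/* The reverted hack: any host-initiated access is declared "valid". */
static bool intel_is_valid_msr(uint32_t msr, bool host_initiated)
{
	if (host_initiated)
		return true;	/* true for _all_ MSRs, PMU or not */
	return msr == 0x38f;	/* stand-in for the real PMU-MSR checks */
}

/* Models kvm_pmu_get_msr(): eats the read when the PMU is disabled. */
static int kvm_pmu_get_msr(uint32_t msr, uint64_t *data)
{
	if (!pmu_enabled) {
		*data = 0;	/* read silently swallowed, reports success */
		return 0;
	}
	/* With the PMU enabled, only known PMU MSRs are handled. */
	return msr == 0x38f ? 0 : 1;
}

/* Hypothetical host-initiated access path. */
static int host_get_msr(uint32_t msr, uint64_t *data)
{
	if (!intel_is_valid_msr(msr, /*host_initiated=*/true))
		return 1;
	return kvm_pmu_get_msr(msr, data);
}

int main(void)
{
	uint64_t data;

	pmu_enabled = false;
	/* 0xdead is not a PMU MSR, yet the access "succeeds". */
	printf("host read of bogus MSR 0xdead: %s\n",
	       host_get_msr(0xdead, &data) == 0 ? "allowed" : "rejected");
	return 0;
}
```

With the PMU disabled, the host_initiated check short-circuits to "valid"
and the disabled-PMU path reports success for any index, so the read of the
bogus MSR is "allowed"; that is the hole the revert closes.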
This reverts commit d1c88a4020567ba4da52f778bcd9619d87e4ea75.
Fixes: d1c88a402056 ("KVM: x86: always allow host-initiated writes to PMU MSRs")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220611005755.753273-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/pmu_intel.c')
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 27 ++++++++++-----------------
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index b62012766226..b1aae60cf061 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -196,45 +196,38 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 	return ret;
 }
 
-static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
+static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	u64 perf_capabilities = vcpu->arch.perf_capabilities;
+	int ret;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (host_initiated)
-			return true;
-		return pmu->version > 1;
+		ret = pmu->version > 1;
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (host_initiated)
-			return true;
-		return perf_capabilities & PERF_CAP_PEBS_FORMAT;
+		ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
 		break;
 	case MSR_IA32_DS_AREA:
-		if (host_initiated)
-			return true;
-		return guest_cpuid_has(vcpu, X86_FEATURE_DS);
+		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
 		break;
 	case MSR_PEBS_DATA_CFG:
-		if (host_initiated)
-			return true;
-		return (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
 			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
 		break;
 	default:
-		if (host_initiated)
-			return true;
-		return get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
 			intel_pmu_is_valid_lbr_msr(vcpu, msr);
 		break;
 	}
+
+	return ret;
 }
 
 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -596,7 +589,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
 	nested_vmx_pmu_refresh(vcpu,
-			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, false));
+			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));
 
 	if (cpuid_model_is_consistent(vcpu))
 		x86_perf_get_lbr(&lbr_desc->records);
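
For readers who prefer the end state to interleaved hunks, this is
intel_is_valid_msr() as it reads after the revert, pieced together from the
hunk above; the comments are editorial additions, not part of the kernel
source.

```c
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 perf_capabilities = vcpu->arch.perf_capabilities;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/* Global control MSRs exist only with arch PMU v2+. */
		ret = pmu->version > 1;
		break;
	case MSR_IA32_PEBS_ENABLE:
		ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
		break;
	case MSR_IA32_DS_AREA:
		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
		break;
	case MSR_PEBS_DATA_CFG:
		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
		break;
	default:
		/* Otherwise the MSR must map to a GP, fixed, or full-width
		 * counter/event-select, or be a valid LBR MSR. */
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}
```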