author     Eric Hankland <ehankland@google.com>      2019-07-18 11:38:18 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>       2019-07-20 09:00:48 +0200
commit     30cd8604323dbaf20a80e797fe7057f5b02e394d (patch)
tree       53a0ecd235eb3805293d3be074d72f9ecf57403f
parent     88dddc11a8d6b09201b4db9d255b3394d9bc9e57 (diff)
KVM: x86: Add fixed counters to PMU filter
Updates KVM_CAP_PMU_EVENT_FILTER so it can also whitelist or blacklist
fixed counters.

Signed-off-by: Eric Hankland <ehankland@google.com>
[No need to check padding fields for zero. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  Documentation/virtual/kvm/api.txt  | 15
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h    |  9
-rw-r--r--  arch/x86/kvm/pmu.c                 | 23
3 files changed, 35 insertions, 12 deletions
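
For context, userspace applies such a filter with the KVM_SET_PMU_EVENT_FILTER
VM ioctl introduced by the parent commit. The helper below is a minimal,
hypothetical sketch and not part of this patch: it assumes uapi headers that
already carry the fields added here and a vm_fd previously obtained from
KVM_CREATE_VM, and deny-lists a caller-supplied set of general purpose events
plus whichever fixed counters are set in fixed_counter_bitmap.

/*
 * Hypothetical userspace helper (sketch only): deny-list the given general
 * purpose events and fixed counters on an existing VM fd.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int deny_pmu_events(int vm_fd, const __u64 *events, __u32 nevents,
			   __u32 fixed_counter_bitmap)
{
	struct kvm_pmu_event_filter *f;
	size_t sz = sizeof(*f) + nevents * sizeof(f->events[0]);
	int r;

	f = calloc(1, sz);	/* calloc also zeroes flags and pad[] */
	if (!f)
		return -1;

	f->action = KVM_PMU_EVENT_DENY;
	f->nevents = nevents;
	f->fixed_counter_bitmap = fixed_counter_bitmap;
	memcpy(f->events, events, nevents * sizeof(f->events[0]));

	r = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return r;
}
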
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 2cd6250b2896..e54a3f51ddc5 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4090,17 +4090,22 @@ Parameters: struct kvm_pmu_event_filter (in)
Returns: 0 on success, -1 on error
struct kvm_pmu_event_filter {
- __u32 action;
- __u32 nevents;
- __u64 events[0];
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[0];
};
This ioctl restricts the set of PMU events that the guest can program.
The argument holds a list of events which will be allowed or denied.
The eventsel+umask of each event the guest attempts to program is compared
against the events field to determine whether the guest should have access.
-This only affects general purpose counters; fixed purpose counters can
-be disabled by changing the perfmon CPUID leaf.
+The events field only controls general purpose counters; fixed purpose
+counters are controlled by the fixed_counter_bitmap.
+
+No flags are defined yet, the field must be zero.
Valid values for 'action':
#define KVM_PMU_EVENT_ALLOW 0
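
To illustrate the new field's semantics (again a sketch, not from the patch):
with an empty events list and action KVM_PMU_EVENT_ALLOW, no general purpose
event can be programmed, and only fixed counters whose bits are set in
fixed_counter_bitmap keep running. Bit i corresponds to fixed counter i, so on
Intel bit 0 would typically cover the instructions-retired fixed counter. The
fragment reuses the headers and the hypothetical vm_fd from the sketch above.

/* Allow only fixed counter 0; forbid all general purpose events. */
struct kvm_pmu_event_filter f = {
	.action               = KVM_PMU_EVENT_ALLOW,
	.nevents              = 0,        /* empty allow list for GP counters */
	.fixed_counter_bitmap = 1u << 0,  /* bit i == fixed counter i */
	.flags                = 0,        /* no flags defined yet, must be zero */
};

if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, &f) < 0)
	perror("KVM_SET_PMU_EVENT_FILTER");
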
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index e901b0ab116f..503d3f42da16 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -435,9 +435,12 @@ struct kvm_nested_state {
/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
- __u32 action;
- __u32 nevents;
- __u64 events[0];
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[0];
};
#define KVM_PMU_EVENT_ALLOW 0
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cedaa01ceb6f..46875bbd0419 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -19,8 +19,8 @@
#include "lapic.h"
#include "pmu.h"
-/* This keeps the total size of the filter under 4k. */
-#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63
+/* This is enough to filter the vast majority of currently defined events. */
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
/* NOTE:
* - Each perf counter is defined as "struct kvm_pmc";
@@ -206,12 +206,24 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
unsigned en_field = ctrl & 0x3;
bool pmi = ctrl & 0x8;
+ struct kvm_pmu_event_filter *filter;
+ struct kvm *kvm = pmc->vcpu->kvm;
pmc_stop_counter(pmc);
if (!en_field || !pmc_is_enabled(pmc))
return;
+ filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+ if (filter) {
+ if (filter->action == KVM_PMU_EVENT_DENY &&
+ test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+ return;
+ if (filter->action == KVM_PMU_EVENT_ALLOW &&
+ !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+ return;
+ }
+
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
kvm_x86_ops->pmu_ops->find_fixed_event(idx),
!(en_field & 0x2), /* exclude user */
@@ -385,6 +397,9 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
tmp.action != KVM_PMU_EVENT_DENY)
return -EINVAL;
+ if (tmp.flags != 0)
+ return -EINVAL;
+
if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
return -E2BIG;
@@ -406,8 +421,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
mutex_unlock(&kvm->lock);
synchronize_srcu_expedited(&kvm->srcu);
- r = 0;
+ r = 0;
cleanup:
kfree(filter);
- return r;
+ return r;
}