author		Avi Kivity <avi@qumranet.com>	2007-06-07 19:18:30 +0300
committer	Avi Kivity <avi@qumranet.com>	2007-07-16 12:05:46 +0300
commit		d9e368d61263055eceac2966bb7ea31b89da3425 (patch)
tree		9d507b851ea7bd667cdd50dde640e47e0d4773e9 /drivers/kvm/kvm_main.c
parent		39c3b86e5c193e09f69f0e99c93600a4999ffc60 (diff)
KVM: Flush remote tlbs when reducing shadow pte permissions
When a vcpu causes a shadow tlb entry to have reduced permissions, it
must also clear the tlb on remote vcpus.  We do that by:

- setting a bit on the vcpu that requests a tlb flush before the next entry
- if the vcpu is currently executing, we send an ipi to make sure it
  exits before we continue

Signed-off-by: Avi Kivity <avi@qumranet.com>
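The hunks below add only the producer side in kvm_main.c; the request bit is
meant to be consumed on the arch-specific entry paths, which are outside this
file's diff.  A minimal sketch of that consumer check, assuming a hypothetical
per-arch hook named kvm_arch_flush_tlb():

/*
 * Sketch only, not part of this file's diff: how an entry path would
 * honor the request bit set by kvm_flush_remote_tlbs().  The name
 * kvm_arch_flush_tlb() is a hypothetical stand-in for the per-arch
 * flush helper.
 */
static void check_tlb_flush_request(struct kvm_vcpu *vcpu)
{
	/* test-and-clear so each request triggers exactly one flush */
	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
		kvm_arch_flush_tlb(vcpu);
}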
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--	drivers/kvm/kvm_main.c	44
1 file changed, 44 insertions, 0 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4e1a017f3db7..633c2eded08d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -41,6 +41,8 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
 
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
@@ -309,6 +311,48 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 	mutex_unlock(&vcpu->mutex);
 }
 
+static void ack_flush(void *_completed)
+{
+	atomic_t *completed = _completed;
+
+	atomic_inc(completed);
+}
+
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+	int i, cpu, needed;
+	cpumask_t cpus;
+	struct kvm_vcpu *vcpu;
+	atomic_t completed;
+
+	atomic_set(&completed, 0);
+	cpus_clear(cpus);
+	needed = 0;
+	for (i = 0; i < kvm->nvcpus; ++i) {
+		vcpu = &kvm->vcpus[i];
+		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+			continue;
+		cpu = vcpu->cpu;
+		if (cpu != -1 && cpu != raw_smp_processor_id())
+			if (!cpu_isset(cpu, cpus)) {
+				cpu_set(cpu, cpus);
+				++needed;
+			}
+	}
+
+	/*
+	 * We really want smp_call_function_mask() here.  But that's not
+	 * available, so ipi all cpus in parallel and wait for them
+	 * to complete.
+	 */
+	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
+		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
+	while (atomic_read(&completed) != needed) {
+		cpu_relax();
+		barrier();
+	}
+}
+
 static struct kvm *kvm_create_vm(void)
 {
 	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
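For context, the intended caller is the shadow MMU: whenever it reduces the
permissions of a shadow pte it must call kvm_flush_remote_tlbs() so no vcpu
keeps a stale, more-permissive translation.  A sketch of such a call site
(the function below is illustrative, not an actual mmu.c hunk):

/*
 * Illustrative only: write-protect a shadow pte, then force every
 * vcpu to drop cached translations via the helper added above.
 */
static void write_protect_spte(struct kvm *kvm, u64 *spte)
{
	*spte &= ~PT_WRITABLE_MASK;	/* reduce permissions */
	kvm_flush_remote_tlbs(kvm);	/* remote vcpus flush before next entry */
}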