author    Christian Borntraeger <borntraeger@de.ibm.com>  2014-08-05 16:44:14 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2014-12-04 15:29:13 +0100
commit    7a72f7a140bfd3a5dae73088947010bfdbcf6a40 (patch)
tree      bb6f945f4460a39e7d0e6b242ecf656508891c27 /virt
parent    eed6e79d7362335967def243cc891179a9c59892 (diff)
download  lwn-7a72f7a140bfd3a5dae73088947010bfdbcf6a40.tar.gz
          lwn-7a72f7a140bfd3a5dae73088947010bfdbcf6a40.zip
KVM: track pid for VCPU only on KVM_RUN ioctl
We currently track the pid of the task that runs the VCPU in vcpu_load. If a yield to that VCPU is triggered while the PID of the wrong thread is active, the wrong thread might receive a yield, but this will most likely not help the executing thread at all. Instead, if we only track the pid on the KVM_RUN ioctl, there are two possibilities:

1) the thread that did a non-KVM_RUN ioctl is holding a mutex that the VCPU thread is waiting for. In this case, the VCPU thread is not runnable, but we also do not do a wrong yield.

2) the thread that did a non-KVM_RUN ioctl is sleeping, or doing something that does not block the VCPU thread. In this case, the VCPU thread can receive the directed yield correctly.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
CC: Rik van Riel <riel@redhat.com>
CC: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
CC: Michael Mueller <mimu@linux.vnet.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
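For background on why a stale pid matters here: the directed-yield path looks up the task behind vcpu->pid under RCU and boosts it with yield_to(). The sketch below approximates kvm_vcpu_yield_to() from virt/kvm/kvm_main.c of roughly this era; it is illustrative only, is not part of this patch, and details may differ from the exact tree.

#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/pid.h>

/* Illustrative sketch of the directed-yield consumer of vcpu->pid
 * (approximating kvm_vcpu_yield_to(); not part of this patch). */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	int ret = 0;

	/* vcpu->pid is RCU-protected; the rcu_assign_pointer()/
	 * synchronize_rcu() pair below is what this patch moves from
	 * vcpu_load() into the KVM_RUN ioctl path. */
	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;

	/* Boost whichever task most recently entered KVM_RUN for this
	 * VCPU; with this patch that can no longer be a thread that only
	 * issued some other ioctl. */
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}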
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  18
1 file changed, 9 insertions, 9 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2ffee3018a3d..c5c186af823b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
- if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
- /* The thread running this VCPU changed. */
- struct pid *oldpid = vcpu->pid;
- struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
- rcu_assign_pointer(vcpu->pid, newpid);
- if (oldpid)
- synchronize_rcu();
- put_pid(oldpid);
- }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
@@ -2050,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
+ if (oldpid)
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;