path: root/virt/kvm/async_pf.c
author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2010-11-02 17:35:35 +0800
committer  Avi Kivity <avi@redhat.com>  2011-01-12 11:29:01 +0200
commit     15096ffceabb9693306982127348890886384aaa (patch)
tree       10eb51dd343830a56783a1b364f775b0ed28afd2 /virt/kvm/async_pf.c
parent     e6d53e3b0db7ae3641f01a2b2af1470fda86d10c (diff)
KVM: handle more completed apfs if possible
If there is no need to inject an async #PF into the PV guest, we can handle more completed apfs at one time, so the guest #PF can be retried as early as possible.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Acked-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt/kvm/async_pf.c')
-rw-r--r--  virt/kvm/async_pf.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 60df9e059e69..100c66ee0220 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -124,24 +124,24 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
-	if (list_empty_careful(&vcpu->async_pf.done) ||
-	    !kvm_arch_can_inject_async_page_present(vcpu))
-		return;
-
-	spin_lock(&vcpu->async_pf.lock);
-	work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link);
-	list_del(&work->link);
-	spin_unlock(&vcpu->async_pf.lock);
+	while (!list_empty_careful(&vcpu->async_pf.done) &&
+	       kvm_arch_can_inject_async_page_present(vcpu)) {
+		spin_lock(&vcpu->async_pf.lock);
+		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+					link);
+		list_del(&work->link);
+		spin_unlock(&vcpu->async_pf.lock);
 
-	if (work->page)
-		kvm_arch_async_page_ready(vcpu, work);
-	kvm_arch_async_page_present(vcpu, work);
+		if (work->page)
+			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_present(vcpu, work);
 
-	list_del(&work->queue);
-	vcpu->async_pf.queued--;
-	if (work->page)
-		put_page(work->page);
-	kmem_cache_free(async_pf_cache, work);
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
 }
 
 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
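
For reference, here is a sketch of kvm_check_async_pf_completion() as it reads after this patch, reconstructed from the '+' lines of the hunk above with explanatory comments added; the struct fields and helper functions are the ones already used in virt/kvm/async_pf.c and are not redeclared here.

	void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
	{
		struct kvm_async_pf *work;

		/* Keep draining the done list as long as there is a completed
		 * apf and the arch code still allows an "async page present"
		 * event to be delivered to the guest. */
		while (!list_empty_careful(&vcpu->async_pf.done) &&
		       kvm_arch_can_inject_async_page_present(vcpu)) {
			/* Pop the oldest completed work item under the lock. */
			spin_lock(&vcpu->async_pf.lock);
			work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
						link);
			list_del(&work->link);
			spin_unlock(&vcpu->async_pf.lock);

			/* Make the now-resident page visible and notify the guest. */
			if (work->page)
				kvm_arch_async_page_ready(vcpu, work);
			kvm_arch_async_page_present(vcpu, work);

			/* Retire the item: drop it from the per-vcpu queue,
			 * release the page reference, free the descriptor. */
			list_del(&work->queue);
			vcpu->async_pf.queued--;
			if (work->page)
				put_page(work->page);
			kmem_cache_free(async_pf_cache, work);
		}
	}

The only behavioral change is the loop itself: the pre-patch code completed at most one apf per call, while the patched code keeps retiring completed apfs until the done list is empty or kvm_arch_can_inject_async_page_present() refuses further injections, which is what lets a PV guest that needs no injected #PF have all of its pending faults retried in one pass.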