author    Wanpeng Li <wanpeng.li@hotmail.com>      2015-09-03 22:07:38 +0800
committer Paolo Bonzini <pbonzini@redhat.com>      2015-09-06 16:32:43 +0200
commit    aca6ff29c4063a8d467cdee241e6b3bf7dc4a171 (patch)
tree      e006ea14ead8ae95add04f135dd332bdcbea4bdf /virt
parent    19020f8ab83de9dc5a9c8af1f321a526f38bbc40 (diff)
KVM: dynamic halt-polling
There is a downside to always-poll: polling still happens for idle vCPUs, which can waste CPU time. This patchset adds the ability to adjust halt_poll_ns dynamically, growing halt_poll_ns when a short halt is detected and shrinking it when a long halt is detected. There are two new kernel parameters for changing halt_poll_ns: halt_poll_ns_grow and halt_poll_ns_shrink.

                         no-poll      always-poll    dynamic-poll
-----------------------------------------------------------------------
Idle (nohz) vCPU %c0     0.15%        0.3%           0.2%
Idle (250HZ) vCPU %c0    1.1%         4.6%~14%       1.2%
TCP_RR latency           34us         27us           26.7us

"Idle (X) vCPU %c0" is the percentage of time the physical CPU spent in C0 over 60 seconds (each vCPU is pinned to a pCPU). (nohz) means the guest was tickless; (250HZ) means the guest was ticking at 250HZ.

The big win is with ticking operating systems. Running the Linux guest with nohz=off (and HZ=250), dynamic-poll saves 3.4%~12.8% of CPU time and gets close to no-polling overhead levels. The savings should be even higher for higher-frequency ticks.

Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
[Simplify the patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
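For illustration, the adjustment policy described above can be sketched as a small user-space program. This is a simplified model only: the variable names mirror the kvm module parameters, but the helper function and the sample halt durations are hypothetical, not kernel code.

/* Minimal user-space sketch of the dynamic halt-polling policy described
 * above. Variable names mirror the kvm module parameters; the helper and
 * the sample halt durations are illustrative only.
 */
#include <stdio.h>

static unsigned int halt_poll_ns = 500000;   /* upper bound on the poll window */
static unsigned int halt_poll_ns_grow = 2;   /* multiply window after a short halt */
static unsigned int halt_poll_ns_shrink;     /* 0 => reset window after a long halt */

static void adjust(unsigned int *poll_ns, unsigned long long block_ns)
{
	if (block_ns <= *poll_ns) {
		/* wakeup arrived within the poll window: leave it alone */
	} else if (*poll_ns && block_ns > halt_poll_ns) {
		/* long halt: shrink (or reset) the poll window */
		*poll_ns = halt_poll_ns_shrink ? *poll_ns / halt_poll_ns_shrink : 0;
	} else if (*poll_ns < halt_poll_ns && block_ns < halt_poll_ns) {
		/* short halt and the window is still small: grow it, 10us base */
		*poll_ns = *poll_ns ? *poll_ns * halt_poll_ns_grow : 10000;
	}
}

int main(void)
{
	unsigned int poll_ns = 0;
	unsigned long long halts[] = { 20000, 40000, 400000, 2000000, 15000 };

	for (int i = 0; i < 5; i++) {
		adjust(&poll_ns, halts[i]);
		printf("halt of %lluns -> poll window now %uns\n",
		       halts[i], poll_ns);
	}
	return 0;	/* windows: 10000, 20000, 40000, 0 (reset), 10000 */
}

Running it shows the window doubling across the short halts and resetting to zero after the long (2ms) halt, which is the "grow on short halt, shrink on long halt" behavior the patch implements.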
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c | 53
1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c06e57cd1269..8ab49cfc024e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,9 +66,18 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static unsigned int halt_poll_ns;
+/* halt polling only reduces halt latency by 5-7 us, 500us is enough */
+static unsigned int halt_poll_ns = 500000;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+/* Default doubles per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+
+/* Default resets per-vcpu halt_poll_ns . */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+
/*
* Ordering of locks:
*
@@ -1907,6 +1916,31 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int val = vcpu->halt_poll_ns;
+
+ /* 10us base */
+ if (val == 0 && halt_poll_ns_grow)
+ val = 10000;
+ else
+ val *= halt_poll_ns_grow;
+
+ vcpu->halt_poll_ns = val;
+}
+
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int val = vcpu->halt_poll_ns;
+
+ if (halt_poll_ns_shrink == 0)
+ val = 0;
+ else
+ val /= halt_poll_ns_shrink;
+
+ vcpu->halt_poll_ns = val;
+}
+
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
if (kvm_arch_vcpu_runnable(vcpu)) {
@@ -1929,6 +1963,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
ktime_t start, cur;
DEFINE_WAIT(wait);
bool waited = false;
+ u64 block_ns;
start = cur = ktime_get();
if (vcpu->halt_poll_ns) {
@@ -1961,7 +1996,21 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
cur = ktime_get();
out:
- trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+ block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+ if (halt_poll_ns) {
+ if (block_ns <= vcpu->halt_poll_ns)
+ ;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ }
+
+ trace_kvm_vcpu_wakeup(block_ns, waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
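As a quick way to inspect the new knobs at runtime, the parameters declared with module_param() above are normally exposed under /sys/module/kvm/parameters/ once the kvm module is loaded. The small reader below is only a sketch under that assumption.

/* Read back the halt-polling module parameters from sysfs.
 * Assumes the kvm module is loaded; module_param() normally exposes its
 * parameters under /sys/module/kvm/parameters/.
 */
#include <stdio.h>

static void show_param(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/module/kvm/parameters/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%s: not available\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s", name, buf);
	fclose(f);
}

int main(void)
{
	/* halt_poll_ns is S_IRUGO | S_IWUSR, so root can also change it at
	 * runtime; halt_poll_ns_grow and halt_poll_ns_shrink are S_IRUGO
	 * (read-only) in this patch. */
	show_param("halt_poll_ns");
	show_param("halt_poll_ns_grow");
	show_param("halt_poll_ns_shrink");
	return 0;
}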