author	Peter Xu <peterx@redhat.com>	2020-04-16 11:58:59 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-05-15 12:26:13 -0400
commit	dd03bcaad0b1a62c8ea6297e6f2a5993c1c5cd30 (patch)
tree	58a3a8e50c197261712183bccff0da1c6623303f /arch
parent	c16312f4fa75e786f5748a7b721a05e6b0d761fe (diff)
KVM: X86: Force ASYNC_PF_PER_VCPU to be power of two
Forcing ASYNC_PF_PER_VCPU to be a power of two is easier than calling roundup_pow_of_two() at every use site. Enforce it with a BUILD_BUG_ON() inside the hash function. Moreover, async page faults never allow more than ASYNC_PF_PER_VCPU requests in flight anyway (see kvm_setup_async_pf()), so a non-power-of-two value would only waste the entries added by rounding up.

Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20200416155859.267366-1-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
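A minimal userspace sketch of the invariant the patch enforces, assuming nothing beyond C11 (not the kernel code itself): a power of two has exactly one bit set, so n && !(n & (n - 1)) is the standard compile-time test, and with a power-of-two size the wrap-around in the probe step reduces to a single AND. TABLE_SIZE here is a stand-in for ASYNC_PF_PER_VCPU, and _Static_assert plays the role of the kernel's BUILD_BUG_ON().

#include <stdint.h>

#define TABLE_SIZE 64	/* stand-in for ASYNC_PF_PER_VCPU */

/* Compile-time power-of-two check, analogous to the patch's
 * BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU)). */
_Static_assert(TABLE_SIZE > 0 && (TABLE_SIZE & (TABLE_SIZE - 1)) == 0,
	       "TABLE_SIZE must be a power of two");

/* With a power-of-two size, (key + 1) % TABLE_SIZE and
 * (key + 1) & (TABLE_SIZE - 1) compute the same slot. */
static inline uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (TABLE_SIZE - 1);
}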
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	2
-rw-r--r--	arch/x86/kvm/x86.c	8
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b8d035694b87..04f44961858b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -763,7 +763,7 @@ struct kvm_vcpu_arch {
 
 	struct {
 		bool halted;
-		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		gfn_t gfns[ASYNC_PF_PER_VCPU];
 		struct gfn_to_hva_cache data;
 		u64 msr_val;
 		u32 id;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a52ef81fb87a..8e28f21fb39b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -257,7 +257,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
 	int i;
 
-	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
 		vcpu->arch.apf.gfns[i] = ~0;
 }
@@ -10310,12 +10310,14 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
+	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
+
 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
 }
 
 static inline u32 kvm_async_pf_next_probe(u32 key)
 {
-	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
 }
 
 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -10333,7 +10335,7 @@ static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
 	int i;
 	u32 key = kvm_async_pf_hash_fn(gfn);
 
-	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+	for (i = 0; i < ASYNC_PF_PER_VCPU &&
 		     (vcpu->arch.apf.gfns[key] != gfn &&
 		      vcpu->arch.apf.gfns[key] != ~0); i++)
 		key = kvm_async_pf_next_probe(key);
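For context, a self-contained toy version of the probing scheme the patched functions implement: hash a key into a power-of-two table, then probe linearly, wrapping with the mask, until the key or an empty slot (~0) is found, giving up after TABLE_SIZE steps. The names and the multiplicative hash are placeholders; the kernel uses hash_32() over gfn_t values.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8			/* must be a power of two */
#define EMPTY ((uint64_t)~0ULL)

static uint64_t table[TABLE_SIZE];

static uint32_t hash_fn(uint64_t key)
{
	/* toy multiplicative hash; the kernel uses hash_32() */
	return (uint32_t)(key * 2654435761u) & (TABLE_SIZE - 1);
}

static uint32_t next_probe(uint32_t slot)
{
	return (slot + 1) & (TABLE_SIZE - 1);
}

/* Mirrors kvm_async_pf_gfn_slot(): stop at the key, at an empty
 * slot, or after TABLE_SIZE probes if the table is full. */
static uint32_t find_slot(uint64_t key)
{
	uint32_t slot = hash_fn(key);
	int i;

	for (i = 0; i < TABLE_SIZE &&
		    table[slot] != key && table[slot] != EMPTY; i++)
		slot = next_probe(slot);
	return slot;
}

int main(void)
{
	for (int i = 0; i < TABLE_SIZE; i++)
		table[i] = EMPTY;	/* like kvm_async_pf_hash_reset() */

	table[find_slot(42)] = 42;	/* insert, then look up again */
	printf("key 42 lives in slot %u\n", find_slot(42));
	return 0;
}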