From b264f57fde0c686c5c1dfdd0c21992c49196bb87 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Sun, 27 Oct 2019 16:19:38 +0100
Subject: x86/hyperv: Micro-optimize send_ipi_one()

When sending an IPI to a single CPU there is no need to deal with
cpumasks.

With a 2 CPU guest on WS2019, a minor (about 3%, 8043 -> 7761 CPU
cycles) improvement can be seen with the smp_call_function_single()
loop benchmark. The optimization, however, is tiny and straightforward.
Also, send_ipi_one() is important for the PV spinlock kick.

Switching to the regular APIC IPI send for the CPU > 64 case does not
make sense as it is twice as expensive (12650 CPU cycles for the
__send_ipi_mask_ex() call vs. 26000 for orig_apic.send_IPI(cpu,
vector)).

Signed-off-by: Vitaly Kuznetsov
Signed-off-by: Thomas Gleixner
Reviewed-by: Michael Kelley
Reviewed-by: Roman Kagan
Link: https://lkml.kernel.org/r/20191027151938.7296-1-vkuznets@redhat.com
---
 arch/x86/hyperv/hv_apic.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5c056b8aebef..86c8674e92f4 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -194,10 +194,20 @@ do_ex_hypercall:
 
 static bool __send_ipi_one(int cpu, int vector)
 {
-	struct cpumask mask = CPU_MASK_NONE;
+	int vp = hv_cpu_number_to_vp_number(cpu);
 
-	cpumask_set_cpu(cpu, &mask);
-	return __send_ipi_mask(&mask, vector);
+	trace_hyperv_send_ipi_one(cpu, vector);
+
+	if (!hv_hypercall_pg || (vp == VP_INVAL))
+		return false;
+
+	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+		return false;
+
+	if (vp >= 64)
+		return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+
+	return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
 }
 
 static void hv_send_ipi(int cpu, int vector)
-- 
2.20.1
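
As an aside (not part of the patch): a minimal user-space sketch of the
dispatch logic in __send_ipi_one() above, modeling why a VP number below
64 can be sent with a single fast hypercall carrying a one-bit mask in a
u64, while larger VP numbers must fall back to the _ex variant. All
fake_* functions below are hypothetical stand-ins, not the real Hyper-V
interfaces.

/*
 * Model of the __send_ipi_one() dispatch: a VP number below 64 fits as
 * a single bit in a 64-bit mask word, so one "fast" call suffices;
 * larger VP numbers need the extended (_ex) variant.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VP_INVAL	((uint32_t)-1)
#define BIT_ULL(n)	(1ULL << (n))

/* Stub: pretend the fast hypercall always succeeds (status 0). */
static uint64_t fake_fast_hypercall16(uint64_t vector, uint64_t vp_mask)
{
	printf("fast path: vector=%#llx mask=%#llx\n",
	       (unsigned long long)vector, (unsigned long long)vp_mask);
	return 0;
}

/* Stub for the slower extended-mask hypercall path. */
static bool fake_send_ipi_mask_ex(uint32_t vp, uint64_t vector)
{
	printf("_ex path: vector=%#llx vp=%u\n",
	       (unsigned long long)vector, vp);
	return true;
}

static bool send_ipi_one_model(uint32_t vp, uint64_t vector)
{
	if (vp == VP_INVAL)
		return false;

	if (vp >= 64)		/* bit would not fit in a u64 mask */
		return fake_send_ipi_mask_ex(vp, vector);

	return fake_fast_hypercall16(vector, BIT_ULL(vp)) == 0;
}

int main(void)
{
	send_ipi_one_model(3, 0xf3);	/* takes the fast path */
	send_ipi_one_model(70, 0xf3);	/* takes the _ex path */
	return 0;
}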