author:    Juergen Gross <jgross@suse.com>        2018-08-28 09:40:23 +0200
committer: Thomas Gleixner <tglx@linutronix.de>   2018-09-03 16:50:36 +0200
commit:    9bad5658ea710f45e4ee68b88a01cfe1839d8b00
tree:      463c646368602c2fdc4c66bf0dab8cc6af97514d /arch/x86/kernel
parent:    40181646db45fb72f46563a2f3b792adc5380710
x86/paravirt: Move the Xen-only pv_cpu_ops under the PARAVIRT_XXL umbrella
Most of the paravirt ops defined in pv_cpu_ops are for Xen PV guests
only. Define them only if CONFIG_PARAVIRT_XXL is set.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-13-jgross@suse.com
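To illustrate the pattern this patch applies (the sketch below is not part of the patch): a minimal, compilable userspace program in which ops-table members exist, are initialized, and are reachable from call sites only under the config guard. The cpu_ops structure and the demo functions are simplified stand-ins for the kernel's paravirt_patch_template / pv_ops, not the actual definitions.

/*
 * Illustration only: Xen-PV-only members are compiled out when
 * CONFIG_PARAVIRT_XXL is not defined.
 * Build with "cc -DCONFIG_PARAVIRT_XXL demo.c" for the full ops
 * table, or plain "cc demo.c" for the lean native variant.
 */
#include <stdio.h>

struct cpu_ops {
	void (*io_delay)(void);		/* needed by all guest types */
#ifdef CONFIG_PARAVIRT_XXL
	void (*set_debugreg)(int reg, unsigned long val); /* Xen PV only */
#endif
};

static void native_io_delay(void)
{
	puts("native io_delay");
}

#ifdef CONFIG_PARAVIRT_XXL
static void native_set_debugreg(int reg, unsigned long val)
{
	printf("native set_debugreg(%d, %#lx)\n", reg, val);
}
#endif

static struct cpu_ops ops = {
	.io_delay	= native_io_delay,
#ifdef CONFIG_PARAVIRT_XXL
	.set_debugreg	= native_set_debugreg,
#endif
};

int main(void)
{
	ops.io_delay();			/* always present */
#ifdef CONFIG_PARAVIRT_XXL
	ops.set_debugreg(0, 0x1234);	/* call sites carry the same guard */
#endif
	return 0;
}

This also shows why the cpu/common.c hunk below changes its guard from CONFIG_PARAVIRT to CONFIG_PARAVIRT_XXL: once a member is conditional, every reference to it must be conditional too.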
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/asm-offsets.c       |  2 ++
 arch/x86/kernel/asm-offsets_64.c    |  2 ++
 arch/x86/kernel/cpu/common.c        |  2 +-
 arch/x86/kernel/head_64.S           |  2 ++
 arch/x86/kernel/paravirt.c          | 13 ++++++++++++-
 arch/x86/kernel/paravirt_patch_32.c |  4 ++++
 arch/x86/kernel/paravirt_patch_64.c |  6 +++++-
 7 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 37e323f3d8c9..0fe233d98d17 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -68,7 +68,9 @@ void common(void) {
 	BLANK();
 	OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
 	OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
+#ifdef CONFIG_PARAVIRT_XXL
 	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
+#endif
 	OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
 #endif
 
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 3384b03e717f..2a15d420a84d 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -21,9 +21,11 @@ static char syscalls_ia32[] = {
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 	OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
 	       cpu.usergs_sysret64);
 	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
+#endif
 #ifdef CONFIG_DEBUG_ENTRY
 	OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4da9c23a96c..d45cd5e0fb11 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1240,7 +1240,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	 * ESPFIX issue, we can change this.
 	 */
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_PARAVIRT
+# ifdef CONFIG_PARAVIRT_XXL
 	do {
 		extern void native_iret(void);
 		if (pv_ops.cpu.iret == native_iret)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 15ebc2fc166e..a5bd72a0ee1a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -31,6 +31,8 @@
 #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
 #else
 #define GET_CR2_INTO(reg) movq %cr2, reg
+#endif
+#ifndef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN iretq
 #endif
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ce0e28506942..1d5f40cc872a 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -101,6 +101,7 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
 	return 5;
 }
 
+#ifdef CONFIG_PARAVIRT_XXL
 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 				   unsigned long addr, unsigned len)
 {
@@ -119,6 +120,7 @@ static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 
 	return 5;
 }
+#endif
 
 DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
@@ -150,10 +152,12 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
 	else if (opfunc == _paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);
 
+#ifdef CONFIG_PARAVIRT_XXL
 	else if (type == PARAVIRT_PATCH(cpu.iret) ||
 		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+#endif
 	else
 		/* Otherwise call the function. */
 		ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
@@ -262,6 +266,7 @@ void paravirt_flush_lazy_mmu(void)
 	preempt_enable();
 }
 
+#ifdef CONFIG_PARAVIRT_XXL
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
@@ -282,6 +287,7 @@ void paravirt_end_context_switch(struct task_struct *next)
 	if (test_and_clear_ti_thread_flag(task_thread_info(next),
 					  TIF_LAZY_MMU_UPDATES))
 		arch_enter_lazy_mmu_mode();
 }
+#endif
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
@@ -320,6 +326,9 @@ struct paravirt_patch_template pv_ops = {
 	.time.steal_clock = native_steal_clock,
 
 	/* Cpu ops. */
+	.cpu.io_delay = native_io_delay,
+
+#ifdef CONFIG_PARAVIRT_XXL
 	.cpu.cpuid = native_cpuid,
 	.cpu.get_debugreg = native_get_debugreg,
 	.cpu.set_debugreg = native_set_debugreg,
@@ -361,10 +370,10 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.swapgs = native_swapgs,
 	.cpu.set_iopl_mask = native_set_iopl_mask,
 
-	.cpu.io_delay = native_io_delay,
 	.cpu.start_context_switch = paravirt_nop,
 	.cpu.end_context_switch = paravirt_nop,
+#endif /* CONFIG_PARAVIRT_XXL */
 
 	/* Irq ops. */
 	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
@@ -464,10 +473,12 @@ struct paravirt_patch_template pv_ops = {
 #endif
 };
 
+#ifdef CONFIG_PARAVIRT_XXL
 /* At this point, native_get/set_debugreg has real function entries */
 NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
+#endif
 
 EXPORT_SYMBOL_GPL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 026fa43e9261..5a20aa56efc0 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -5,7 +5,9 @@ DEF_NATIVE(irq, irq_disable, "cli");
 DEF_NATIVE(irq, irq_enable, "sti");
 DEF_NATIVE(irq, restore_fl, "push %eax; popf");
 DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
+#ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(cpu, iret, "iret");
+#endif
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
@@ -45,7 +47,9 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		PATCH_SITE(irq, irq_enable);
 		PATCH_SITE(irq, restore_fl);
 		PATCH_SITE(irq, save_fl);
+#ifdef CONFIG_PARAVIRT_XXL
 		PATCH_SITE(cpu, iret);
+#endif
 		PATCH_SITE(mmu, read_cr2);
 		PATCH_SITE(mmu, read_cr3);
 		PATCH_SITE(mmu, write_cr3);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 582e893728e8..461aba038ada 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,10 +10,12 @@ DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
+#ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(cpu, wbinvd, "wbinvd");
 
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
+#endif
 
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
@@ -53,12 +55,14 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		PATCH_SITE(irq, save_fl);
 		PATCH_SITE(irq, irq_enable);
 		PATCH_SITE(irq, irq_disable);
+#ifdef CONFIG_PARAVIRT_XXL
 		PATCH_SITE(cpu, usergs_sysret64);
 		PATCH_SITE(cpu, swapgs);
+		PATCH_SITE(cpu, wbinvd);
+#endif
 		PATCH_SITE(mmu, read_cr2);
 		PATCH_SITE(mmu, read_cr3);
 		PATCH_SITE(mmu, write_cr3);
-		PATCH_SITE(cpu, wbinvd);
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock()) {