author:    H. Peter Anvin <hpa@zytor.com>    2008-01-30 13:31:02 +0100
committer: Ingo Molnar <mingo@elte.hu>       2008-01-30 13:31:02 +0100
commit:    faca62273b602ab482fb7d3d940dbf41ef08b00e (patch)
tree:      913fb1c565a2b719b00ae4b745c38cc9b0ebf279
parent:    25149b62d3e6a3e737af39bd4a0b4e97de0811b7 (diff)
x86: use generic register name in the thread and tss structures
This changes the size-specific register names (eip/rip, esp/rsp, etc.) in the
thread and tss structures to generic names (ip, sp, etc.) shared by the 32-bit
and 64-bit code.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
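
For readers skimming the diffstat below, the substance of the change is a mechanical field rename. The following condensed sketch (an illustration only, using a hypothetical struct name rather than the kernel's real definition) shows the renamed 32-bit thread_struct fields, as taken from the include/asm-x86/processor_32.h hunk:

    /* Condensed illustration -- not the kernel's full definition. */
    struct thread_struct_sketch {
            unsigned long sp0;        /* was esp0 (rsp0 on 64-bit)   */
            unsigned long ip;         /* was eip                     */
            unsigned long sp;         /* was esp (rsp on 64-bit)     */
            unsigned long saved_sp0;  /* was saved_esp0 (vm86 state) */
            /* remaining members are unchanged by this patch */
    };

The hardware TSS fields are renamed the same way (esp0/rsp0 become sp0, and so on), which is what lets the 32-bit and 64-bit code refer to them by a single name.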
-rw-r--r-- | arch/x86/kernel/asm-offsets_32.c | 4
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- | arch/x86/kernel/doublefault_32.c | 15
-rw-r--r-- | arch/x86/kernel/entry_32.S | 6
-rw-r--r-- | arch/x86/kernel/paravirt_32.c | 2
-rw-r--r-- | arch/x86/kernel/process_32.c | 12
-rw-r--r-- | arch/x86/kernel/process_64.c | 16
-rw-r--r-- | arch/x86/kernel/smpboot_32.c | 8
-rw-r--r-- | arch/x86/kernel/smpboot_64.c | 6
-rw-r--r-- | arch/x86/kernel/traps_32.c | 6
-rw-r--r-- | arch/x86/kernel/traps_64.c | 4
-rw-r--r-- | arch/x86/kernel/vm86_32.c | 16
-rw-r--r-- | arch/x86/kernel/vmi_32.c | 10
-rw-r--r-- | arch/x86/lguest/boot.c | 6
-rw-r--r-- | arch/x86/vdso/vdso32-setup.c | 4
-rw-r--r-- | arch/x86/xen/enlighten.c | 6
-rw-r--r-- | arch/x86/xen/smp.c | 4
-rw-r--r-- | drivers/lguest/x86/core.c | 4
-rw-r--r-- | include/asm-x86/paravirt.h | 6
-rw-r--r-- | include/asm-x86/processor_32.h | 37
-rw-r--r-- | include/asm-x86/processor_64.h | 20
-rw-r--r-- | include/asm-x86/system_32.h | 4
-rw-r--r-- | include/asm-x86/system_64.h | 2
23 files changed, 99 insertions, 101 deletions
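
Before the per-file hunks, here is a condensed sketch of how the renamed kernel-stack hook reads after the patch. It is assembled from the include/asm-x86/processor_32.h and include/asm-x86/paravirt.h hunks below, with stand-in struct names (the *_sketch types are mine, not kernel types) so the fragment is self-contained:

    /* Stand-in types; the real ones live in the headers patched below. */
    struct hw_tss_sketch { unsigned long sp0; unsigned long ss1; };
    struct tss_sketch    { struct hw_tss_sketch x86_tss; };
    struct thread_sketch { unsigned long sp0; unsigned long sysenter_cs; };

    /* native_load_sp0() (formerly native_load_esp0): publish the task's
     * kernel stack pointer in the hardware TSS so ring transitions land
     * on the right stack. */
    static void native_load_sp0_sketch(struct tss_sketch *tss,
                                       struct thread_sketch *thread)
    {
            tss->x86_tss.sp0 = thread->sp0;
            if (tss->x86_tss.ss1 != thread->sysenter_cs) {
                    tss->x86_tss.ss1 = thread->sysenter_cs;
                    /* the real kernel also rewrites MSR_IA32_SYSENTER_CS here */
            }
    }

Callers change accordingly: load_esp0(tss, thread) becomes load_sp0(tss, thread) in cpu_init(), __switch_to() and the vm86 paths, and the paravirt, Xen, lguest and VMI backends rename their .load_esp0 ops to .load_sp0, as the hunks below show.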
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 4fc24a61f431..415313556708 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -101,8 +101,8 @@ void foo(void) OFFSET(pbe_orig_address, pbe, orig_address); OFFSET(pbe_next, pbe, next); - /* Offset from the sysenter stack to tss.esp0 */ - DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) - + /* Offset from the sysenter stack to tss.sp0 */ + DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - sizeof(struct tss_struct)); DEFINE(PAGE_SIZE_asm, PAGE_SIZE); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 5db2a163bf4b..235cd615b89d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -691,7 +691,7 @@ void __cpuinit cpu_init(void) BUG(); enter_lazy_tlb(&init_mm, curr); - load_esp0(t, thread); + load_sp0(t, thread); set_tss_desc(cpu,t); load_TR_desc(); load_LDT(&init_mm.context); diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index 40978af630e7..cc19a3ea403a 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c @@ -35,12 +35,13 @@ static void doublefault_fn(void) if (ptr_ok(tss)) { struct i386_hw_tss *t = (struct i386_hw_tss *)tss; - printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp); + printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", + t->ip, t->sp); printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n", - t->eax, t->ebx, t->ecx, t->edx); + t->ax, t->bx, t->cx, t->dx); printk(KERN_EMERG "esi = %08lx, edi = %08lx\n", - t->esi, t->edi); + t->si, t->di); } } @@ -50,15 +51,15 @@ static void doublefault_fn(void) struct tss_struct doublefault_tss __cacheline_aligned = { .x86_tss = { - .esp0 = STACK_START, + .sp0 = STACK_START, .ss0 = __KERNEL_DS, .ldt = 0, .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, - .eip = (unsigned long) doublefault_fn, + .ip = (unsigned long) doublefault_fn, /* 0x2 bit is always set */ - .eflags = X86_EFLAGS_SF | 0x2, - .esp = STACK_START, + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, .es = __USER_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 153bb87a4eea..6a474e1028c7 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -288,7 +288,7 @@ ENTRY(ia32_sysenter_target) CFI_SIGNAL_FRAME CFI_DEF_CFA esp, 0 CFI_REGISTER esp, ebp - movl TSS_sysenter_esp0(%esp),%esp + movl TSS_sysenter_sp0(%esp),%esp sysenter_past_esp: /* * No need to follow this irqs on/off section: the syscall @@ -743,7 +743,7 @@ END(device_not_available) * that sets up the real kernel stack. Check here, since we can't * allow the wrong stack to be used. * - * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have + * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have * already pushed 3 words if it hits on the sysenter instruction: * eflags, cs and eip. 
* @@ -755,7 +755,7 @@ END(device_not_available) cmpw $__KERNEL_CS,4(%esp); \ jne ok; \ label: \ - movl TSS_sysenter_esp0+offset(%esp),%esp; \ + movl TSS_sysenter_sp0+offset(%esp),%esp; \ CFI_DEF_CFA esp, 0; \ CFI_UNDEFINED eip; \ pushfl; \ diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c index 706b0562ea40..f4e3a8e01cf2 100644 --- a/arch/x86/kernel/paravirt_32.c +++ b/arch/x86/kernel/paravirt_32.c @@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = { .write_ldt_entry = write_dt_entry, .write_gdt_entry = write_dt_entry, .write_idt_entry = write_dt_entry, - .load_esp0 = native_load_esp0, + .load_sp0 = native_load_sp0, .irq_enable_syscall_ret = native_irq_enable_syscall_ret, .iret = native_iret, diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3744cf63682c..add3bf34e205 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -75,7 +75,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number); */ unsigned long thread_saved_pc(struct task_struct *tsk) { - return ((unsigned long *)tsk->thread.esp)[3]; + return ((unsigned long *)tsk->thread.sp)[3]; } /* @@ -488,10 +488,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, childregs->ax = 0; childregs->sp = sp; - p->thread.esp = (unsigned long) childregs; - p->thread.esp0 = (unsigned long) (childregs+1); + p->thread.sp = (unsigned long) childregs; + p->thread.sp0 = (unsigned long) (childregs+1); - p->thread.eip = (unsigned long) ret_from_fork; + p->thread.ip = (unsigned long) ret_from_fork; savesegment(gs,p->thread.gs); @@ -718,7 +718,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas /* * Reload esp0. */ - load_esp0(tss, next); + load_sp0(tss, next); /* * Save away %gs. No need to save %fs, as it was saved on the @@ -851,7 +851,7 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); - sp = p->thread.esp; + sp = p->thread.sp; if (!stack_page || sp < stack_page || sp > top_esp+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes bp last. */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index efbb1a2eab97..238193822e23 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -493,9 +493,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, if (sp == ~0UL) childregs->sp = (unsigned long)childregs; - p->thread.rsp = (unsigned long) childregs; - p->thread.rsp0 = (unsigned long) (childregs+1); - p->thread.userrsp = me->thread.userrsp; + p->thread.sp = (unsigned long) childregs; + p->thread.sp0 = (unsigned long) (childregs+1); + p->thread.usersp = me->thread.usersp; set_tsk_thread_flag(p, TIF_FORK); @@ -607,7 +607,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Reload esp0, LDT and the page table pointer: */ - tss->rsp0 = next->rsp0; + tss->sp0 = next->sp0; /* * Switch DS and ES. @@ -666,8 +666,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Switch the PDA and FPU contexts. 
*/ - prev->userrsp = read_pda(oldrsp); - write_pda(oldrsp, next->userrsp); + prev->usersp = read_pda(oldrsp); + write_pda(oldrsp, next->usersp); write_pda(pcurrent, next_p); write_pda(kernelstack, @@ -769,9 +769,9 @@ unsigned long get_wchan(struct task_struct *p) if (!p || p == current || p->state==TASK_RUNNING) return 0; stack = (unsigned long)task_stack_page(p); - if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE) + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) return 0; - fp = *(u64 *)(p->thread.rsp); + fp = *(u64 *)(p->thread.sp); do { if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE) diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 3566191832b3..0f294d6e22cf 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c @@ -454,7 +454,7 @@ void __devinit initialize_secondary(void) "movl %0,%%esp\n\t" "jmp *%1" : - :"m" (current->thread.esp),"m" (current->thread.eip)); + :"m" (current->thread.sp),"m" (current->thread.ip)); } /* Static state in head.S used to set up a CPU */ @@ -753,7 +753,7 @@ static inline struct task_struct * __cpuinit alloc_idle_task(int cpu) /* initialize thread_struct. we really want to avoid destroy * idle tread */ - idle->thread.esp = (unsigned long)task_pt_regs(idle); + idle->thread.sp = (unsigned long)task_pt_regs(idle); init_idle(idle, cpu); return idle; } @@ -798,7 +798,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) per_cpu(current_task, cpu) = idle; early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); - idle->thread.eip = (unsigned long) start_secondary; + idle->thread.ip = (unsigned long) start_secondary; /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); @@ -808,7 +808,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* So we see what's up */ printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); /* Stack for startup_32 can be just as for start_secondary onwards */ - stack_start.sp = (void *) idle->thread.esp; + stack_start.sp = (void *) idle->thread.sp; irq_ctx_init(cpu); diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index ac1089f2b917..c3f2736ba530 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -577,7 +577,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) c_idle.idle = get_idle_for_cpu(cpu); if (c_idle.idle) { - c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *) + c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *) (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1); init_idle(c_idle.idle, cpu); goto do_rest; @@ -613,8 +613,8 @@ do_rest: start_rip = setup_trampoline(); - init_rsp = c_idle.idle->thread.rsp; - per_cpu(init_tss,cpu).rsp0 = init_rsp; + init_rsp = c_idle.idle->thread.sp; + per_cpu(init_tss,cpu).sp0 = init_rsp; initial_code = start_secondary; clear_tsk_thread_flag(c_idle.idle, TIF_FORK); diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 27713553cc59..57491942cc4e 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -163,7 +163,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long dummy; stack = &dummy; if (task != current) - stack = (unsigned long *)task->thread.esp; + stack = (unsigned long *)task->thread.sp; } #ifdef CONFIG_FRAME_POINTER @@ -173,7 +173,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, asm ("movl %%ebp, %0" : "=r" (bp) : ); } else { /* bp is the last reg pushed by switch_to */ - 
bp = *(unsigned long *) task->thread.esp; + bp = *(unsigned long *) task->thread.sp; } } #endif @@ -253,7 +253,7 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, if (sp == NULL) { if (task) - sp = (unsigned long*)task->thread.esp; + sp = (unsigned long*)task->thread.sp; else sp = (unsigned long *)&sp; } diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index f7fecf9d47c3..965f2cc3a013 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -230,7 +230,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long dummy; stack = &dummy; if (tsk && tsk != current) - stack = (unsigned long *)tsk->thread.rsp; + stack = (unsigned long *)tsk->thread.sp; } /* @@ -366,7 +366,7 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp) if (sp == NULL) { if (tsk) - sp = (unsigned long *)tsk->thread.rsp; + sp = (unsigned long *)tsk->thread.sp; else sp = (unsigned long *)&sp; } diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 980e85b90091..e85bb44265cb 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -147,10 +147,10 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs) } tss = &per_cpu(init_tss, get_cpu()); - current->thread.esp0 = current->thread.saved_esp0; + current->thread.sp0 = current->thread.saved_sp0; current->thread.sysenter_cs = __KERNEL_CS; - load_esp0(tss, ¤t->thread); - current->thread.saved_esp0 = 0; + load_sp0(tss, ¤t->thread); + current->thread.saved_sp0 = 0; put_cpu(); ret = KVM86->regs32; @@ -207,7 +207,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs) int tmp, ret = -EPERM; tsk = current; - if (tsk->thread.saved_esp0) + if (tsk->thread.saved_sp0) goto out; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, vm86plus) - @@ -256,7 +256,7 @@ asmlinkage int sys_vm86(struct pt_regs regs) /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ ret = -EPERM; - if (tsk->thread.saved_esp0) + if (tsk->thread.saved_sp0) goto out; v86 = (struct vm86plus_struct __user *)regs.cx; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, @@ -318,15 +318,15 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk * Save old state, set default return value (%ax) to 0 */ info->regs32->ax = 0; - tsk->thread.saved_esp0 = tsk->thread.esp0; + tsk->thread.saved_sp0 = tsk->thread.sp0; tsk->thread.saved_fs = info->regs32->fs; savesegment(gs, tsk->thread.saved_gs); tss = &per_cpu(init_tss, get_cpu()); - tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; + tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; - load_esp0(tss, &tsk->thread); + load_sp0(tss, &tsk->thread); put_cpu(); tsk->thread.screen_bitmap = info->screen_bitmap; diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 599b6f2ed562..4cfda7dbe90f 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -62,7 +62,7 @@ static struct { void (*cpuid)(void /* non-c */); void (*_set_ldt)(u32 selector); void (*set_tr)(u32 selector); - void (*set_kernel_stack)(u32 selector, u32 esp0); + void (*set_kernel_stack)(u32 selector, u32 sp0); void (*allocate_page)(u32, u32, u32, u32, u32); void (*release_page)(u32, u32); void (*set_pte)(pte_t, pte_t *, unsigned); @@ -214,17 +214,17 @@ static void vmi_set_tr(void) vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct)); } -static void vmi_load_esp0(struct tss_struct 
*tss, +static void vmi_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - tss->x86_tss.esp0 = thread->esp0; + tss->x86_tss.sp0 = thread->sp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { tss->x86_tss.ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } - vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0); + vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0); } static void vmi_flush_tlb_user(void) @@ -793,7 +793,7 @@ static inline int __init activate_vmi(void) para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry); para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry); para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry); - para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); + para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack); para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask); para_fill(pv_cpu_ops.io_delay, IODelay); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index ea46d05853bb..c751e3c03e85 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -755,10 +755,10 @@ static void lguest_time_init(void) * segment), the privilege level (we're privilege level 1, the Host is 0 and * will not tolerate us trying to use that), the stack pointer, and the number * of pages in the stack. */ -static void lguest_load_esp0(struct tss_struct *tss, +static void lguest_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0, + lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0, THREAD_SIZE/PAGE_SIZE); } @@ -957,7 +957,7 @@ __init void lguest_init(void) pv_cpu_ops.cpuid = lguest_cpuid; pv_cpu_ops.load_idt = lguest_load_idt; pv_cpu_ops.iret = lguest_iret; - pv_cpu_ops.load_esp0 = lguest_load_esp0; + pv_cpu_ops.load_sp0 = lguest_load_sp0; pv_cpu_ops.load_tr_desc = lguest_load_tr_desc; pv_cpu_ops.set_ldt = lguest_set_ldt; pv_cpu_ops.load_tls = lguest_load_tls; diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index d97a6d7d062b..e0feb66a2408 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -243,9 +243,9 @@ void enable_sep_cpu(void) } tss->x86_tss.ss1 = __KERNEL_CS; - tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss; + tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss; wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); - wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0); put_cpu(); } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 29517faaa735..d81e8d709102 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -499,11 +499,11 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, preempt_enable(); } -static void xen_load_esp0(struct tss_struct *tss, +static void xen_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { struct multicall_space mcs = xen_mc_entry(0); - MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0); + MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); } @@ -968,7 +968,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { .write_ldt_entry = xen_write_ldt_entry, .write_gdt_entry = xen_write_gdt_entry, .write_idt_entry = xen_write_idt_entry, - .load_esp0 = xen_load_esp0, + 
.load_sp0 = xen_load_sp0, .set_iopl_mask = xen_set_iopl_mask, .io_delay = xen_io_delay, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 8e1234e14559..aafc54437403 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -239,10 +239,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt); ctxt->user_regs.cs = __KERNEL_CS; - ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs); + ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); ctxt->kernel_ss = __KERNEL_DS; - ctxt->kernel_sp = idle->thread.esp0; + ctxt->kernel_sp = idle->thread.sp0; ctxt->event_callback_cs = __KERNEL_CS; ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback; diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 96d0fd07c57d..44adb00e1490 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -94,7 +94,7 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages) /* Set up the two "TSS" members which tell the CPU what stack to use * for traps which do directly into the Guest (ie. traps at privilege * level 1). */ - pages->state.guest_tss.esp1 = lg->esp1; + pages->state.guest_tss.sp1 = lg->esp1; pages->state.guest_tss.ss1 = lg->ss1; /* Copy direct-to-Guest trap entries. */ @@ -416,7 +416,7 @@ void __init lguest_arch_host_init(void) /* We know where we want the stack to be when the Guest enters * the switcher: in pages->regs. The stack grows upwards, so * we start it at the end of that structure. */ - state->guest_tss.esp0 = (long)(&pages->regs + 1); + state->guest_tss.sp0 = (long)(&pages->regs + 1); /* And this is the GDT entry to use for the stack: we keep a * couple of special LGUEST entries. */ state->guest_tss.ss0 = LGUEST_DS; diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index be7b934f6c54..d1780e32722e 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -101,7 +101,7 @@ struct pv_cpu_ops { int entrynum, u32 low, u32 high); void (*write_idt_entry)(struct desc_struct *, int entrynum, u32 low, u32 high); - void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); + void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); void (*set_iopl_mask)(unsigned mask); @@ -449,10 +449,10 @@ static inline int paravirt_enabled(void) return pv_info.paravirt_enabled; } -static inline void load_esp0(struct tss_struct *tss, +static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread); + PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); } #define ARCH_SETUP pv_init_ops.arch_setup(); diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h index d50a4b48d441..6846cc346400 100644 --- a/include/asm-x86/processor_32.h +++ b/include/asm-x86/processor_32.h @@ -292,20 +292,17 @@ struct thread_struct; /* This is the TSS defined by the hardware. 
*/ struct i386_hw_tss { unsigned short back_link,__blh; - unsigned long esp0; + unsigned long sp0; unsigned short ss0,__ss0h; - unsigned long esp1; + unsigned long sp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ - unsigned long esp2; + unsigned long sp2; unsigned short ss2,__ss2h; unsigned long __cr3; - unsigned long eip; - unsigned long eflags; - unsigned long eax,ecx,edx,ebx; - unsigned long esp; - unsigned long ebp; - unsigned long esi; - unsigned long edi; + unsigned long ip; + unsigned long flags; + unsigned long ax, cx, dx, bx; + unsigned long sp, bp, si, di; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; @@ -346,10 +343,10 @@ struct tss_struct { struct thread_struct { /* cached TLS descriptors. */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - unsigned long esp0; + unsigned long sp0; unsigned long sysenter_cs; - unsigned long eip; - unsigned long esp; + unsigned long ip; + unsigned long sp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ @@ -366,7 +363,7 @@ struct thread_struct { /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; - unsigned long v86flags, v86mask, saved_esp0; + unsigned long v86flags, v86mask, saved_sp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; @@ -378,7 +375,7 @@ struct thread_struct { }; #define INIT_THREAD { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ @@ -393,7 +390,7 @@ struct thread_struct { */ #define INIT_TSS { \ .x86_tss = { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ @@ -503,9 +500,9 @@ static inline void rep_nop(void) #define cpu_relax() rep_nop() -static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) +static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - tss->x86_tss.esp0 = thread->esp0; + tss->x86_tss.sp0 = thread->sp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { tss->x86_tss.ss1 = thread->sysenter_cs; @@ -585,9 +582,9 @@ static inline void native_set_iopl_mask(unsigned mask) #define paravirt_enabled() 0 #define __cpuid native_cpuid -static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) +static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - native_load_esp0(tss, thread); + native_load_sp0(tss, thread); } /* diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h index 797770113e6d..0780f3e3fdfe 100644 --- a/include/asm-x86/processor_64.h +++ b/include/asm-x86/processor_64.h @@ -177,9 +177,9 @@ union i387_union { struct tss_struct { u32 reserved1; - u64 rsp0; - u64 rsp1; - u64 rsp2; + u64 sp0; + u64 sp1; + u64 sp2; u64 reserved2; u64 ist[7]; u32 reserved3; @@ -216,9 +216,9 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist); #endif struct thread_struct { - unsigned long rsp0; - unsigned long rsp; - unsigned long userrsp; /* Copy from PDA */ + unsigned long sp0; + unsigned long sp; + unsigned long usersp; /* Copy from PDA */ unsigned long fs; unsigned long gs; unsigned short es, ds, fsindex, gsindex; @@ -245,11 +245,11 @@ struct thread_struct { } 
__attribute__((aligned(16))); #define INIT_THREAD { \ - .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #define INIT_TSS { \ - .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #define INIT_MMAP \ @@ -293,10 +293,10 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ -#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) +#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) extern unsigned long get_wchan(struct task_struct *p); -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1) +#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ip) #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h index db6283eb5e46..f5b3f77f5310 100644 --- a/include/asm-x86/system_32.h +++ b/include/asm-x86/system_32.h @@ -28,9 +28,9 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc "1:\t" \ "popl %%ebp\n\t" \ "popfl" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ + :"=m" (prev->thread.sp),"=m" (prev->thread.ip), \ "=a" (last),"=S" (esi),"=D" (edi) \ - :"m" (next->thread.esp),"m" (next->thread.eip), \ + :"m" (next->thread.sp),"m" (next->thread.ip), \ "2" (prev), "d" (next)); \ } while (0) diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index 6e9e4841a2da..3dcb217a7202 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h @@ -40,7 +40,7 @@ RESTORE_CONTEXT \ : "=a" (last) \ : [next] "S" (next), [prev] "D" (prev), \ - [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)),\ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \ |