diff options
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/entry/vdso/vgetcpu.c  |  2 +-
-rw-r--r--  arch/x86/include/asm/segment.h | 22 +++++++++++-----------
-rw-r--r--  arch/x86/kernel/cpu/common.c   |  4 ++--
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c index de78fc9cd963..edd214f5264d 100644 --- a/arch/x86/entry/vdso/vgetcpu.c +++ b/arch/x86/entry/vdso/vgetcpu.c @@ -13,7 +13,7 @@ notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) { - vdso_read_cpu_node(cpu, node); + vdso_read_cpunode(cpu, node); return 0; } diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 4d1f6cc62e13..a314087add07 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -186,7 +186,7 @@ #define GDT_ENTRY_TLS_MIN 12 #define GDT_ENTRY_TLS_MAX 14 -#define GDT_ENTRY_CPU_NUMBER 15 +#define GDT_ENTRY_CPUNODE 15 /* * Number of entries in the GDT table: @@ -206,7 +206,7 @@ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3) #define __USER32_DS __USER_DS #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3) -#define __CPU_NUMBER_SEG (GDT_ENTRY_CPU_NUMBER*8 + 3) +#define __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3) #endif @@ -227,24 +227,24 @@ #ifdef CONFIG_X86_64 /* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */ -#define VDSO_CPU_SIZE 12 -#define VDSO_CPU_MASK 0xfff +#define VDSO_CPUNODE_BITS 12 +#define VDSO_CPUNODE_MASK 0xfff #ifndef __ASSEMBLY__ /* Helper functions to store/load CPU and node numbers */ -static inline unsigned long vdso_encode_cpu_node(int cpu, unsigned long node) +static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node) { - return ((node << VDSO_CPU_SIZE) | cpu); + return (node << VDSO_CPUNODE_BITS) | cpu; } -static inline void vdso_read_cpu_node(unsigned *cpu, unsigned *node) +static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) { unsigned int p; /* - * Load CPU and node number from GDT. LSL is faster than RDTSCP + * Load CPU and node number from the GDT. LSL is faster than RDTSCP * and works on all CPUs. 
This is volatile so that it orders * correctly with respect to barrier() and to keep GCC from cleverly * hoisting it out of the calling function. @@ -254,12 +254,12 @@ static inline void vdso_read_cpu_node(unsigned *cpu, unsigned *node) alternative_io ("lsl %[seg],%[p]", ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ X86_FEATURE_RDPID, - [p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG)); + [p] "=a" (p), [seg] "r" (__CPUNODE_SEG)); if (cpu) - *cpu = (p & VDSO_CPU_MASK); + *cpu = (p & VDSO_CPUNODE_MASK); if (node) - *node = (p >> VDSO_CPU_SIZE); + *node = (p >> VDSO_CPUNODE_BITS); } #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a148d18a1ef0..7da587f4af52 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1672,7 +1672,7 @@ static void wait_for_master_cpu(int cpu) #ifdef CONFIG_X86_64 static void setup_getcpu(int cpu) { - unsigned long cpudata = vdso_encode_cpu_node(cpu, early_cpu_to_node(cpu)); + unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); struct desc_struct d = { }; if (static_cpu_has(X86_FEATURE_RDTSCP)) @@ -1688,7 +1688,7 @@ static void setup_getcpu(int cpu) d.p = 1; /* Present */ d.d = 1; /* 32-bit */ - write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPU_NUMBER, &d, DESCTYPE_S); + write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); } #endif