Diffstat (limited to 'arch/x86/tools')
-rwxr-xr-x arch/x86/tools/cpufeaturemasks.awk |  88
-rw-r--r-- arch/x86/tools/insn_decoder_test.c |   3
-rw-r--r-- arch/x86/tools/relocs.c            | 155
3 files changed, 107 insertions, 139 deletions
diff --git a/arch/x86/tools/cpufeaturemasks.awk b/arch/x86/tools/cpufeaturemasks.awk
new file mode 100755
index 000000000000..173d5bf2d999
--- /dev/null
+++ b/arch/x86/tools/cpufeaturemasks.awk
@@ -0,0 +1,88 @@
+#!/usr/bin/awk
+#
+# Convert cpufeatures.h to a list of compile-time masks
+# Note: this blithely assumes that each word has at least one
+# feature defined in it; if not, something else is wrong!
+#
+
+BEGIN {
+ printf "#ifndef _ASM_X86_CPUFEATUREMASKS_H\n";
+ printf "#define _ASM_X86_CPUFEATUREMASKS_H\n\n";
+
+ file = 0
+}
+
+FNR == 1 {
+ ++file;
+
+ # arch/x86/include/asm/cpufeatures.h
+ if (file == 1)
+ FS = "[ \t()*+]+";
+
+ # .config
+ if (file == 2)
+ FS = "=";
+}
+
+# Create a dictionary of sorts, containing all defined feature bits
+file == 1 && $1 ~ /^#define$/ && $2 ~ /^X86_FEATURE_/ {
+ nfeat = $3 * $4 + $5;
+ feat = $2;
+ sub(/^X86_FEATURE_/, "", feat);
+ feats[nfeat] = feat;
+}
+file == 1 && $1 ~ /^#define$/ && $2 == "NCAPINTS" {
+ ncapints = int($3);
+}
+
+# Create a dictionary featstat[REQUIRED|DISABLED, FEATURE_NAME] = on | off
+file == 2 && $1 ~ /^CONFIG_X86_(REQUIRED|DISABLED)_FEATURE_/ {
+ on = ($2 == "y");
+ if (split($1, fs, "CONFIG_X86_|_FEATURE_") == 3)
+ featstat[fs[2], fs[3]] = on;
+}
+
+END {
+ sets[1] = "REQUIRED";
+ sets[2] = "DISABLED";
+
+ for (ns in sets) {
+ s = sets[ns];
+
+ printf "/*\n";
+ printf " * %s features:\n", s;
+ printf " *\n";
+ fstr = "";
+ for (i = 0; i < ncapints; i++) {
+ mask = 0;
+ for (j = 0; j < 32; j++) {
+ feat = feats[i*32 + j];
+ if (featstat[s, feat]) {
+ nfstr = fstr " " feat;
+ if (length(nfstr) > 72) {
+ printf " * %s\n", fstr;
+ nfstr = " " feat;
+ }
+ fstr = nfstr;
+ mask += (2 ^ j);
+ }
+ }
+ masks[i] = mask;
+ }
+ printf " * %s\n */\n", fstr;
+
+ for (i = 0; i < ncapints; i++)
+ printf "#define %s_MASK%d\t0x%08xU\n", s, i, masks[i];
+
+ printf "\n#define %s_MASK_BIT_SET(x)\t\t\t\\\n", s;
+ printf "\t((\t\t\t\t\t";
+ for (i = 0; i < ncapints; i++) {
+ if (masks[i])
+ printf "\t\\\n\t\t((x) >> 5) == %2d ? %s_MASK%d :", i, s, i;
+ }
+ printf " 0\t\\\n";
+ printf "\t) & (1U << ((x) & 31)))\n\n";
+ }
+
+ printf "#endif /* _ASM_X86_CPUFEATUREMASKS_H */\n";
+}
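
For reference, the header this script emits looks roughly like the sketch below (the mask values, the feature list in the comment, and which words end up non-zero are invented here for illustration): every cpufeatures word 0..NCAPINTS-1 gets a REQUIRED_MASKn and a DISABLED_MASKn constant built from the CONFIG_X86_{REQUIRED,DISABLED}_FEATURE_* options, and the *_MASK_BIT_SET(x) macro selects the word with (x) >> 5 and tests bit (x) & 31, skipping words whose mask is zero.

#ifndef _ASM_X86_CPUFEATUREMASKS_H
#define _ASM_X86_CPUFEATUREMASKS_H

/*
 * REQUIRED features:
 *
 * FPU PSE TSC MSR CX8
 */
#define REQUIRED_MASK0	0x20100800U	/* value invented for illustration */
#define REQUIRED_MASK1	0x00000000U
/* ... one mask per NCAPINTS word, then the same block for DISABLED ... */

#define REQUIRED_MASK_BIT_SET(x)			\
	((						\
		((x) >> 5) ==  0 ? REQUIRED_MASK0 : 0	\
	) & (1U << ((x) & 31)))

#endif /* _ASM_X86_CPUFEATUREMASKS_H */

Because the chain of conditionals is built only from constants, the macro collapses to a compile-time constant whenever the feature number passed in is itself a constant.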
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 472540aeabc2..6c2986d2ad11 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -10,6 +10,7 @@
#include <assert.h>
#include <unistd.h>
#include <stdarg.h>
+#include <linux/kallsyms.h>
#define unlikely(cond) (cond)
@@ -106,7 +107,7 @@ static void parse_args(int argc, char **argv)
}
}
-#define BUFSIZE 256
+#define BUFSIZE (256 + KSYM_NAME_LEN)
int main(int argc, char **argv)
{
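
The BUFSIZE bump matters because the test reads objdump disassembly of the kernel line by line, and a line may now end with a symbol name of up to KSYM_NAME_LEN bytes. A minimal sketch of the sizing follows (the actual parsing is elided, and the KSYM_NAME_LEN value shown is an assumption; the real tool takes it from <linux/kallsyms.h>):

#include <stdio.h>

#define KSYM_NAME_LEN	512			/* assumed value, see <linux/kallsyms.h> */
#define BUFSIZE		(256 + KSYM_NAME_LEN)

int main(void)
{
	char line[BUFSIZE];

	/* Each objdump -d line: address, opcode bytes, mnemonic, and
	 * possibly a "<symbol+off>" suffix that can be very long. */
	while (fgets(line, sizeof(line), stdin))
		;	/* decode and compare, omitted */

	return 0;
}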
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 27441e5863b2..5778bc498415 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -29,9 +29,13 @@ static struct relocs relocs16;
static struct relocs relocs32;
#if ELF_BITS == 64
-static struct relocs relocs32neg;
static struct relocs relocs64;
# define FMT PRIu64
+
+#ifndef R_X86_64_REX_GOTPCRELX
+# define R_X86_64_REX_GOTPCRELX 42
+#endif
+
#else
# define FMT PRIu32
#endif
@@ -86,8 +90,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
- "__per_cpu_load|"
- "init_per_cpu__.*|"
"__end_rodata_hpage_align|"
#endif
"_end)$"
@@ -227,6 +229,7 @@ static const char *rel_type(unsigned type)
REL_TYPE(R_X86_64_PC16),
REL_TYPE(R_X86_64_8),
REL_TYPE(R_X86_64_PC8),
+ REL_TYPE(R_X86_64_REX_GOTPCRELX),
#else
REL_TYPE(R_386_NONE),
REL_TYPE(R_386_32),
@@ -284,34 +287,6 @@ static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
return name;
}
-static Elf_Sym *sym_lookup(const char *symname)
-{
- int i;
-
- for (i = 0; i < shnum; i++) {
- struct section *sec = &secs[i];
- long nsyms;
- char *strtab;
- Elf_Sym *symtab;
- Elf_Sym *sym;
-
- if (sec->shdr.sh_type != SHT_SYMTAB)
- continue;
-
- nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
- symtab = sec->symtab;
- strtab = sec->link->strtab;
-
- for (sym = symtab; --nsyms >= 0; sym++) {
- if (!sym->st_name)
- continue;
- if (strcmp(symname, strtab + sym->st_name) == 0)
- return sym;
- }
- }
- return 0;
-}
-
#if BYTE_ORDER == LITTLE_ENDIAN
# define le16_to_cpu(val) (val)
# define le32_to_cpu(val) (val)
@@ -760,100 +735,18 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
}
}
-/*
- * The .data..percpu section is a special case for x86_64 SMP kernels.
- * It is used to initialize the actual per_cpu areas and to provide
- * definitions for the per_cpu variables that correspond to their offsets
- * within the percpu area. Since the values of all of the symbols need
- * to be offsets from the start of the per_cpu area the virtual address
- * (sh_addr) of .data..percpu is 0 in SMP kernels.
- *
- * This means that:
- *
- * Relocations that reference symbols in the per_cpu area do not
- * need further relocation (since the value is an offset relative
- * to the start of the per_cpu area that does not change).
- *
- * Relocations that apply to the per_cpu area need to have their
- * offset adjusted by by the value of __per_cpu_load to make them
- * point to the correct place in the loaded image (because the
- * virtual address of .data..percpu is 0).
- *
- * For non SMP kernels .data..percpu is linked as part of the normal
- * kernel data and does not require special treatment.
- *
- */
-static int per_cpu_shndx = -1;
-static Elf_Addr per_cpu_load_addr;
-
-static void percpu_init(void)
-{
- int i;
-
- for (i = 0; i < shnum; i++) {
- ElfW(Sym) *sym;
-
- if (strcmp(sec_name(i), ".data..percpu"))
- continue;
-
- if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */
- return;
-
- sym = sym_lookup("__per_cpu_load");
- if (!sym)
- die("can't find __per_cpu_load\n");
-
- per_cpu_shndx = i;
- per_cpu_load_addr = sym->st_value;
-
- return;
- }
-}
-
#if ELF_BITS == 64
-/*
- * Check to see if a symbol lies in the .data..percpu section.
- *
- * The linker incorrectly associates some symbols with the
- * .data..percpu section so we also need to check the symbol
- * name to make sure that we classify the symbol correctly.
- *
- * The GNU linker incorrectly associates:
- * __init_begin
- * __per_cpu_load
- *
- * The "gold" linker incorrectly associates:
- * init_per_cpu__fixed_percpu_data
- * init_per_cpu__gdt_page
- */
-static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
-{
- int shndx = sym_index(sym);
-
- return (shndx == per_cpu_shndx) &&
- strcmp(symname, "__init_begin") &&
- strcmp(symname, "__per_cpu_load") &&
- strncmp(symname, "init_per_cpu_", 13);
-}
-
-
static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
const char *symname)
{
+ int headtext = !strcmp(sec_name(sec->shdr.sh_info), ".head.text");
unsigned r_type = ELF64_R_TYPE(rel->r_info);
ElfW(Addr) offset = rel->r_offset;
int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
-
if (sym->st_shndx == SHN_UNDEF)
return 0;
- /*
- * Adjust the offset if this reloc applies to the percpu section.
- */
- if (sec->shdr.sh_info == per_cpu_shndx)
- offset += per_cpu_load_addr;
-
switch (r_type) {
case R_X86_64_NONE:
/* NONE can be ignored. */
@@ -861,33 +754,23 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
case R_X86_64_PC32:
case R_X86_64_PLT32:
+ case R_X86_64_REX_GOTPCRELX:
/*
- * PC relative relocations don't need to be adjusted unless
- * referencing a percpu symbol.
+ * PC relative relocations don't need to be adjusted.
*
* NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
*/
- if (is_percpu_sym(sym, symname))
- add_reloc(&relocs32neg, offset);
break;
case R_X86_64_PC64:
/*
* Only used by jump labels
*/
- if (is_percpu_sym(sym, symname))
- die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n", symname);
break;
case R_X86_64_32:
case R_X86_64_32S:
case R_X86_64_64:
- /*
- * References to the percpu area don't need to be adjusted.
- */
- if (is_percpu_sym(sym, symname))
- break;
-
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
@@ -900,6 +783,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
break;
}
+ if (headtext) {
+ die("Absolute reference to symbol '%s' not permitted in .head.text\n",
+ symname);
+ break;
+ }
+
/*
* Relocation offsets for 64 bit kernels are output
* as 32 bits and sign extended back to 64 bits when
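
To illustrate what the new .head.text check rejects (the example below is not part of the patch; the function name and section attribute placement are hypothetical): code that runs from .head.text before relocations are applied must not embed absolute symbol addresses, so an R_X86_64_32/32S/64 relocation landing in that section is now a build-time error, while RIP-relative references (R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_REX_GOTPCRELX) still pass.

/* Hypothetical example, not from the patch: an absolute reference
 * emitted from .head.text, which relocs would now reject. */
extern char _text[];				/* linker-provided kernel symbol */

__attribute__((__section__(".head.text")))
unsigned long bad_head_reference(void)
{
	/* With the kernel's non-PIC code model this typically becomes
	 * "movq $_text, %reg", i.e. an absolute relocation. */
	return (unsigned long)_text;
}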
@@ -1049,7 +938,8 @@ static int cmp_relocs(const void *va, const void *vb)
static void sort_relocs(struct relocs *r)
{
- qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
+ if (r->count)
+ qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
}
static int write32(uint32_t v, FILE *f)
@@ -1093,7 +983,6 @@ static void emit_relocs(int as_text, int use_real_mode)
/* Order the relocations for more efficient processing */
sort_relocs(&relocs32);
#if ELF_BITS == 64
- sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
#else
sort_relocs(&relocs16);
@@ -1125,13 +1014,6 @@ static void emit_relocs(int as_text, int use_real_mode)
/* Now print each relocation */
for (i = 0; i < relocs64.count; i++)
write_reloc(relocs64.offset[i], stdout);
-
- /* Print a stop */
- write_reloc(0, stdout);
-
- /* Now print each inverse 32-bit relocation */
- for (i = 0; i < relocs32neg.count; i++)
- write_reloc(relocs32neg.offset[i], stdout);
#endif
/* Print a stop */
@@ -1184,9 +1066,6 @@ void process(FILE *fp, int use_real_mode, int as_text,
read_symtabs(fp);
read_relocs(fp);
- if (ELF_BITS == 64)
- percpu_init();
-
if (show_absolute_syms) {
print_absolute_symbols();
return;