Diffstat (limited to 'arch/i386/kernel/alternative.c')
-rw-r--r--  arch/i386/kernel/alternative.c  |  40
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 0695be538de5..206ea2ca63cc 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -2,8 +2,12 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
+#include <asm/pgtable.h>

 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_alt_once;
@@ -150,7 +154,7 @@ static void nop_out(void *insns, unsigned int len)
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
-               memcpy(insns, noptable[noplen], noplen);
+               text_poke(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
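For context, the hunk above shows only the body of nop_out()'s padding loop. A sketch of the full helper, reconstructed from the hunk context (find_nop_table() and the enclosing while loop are assumptions based on the surrounding file, not shown by this patch):

static void nop_out(void *insns, unsigned int len)
{
        unsigned char **noptable = find_nop_table();

        /* Chunk the region into runs of at most ASM_NOP_MAX bytes and
         * emit the best NOP sequence for each run.  After this patch the
         * write goes through text_poke(), so write-protected text works. */
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                text_poke(insns, noptable[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}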
@@ -202,7 +206,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
                        continue;
                if (*ptr > text_end)
                        continue;
-               **ptr = 0xf0;   /* lock prefix */
+               text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
        };
 }
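text_poke() takes a buffer pointer rather than a byte value, so the single 0xf0 lock prefix is passed as a C99 compound literal, which creates an addressable one-byte array. An equivalent, more verbose spelling (illustration only, not from the patch; note the opcode parameter is not const-qualified):

{
        unsigned char lock_prefix = 0xf0;       /* x86 LOCK prefix opcode */

        /* Same effect as text_poke(*ptr, ((unsigned char []){0xf0}), 1) */
        text_poke(*ptr, &lock_prefix, 1);
}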
@@ -360,10 +364,6 @@ void apply_paravirt(struct paravirt_patch_site *start,
                /* Pad the rest with nops */
                nop_out(p->instr + used, p->len - used);
        }
-
-       /* Sync to be conservative, in case we patched following
-        * instructions */
-       sync_core();
 }
 extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
@@ -406,3 +406,31 @@ void __init alternative_instructions(void)
        apply_paravirt(__parainstructions, __parainstructions_end);
        local_irq_restore(flags);
 }
+
+/*
+ * Warning:
+ * When you use this code to patch more than one byte of an instruction,
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also, no thread may be preempted in the middle of these instructions.
+ * And on the local CPU you need to be protected against NMI or MCE handlers
+ * seeing an inconsistent instruction while you patch.
+ */
+void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+{
+       u8 *addr = oaddr;
+       if (!pte_write(*lookup_address((unsigned long)addr))) {
+               struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
+               addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
+               if (!addr)
+                       return;
+               addr += ((unsigned long)oaddr) % PAGE_SIZE;
+       }
+       memcpy(addr, opcode, len);
+       sync_core();
+       /* Not strictly needed, but can speed up CPU recovery. Ignore the
+          cross-cacheline case. */
+       if (cpu_has_clflush)
+               asm("clflush (%0) " :: "r" (oaddr) : "memory");
+       if (addr != oaddr)
+               vunmap(addr);
+}
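The new primitive remaps write-protected pages through vmap() when the target PTE is read-only, serializes the local CPU with sync_core(), and flushes the patched line with clflush when available. A hedged sketch of a caller, e.g. a kprobes-style breakpoint insertion (hypothetical call site, not part of this patch):

/* Plant an INT3 breakpoint on a kernel text address that may live in a
 * write-protected (CONFIG_DEBUG_RODATA) region.  text_poke() handles the
 * remapping and the post-write serialization. */
static void __kprobes arm_breakpoint(void *kaddr)
{
        unsigned char int3 = 0xcc;      /* x86 breakpoint opcode */

        text_poke(kaddr, &int3, 1);
}

One caveat: vunmap() expects the base address returned by vmap(), but addr has already been advanced by oaddr's offset into the page, so the cleanup on the remapped path appears correct only for page-aligned targets; later kernels reworked text_poke() in this area.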