Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--	arch/x86/kernel/ftrace.c	77
1 file changed, 41 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4dd0ad6c94d6..cace6e8d7cc7 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -118,13 +118,10 @@ ftrace_modify_code_direct(unsigned long ip, const char *old_code,
return ret;
/* replace the text with the new text */
- if (ftrace_poke_late) {
+ if (ftrace_poke_late)
text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
- } else {
- mutex_lock(&text_mutex);
- text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
- mutex_unlock(&text_mutex);
- }
+ else
+ text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
return 0;
}
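
Note: text_poke_early() is the boot-time patching primitive, used on the !ftrace_poke_late path because nothing else is running yet and the kernel text has not been made read-only, so no text_mutex is needed. A rough sketch of what it boils down to, condensed from arch/x86/kernel/alternative.c (the real function also special-cases module text; the _sketch name is ours):

static void *text_poke_early_sketch(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);	/* text is still writable this early */
	local_irq_restore(flags);
	sync_core();			/* serialize before executing patched bytes */
	return addr;
}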
@@ -321,7 +318,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
union ftrace_op_code_union op_ptr;
- void *ret;
+ int ret;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
start_offset = (unsigned long)ftrace_regs_caller;
@@ -352,15 +349,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
- ret = text_poke_copy(trampoline, (void *)start_offset, size);
- if (WARN_ON(!ret))
+ ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
+ if (WARN_ON(ret < 0))
goto fail;
ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
- text_poke_copy(ip, retq, sizeof(retq));
+ memcpy(ip, retq, sizeof(retq));
/* No need to test direct calls on created trampolines */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
@@ -368,7 +365,8 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
ip = trampoline + (jmp_offset - start_offset);
if (WARN_ON(*(char *)ip != 0x75))
goto fail;
- if (!text_poke_copy(ip, x86_nops[2], 2))
+ ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
+ if (ret < 0)
goto fail;
}
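
Note: the changed checks are deliberate, not cosmetic. text_poke_copy() returns the destination pointer (NULL on failure), while copy_from_kernel_nofault() returns 0 on success and a negative errno (typically -EFAULT) if reading the source faults; hence WARN_ON(!ret) becomes WARN_ON(ret < 0). A minimal sketch of the new convention, with hypothetical dst/src/size names:

	long err;

	/* dst is ordinary writable memory here; only the source read can fault */
	err = copy_from_kernel_nofault(dst, src, size);
	if (err < 0)		/* negative errno: source not mapped/readable */
		goto fail;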
@@ -381,7 +379,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
*/
ptr = (unsigned long *)(trampoline + size + RET_SIZE);
- text_poke_copy(ptr, &ops, sizeof(unsigned long));
+ *ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
@@ -397,7 +395,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
op_ptr.offset = offset;
/* put in the new offset to the ftrace_ops */
- text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
+ memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
/* put in the call to the function */
mutex_lock(&text_mutex);
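
Note on the offset math: op_ref above ({0x48, 0x8b, 0x15}) encodes mov disp32(%rip),%rdx, and on x86_64 a RIP-relative disp32 is measured from the end of the instruction. A self-contained worked example, assuming a 7-byte instruction (3 opcode bytes + 4 displacement bytes, i.e. OP_REF_SIZE) and made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t insn   = 0x1000;	/* address of the mov in the trampoline */
	uintptr_t target = 0x1040;	/* where the ftrace_ops pointer lives */

	/* disp32 is relative to the *next* instruction: insn + 7 */
	int32_t disp = (int32_t)(target - (insn + 7));

	printf("disp32 = %d\n", disp);	/* prints 57 (0x39) */
	return 0;
}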
@@ -407,9 +405,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
* the depth accounting before the call already.
*/
dest = ftrace_ops_get_func(ops);
- text_poke_copy_locked(trampoline + call_offset,
- text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
- CALL_INSN_SIZE, false);
+ memcpy(trampoline + call_offset,
+ text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
+ CALL_INSN_SIZE);
mutex_unlock(&text_mutex);
/* ALLOC_TRAMP flags lets us know we created it */
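
Note: text_gen_insn() with CALL_INSN_OPCODE emits the 5-byte e8 rel32 form, where rel32 is again relative to the end of the instruction. A hypothetical standalone equivalent of that computation (gen_call is our name, not a kernel symbol):

#include <stdint.h>
#include <string.h>

static void gen_call(uint8_t buf[5], uintptr_t ip, uintptr_t dest)
{
	int32_t rel = (int32_t)(dest - (ip + 5));	/* 5 = CALL_INSN_SIZE */

	buf[0] = 0xE8;			/* CALL rel32 opcode */
	memcpy(&buf[1], &rel, sizeof(rel));
}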
@@ -607,16 +605,8 @@ int ftrace_disable_ftrace_graph_caller(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
- unsigned long frame_pointer)
+static inline bool skip_ftrace_return(void)
{
- unsigned long return_hooker = (unsigned long)&return_to_handler;
- int bit;
-
/*
* When resuming from suspend-to-ram, this function can be indirectly
* called from early CPU startup code while the CPU is in real mode,
@@ -626,23 +616,31 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
* This check isn't as accurate as virt_addr_valid(), but it should be
* good enough for this purpose, and it's fast.
*/
- if (unlikely((long)__builtin_frame_address(0) >= 0))
- return;
+ if ((long)__builtin_frame_address(0) >= 0)
+ return true;
- if (unlikely(ftrace_graph_is_dead()))
- return;
+ if (ftrace_graph_is_dead())
+ return true;
- if (unlikely(atomic_read(&current->tracing_graph_pause)))
- return;
+ if (atomic_read(&current->tracing_graph_pause))
+ return true;
+ return false;
+}
- bit = ftrace_test_recursion_trylock(ip, *parent);
- if (bit < 0)
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(skip_ftrace_return()))
return;
if (!function_graph_enter(*parent, ip, frame_pointer, parent))
*parent = return_hooker;
-
- ftrace_test_recursion_unlock(bit);
}
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
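
Note: the (long)__builtin_frame_address(0) >= 0 test kept in skip_ftrace_return() works because x86_64 kernel virtual addresses occupy the upper half of the address space, so the sign bit is set. A runnable illustration of the same trick (addresses are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_upper_half(uint64_t addr)
{
	return (int64_t)addr < 0;	/* top bit set => upper-half address */
}

int main(void)
{
	printf("%d\n", is_upper_half(0xffffffff81000000ULL));	/* 1: kernel-like */
	printf("%d\n", is_upper_half(0x0000000000090000ULL));	/* 0: real-mode-like */
	return 0;
}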
@@ -651,8 +649,14 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
{
struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long *parent = (unsigned long *)stack;
- prepare_ftrace_return(ip, (unsigned long *)stack, 0);
+ if (unlikely(skip_ftrace_return()))
+ return;
+
+ if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+ *parent = return_hooker;
}
#endif
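
Note: in both callers, parent points at the stack slot holding the traced function's return address; function_graph_enter_regs() returns 0 once the real address has been recorded, and only then is the slot redirected to return_to_handler. A toy userspace model of that diversion (all _stub names are stand-ins, not kernel symbols):

#include <stdint.h>
#include <stdio.h>

static void return_to_handler_stub(void) { }
static int function_graph_enter_stub(void) { return 0; }	/* pretend success */

int main(void)
{
	uintptr_t slot = 0x401000;	/* pretend saved return address */
	uintptr_t *parent = &slot;

	if (!function_graph_enter_stub())	/* 0 == recorded OK */
		*parent = (uintptr_t)&return_to_handler_stub;

	printf("return slot now: %#lx\n", (unsigned long)*parent);
	return 0;
}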