author    Ananth N Mavinakayanahalli <ananth@in.ibm.com>    2005-11-07 01:00:07 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>             2005-11-07 07:53:45 -0800
commit    e65845235c8120be63001fc1a4ac00c819194bbe
tree      209a081cc07375290743ceb45f52dc474f45382a /kernel
parent    66ff2d0691e00e1e7bfdf398a970310c9a0fe671
[PATCH] Kprobes: Track kprobe on a per_cpu basis - base changes
Changes to the base kprobe infrastructure to track kprobe execution on a
per-cpu basis.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
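The core of the patch below is the switch from a single global curr_kprobe
pointer to a per-CPU slot, DEFINE_PER_CPU(struct kprobe *, kprobe_instance),
with small set/reset helpers and per-CPU lookups in the aggregate handlers.
The following user-space sketch illustrates the same set/reset/lookup pattern;
it is an analogy only, using C11 thread-local storage in place of the kernel's
per-CPU variables, and struct probe, handle_fault() and the other names here
are illustrative stand-ins, not kprobes API.

/*
 * User-space sketch of the per-context "currently executing probe" pattern.
 * Thread-local storage stands in for the kernel's DEFINE_PER_CPU slot; the
 * struct probe type and handler names are hypothetical, for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct probe {
	const char *name;
	int (*fault_handler)(struct probe *p, int trapnr);
};

/* One "current probe" slot per thread (per CPU in the kernel). */
static _Thread_local struct probe *probe_instance;

static void set_probe_instance(struct probe *p)
{
	probe_instance = p;
}

static void reset_probe_instance(void)
{
	probe_instance = NULL;
}

static int demo_fault_handler(struct probe *p, int trapnr)
{
	printf("fault %d handled by probe %s\n", trapnr, p->name);
	return 1;
}

/*
 * Mirrors the shape of aggr_fault_handler() in the diff: consult only the
 * probe that was running in *this* context when the fault happened.
 */
static int handle_fault(int trapnr)
{
	struct probe *cur = probe_instance;

	if (cur && cur->fault_handler)
		return cur->fault_handler(cur, trapnr);
	return 0;
}

int main(void)
{
	struct probe p = { .name = "demo", .fault_handler = demo_fault_handler };

	set_probe_instance(&p);		/* a handler is about to run */
	handle_fault(14);		/* fault taken while it runs */
	reset_probe_instance();		/* handler finished */
	return 0;
}

In the kernel itself the helpers can use the __ (non-preempt-safe) per-CPU
accessors because, as the comment in the patch notes, preemption is already
disabled when a probe fires.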
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kprobes.c  43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..6da8f9b33d1e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -51,7 +51,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -188,6 +188,17 @@ void __kprobes unlock_kprobes(void)
local_irq_restore(flags);
}
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+ __get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+ __get_cpu_var(kprobe_instance) = NULL;
+}
+
/* You have to be holding the kprobe_lock */
struct kprobe __kprobes *get_kprobe(void *addr)
{
@@ -213,11 +224,11 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
list_for_each_entry(kp, &p->list, list) {
if (kp->pre_handler) {
- curr_kprobe = kp;
+ set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
}
- curr_kprobe = NULL;
+ reset_kprobe_instance();
}
return 0;
}
@@ -229,9 +240,9 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
list_for_each_entry(kp, &p->list, list) {
if (kp->post_handler) {
- curr_kprobe = kp;
+ set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
- curr_kprobe = NULL;
+ reset_kprobe_instance();
}
}
return;
@@ -240,12 +251,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
int trapnr)
{
+ struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
/*
* if we faulted "during" the execution of a user specified
* probe handler, invoke just that probe's fault handler
*/
- if (curr_kprobe && curr_kprobe->fault_handler) {
- if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+ if (cur && cur->fault_handler) {
+ if (cur->fault_handler(cur, regs, trapnr))
return 1;
}
return 0;
@@ -253,15 +266,15 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kprobe *kp = curr_kprobe;
- if (curr_kprobe && kp->break_handler) {
- if (kp->break_handler(kp, regs)) {
- curr_kprobe = NULL;
- return 1;
- }
+ struct kprobe *cur = __get_cpu_var(kprobe_instance);
+ int ret = 0;
+
+ if (cur && cur->break_handler) {
+ if (cur->break_handler(cur, regs))
+ ret = 1;
}
- curr_kprobe = NULL;
- return 0;
+ reset_kprobe_instance();
+ return ret;
}
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)