path: root/arch/i386/kernel/kprobes.c
author    James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 13:47:26 -0500
committer James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 13:47:26 -0500
commit    f0cd91a68acdc9b49d7f6738b514a426da627649 (patch)
tree      8ad73564015794197583b094217ae0a71e71e753 /arch/i386/kernel/kprobes.c
parent    60eef25701d25e99c991dd0f4a9f3832a0c3ad3e (diff)
parent    128e6ced247cda88f96fa9f2e4ba8b2c4a681560 (diff)
Merge ../linux-2.6
Diffstat (limited to 'arch/i386/kernel/kprobes.c')
-rw-r--r--  arch/i386/kernel/kprobes.c | 39
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index f19768789e8a..38806f427849 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/* insert a jmp code */
-static inline void set_jmp_op(void *from, void *to)
+static __always_inline void set_jmp_op(void *from, void *to)
{
struct __arch_jmp_op {
char op;
@@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from, void *to)
/*
* returns non-zero if opcodes can be boosted.
*/
-static inline int can_boost(kprobe_opcode_t opcode)
+static __always_inline int can_boost(kprobe_opcode_t opcode)
{
switch (opcode & 0xf0) {
case 0x70:
@@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcode_t opcode)
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
-static inline int is_IF_modifier(kprobe_opcode_t opcode)
+static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
switch (opcode) {
case 0xfa: /* cli */
@@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
mutex_unlock(&kprobe_mutex);
}
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -146,7 +146,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -154,7 +154,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
@@ -164,7 +164,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_eflags &= ~IF_MASK;
}
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
regs->eflags |= TF_MASK;
regs->eflags &= ~IF_MASK;
@@ -242,10 +242,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
- if (regs->eflags & VM_MASK) {
- /* We are in virtual-8086 mode. Return 0 */
- goto no_kprobe;
- }
if (*addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
@@ -265,11 +261,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
p = get_kprobe(addr);
if (!p) {
- if (regs->eflags & VM_MASK) {
- /* We are in virtual-8086 mode. Return 0 */
- goto no_kprobe;
- }
-
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
@@ -452,10 +443,11 @@ static void __kprobes resume_execution(struct kprobe *p,
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kcb->kprobe_old_eflags;
break;
- case 0xc3: /* ret/lret */
- case 0xcb:
- case 0xc2:
+ case 0xc2: /* iret/ret/lret */
+ case 0xc3:
case 0xca:
+ case 0xcb:
+ case 0xcf:
case 0xea: /* jmp absolute -- eip is correct */
/* eip is already adjusted, no more changes required */
p->ainsn.boostable = 1;
@@ -463,10 +455,13 @@ static void __kprobes resume_execution(struct kprobe *p,
case 0xe8: /* call relative - Fix return addr */
*tos = orig_eip + (*tos - copy_eip);
break;
+ case 0x9a: /* call absolute -- same as call absolute, indirect */
+ *tos = orig_eip + (*tos - copy_eip);
+ goto no_change;
case 0xff:
if ((p->ainsn.insn[1] & 0x30) == 0x10) {
- /* call absolute, indirect */
/*
+ * call absolute, indirect
* Fix return addr; eip is correct.
* But this is not boostable
*/
@@ -507,7 +502,7 @@ no_change:
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled throughout this function.
*/
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -543,7 +538,7 @@ out:
return 1;
}
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();