Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--   arch/i386/kernel/acpi/boot.c            5
-rw-r--r--   arch/i386/kernel/apic.c                 4
-rw-r--r--   arch/i386/kernel/kprobes.c             21
-rw-r--r--   arch/i386/kernel/ptrace.c               7
-rw-r--r--   arch/i386/kernel/setup.c                4
-rw-r--r--   arch/i386/kernel/smpboot.c              4
-rw-r--r--   arch/i386/kernel/timers/timer_tsc.c     4
-rw-r--r--   arch/i386/kernel/vm86.c                 2
8 files changed, 21 insertions, 30 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 049a25583793..40e5aba3ad3d 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -215,7 +215,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -1102,9 +1102,6 @@ int __init acpi_boot_table_init(void)
 	dmi_check_system(acpi_dmi_table);
 #endif
 
-	if (!cpu_has_apic)
-		return -ENODEV;
-
 	/*
 	 * If acpi_disabled, bail out
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 254cee9f0b7b..013b85df18c6 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -757,10 +757,6 @@ static int __init apic_set_verbosity(char *str)
 		apic_verbosity = APIC_DEBUG;
 	else if (strcmp("verbose", str) == 0)
 		apic_verbosity = APIC_VERBOSE;
-	else
-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug\n", str);
-
 	return 1;
 }
 
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 043f5292e70a..38806f427849 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -242,10 +242,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else {
-			if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-				goto no_kprobe;
-			}
 			if (*addr != BREAKPOINT_INSTRUCTION) {
 			/* The breakpoint instruction was removed by
 			 * another cpu right after we hit, no further
@@ -265,11 +261,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 
 	p = get_kprobe(addr);
 	if (!p) {
-		if (regs->eflags & VM_MASK) {
-			/* We are in virtual-8086 mode. Return 0 */
-			goto no_kprobe;
-		}
-
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -452,10 +443,11 @@ static void __kprobes resume_execution(struct kprobe *p,
 		*tos &= ~(TF_MASK | IF_MASK);
 		*tos |= kcb->kprobe_old_eflags;
 		break;
-	case 0xc3:		/* ret/lret */
-	case 0xcb:
-	case 0xc2:
+	case 0xc2:		/* iret/ret/lret */
+	case 0xc3:
 	case 0xca:
+	case 0xcb:
+	case 0xcf:
 	case 0xea:		/* jmp absolute -- eip is correct */
 		/* eip is already adjusted, no more changes required */
 		p->ainsn.boostable = 1;
@@ -463,10 +455,13 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xe8:		/* call relative - Fix return addr */
 		*tos = orig_eip + (*tos - copy_eip);
 		break;
+	case 0x9a:		/* call absolute -- same as call absolute, indirect */
+		*tos = orig_eip + (*tos - copy_eip);
+		goto no_change;
 	case 0xff:
 		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
-			/* call absolute, indirect */
 			/*
+			 * call absolute, indirect
 			 * Fix return addr; eip is correct.
 			 * But this is not boostable
 			 */
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 506462ef36a0..fd7eaf7866e0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -671,7 +671,7 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 
 	if (unlikely(current->audit_context)) {
 		if (entryexit)
-			audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
+			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
						regs->eax);
 	/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
 	 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
@@ -720,14 +720,13 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 		ret = is_sysemu;
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
+		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
				    regs->ebx, regs->ecx, regs->edx, regs->esi);
 	if (ret == 0)
 		return 0;
 
 	regs->orig_eax = -1; /* force skip of syscall restarting */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
-				   regs->eax);
+		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 	return 1;
 }
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 80cb3b2d0997..d77e89ac0d54 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -970,8 +970,10 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
  * not-overlapping, which is the case
  */
 int __init
-e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
 {
+	u64 start = s;
+	u64 end = e;
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index a6969903f2d6..825b2b4ca721 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -313,7 +313,9 @@ static void __init synchronize_tsc_bp (void)
 			if (tsc_values[i] < avg)
 				realdelta = -realdelta;
 
-			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
+			if (realdelta > 0)
+				printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+					"skew, fixed it up.\n", i, realdelta);
 		}
 
 		sum += delta;
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 5e41ee29c8cf..f1187ddb0d0f 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -279,7 +279,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_seqlock_irq(&xtime_lock);
 	if (!ref_freq) {
 		if (!freq->old){
@@ -312,7 +312,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	}
 
 end:
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_sequnlock_irq(&xtime_lock);
 
 	return 0;
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index aee14fafd13d..00e0118e717c 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -312,7 +312,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"