author     Thomas Gleixner <tglx@linutronix.de>  2009-07-28 14:27:24 +0200
committer  Thomas Gleixner <tglx@linutronix.de>  2009-07-28 14:27:24 +0200
commit     8c6b7c11ce06f1faf124b31897de5107e6b6f676 (patch)
tree       180163574cb381f18831af8df1b45627997f5892
parent     ba62e915c085072c51cf74cf7019e6a0ad928039 (diff)
parent     665b55ef8df25e57fa82c3e24ded932e4db5c620 (diff)
Merge branch 'rt/atomic-locks' into rt/base
Conflicts:
	kernel/hrtimer.c

Manual fixup of kernel/time/timekeeping.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
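Nearly every hunk below applies one mechanical conversion: locks that must remain true spinning locks on PREEMPT_RT (and the seqlocks built on them) move from the spinlock_t/seqlock_t API to the atomic_spinlock_t/atomic_seqlock_t API this series introduces, freeing spinlock_t to become a sleeping lock later in the rt tree. A minimal sketch of the pattern, using only primitives visible in this diff (rt-tree API, not mainline; demo_lock is illustrative):

#include <linux/spinlock.h>     /* atomic_spinlock_t, from this series */

/* Before: static DEFINE_SPINLOCK(demo_lock); */
static DEFINE_ATOMIC_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
        unsigned long flags;

        /* Disables local interrupts and spins; never sleeps, even on RT. */
        atomic_spin_lock_irqsave(&demo_lock, flags);
        /* ... touch hardware or irq_desc state ... */
        atomic_spin_unlock_irqrestore(&demo_lock, flags);
}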
-rw-r--r--  arch/alpha/kernel/irq.c | 4
-rw-r--r--  arch/alpha/kernel/time.c | 12
-rw-r--r--  arch/arm/kernel/irq.c | 12
-rw-r--r--  arch/arm/kernel/time.c | 12
-rw-r--r--  arch/arm/mach-ns9xxx/irq.c | 8
-rw-r--r--  arch/arm/oprofile/common.c | 4
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c | 4
-rw-r--r--  arch/avr32/kernel/irq.c | 4
-rw-r--r--  arch/blackfin/kernel/irqchip.c | 6
-rw-r--r--  arch/blackfin/kernel/time.c | 12
-rw-r--r--  arch/cris/kernel/irq.c | 4
-rw-r--r--  arch/cris/kernel/time.c | 4
-rw-r--r--  arch/frv/kernel/irq.c | 4
-rw-r--r--  arch/frv/kernel/time.c | 4
-rw-r--r--  arch/h8300/kernel/irq.c | 4
-rw-r--r--  arch/h8300/kernel/time.c | 4
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 4
-rw-r--r--  arch/ia64/kernel/time.c | 8
-rw-r--r--  arch/ia64/xen/time.c | 4
-rw-r--r--  arch/m32r/kernel/irq.c | 4
-rw-r--r--  arch/m32r/kernel/time.c | 12
-rw-r--r--  arch/m68k/kernel/time.c | 8
-rw-r--r--  arch/m68knommu/kernel/time.c | 4
-rw-r--r--  arch/microblaze/kernel/irq.c | 4
-rw-r--r--  arch/mips/include/asm/i8253.h | 2
-rw-r--r--  arch/mips/kernel/i8253.c | 14
-rw-r--r--  arch/mips/kernel/irq.c | 4
-rw-r--r--  arch/mips/vr41xx/common/icu.c | 92
-rw-r--r--  arch/mn10300/kernel/irq.c | 4
-rw-r--r--  arch/mn10300/kernel/time.c | 4
-rw-r--r--  arch/parisc/kernel/irq.c | 4
-rw-r--r--  arch/parisc/kernel/time.c | 8
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kernel/time.c | 4
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 8
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 4
-rw-r--r--  arch/powerpc/sysdev/uic.c | 8
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/sh/kernel/irq.c | 6
-rw-r--r--  arch/sparc/kernel/irq_64.c | 8
-rw-r--r--  arch/sparc/kernel/pcic.c | 8
-rw-r--r--  arch/sparc/kernel/time_32.c | 12
-rw-r--r--  arch/um/kernel/irq.c | 4
-rw-r--r--  arch/x86/include/asm/i8253.h | 2
-rw-r--r--  arch/x86/include/asm/i8259.h | 2
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 2
-rw-r--r--  arch/x86/include/asm/vgtod.h | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 118
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 6
-rw-r--r--  arch/x86/kernel/apm_32.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 6
-rw-r--r--  arch/x86/kernel/i8253.c | 14
-rw-r--r--  arch/x86/kernel/i8259.c | 30
-rw-r--r--  arch/x86/kernel/irq.c | 4
-rw-r--r--  arch/x86/kernel/irq_64.c | 6
-rw-r--r--  arch/x86/kernel/time_32.c | 4
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 6
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 14
-rw-r--r--  arch/x86/kvm/i8254.c | 10
-rw-r--r--  arch/x86/kvm/i8254.h | 2
-rw-r--r--  arch/x86/kvm/i8259.c | 6
-rw-r--r--  arch/x86/kvm/irq.h | 2
-rw-r--r--  arch/x86/mm/tlb.c | 8
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 4
-rw-r--r--  arch/x86/pci/common.c | 2
-rw-r--r--  arch/x86/pci/direct.c | 16
-rw-r--r--  arch/x86/pci/mmconfig_32.c | 8
-rw-r--r--  arch/x86/pci/numaq_32.c | 8
-rw-r--r--  arch/x86/pci/pcbios.c | 8
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 8
-rw-r--r--  arch/xtensa/kernel/irq.c | 4
-rw-r--r--  drivers/acpi/processor_idle.c | 10
-rw-r--r--  drivers/block/hd.c | 4
-rw-r--r--  drivers/input/gameport/gameport.c | 4
-rw-r--r--  drivers/input/joystick/analog.c | 4
-rw-r--r--  drivers/input/misc/pcspkr.c | 6
-rw-r--r--  drivers/oprofile/event_buffer.c | 4
-rw-r--r--  drivers/oprofile/oprofilefs.c | 6
-rw-r--r--  drivers/pci/access.c | 34
-rw-r--r--  drivers/video/console/vgacon.c | 42
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/init_task.h | 4
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/oprofile.h | 2
-rw-r--r--  include/linux/percpu_counter.h | 2
-rw-r--r--  include/linux/perf_counter.h | 2
-rw-r--r--  include/linux/plist.h | 41
-rw-r--r--  include/linux/proportions.h | 6
-rw-r--r--  include/linux/rtmutex.h | 8
-rw-r--r--  include/linux/rwlock.h | 151
-rw-r--r--  include/linux/rwlock_types.h | 56
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/linux/seqlock.h | 86
-rw-r--r--  include/linux/spinlock.h | 365
-rw-r--r--  include/linux/spinlock_api_smp.h | 60
-rw-r--r--  include/linux/spinlock_api_up.h | 26
-rw-r--r--  include/linux/spinlock_types.h | 95
-rw-r--r--  include/linux/time.h | 2
-rw-r--r--  kernel/Makefile | 6
-rw-r--r--  kernel/cgroup.c | 18
-rw-r--r--  kernel/exit.c | 2
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/futex.c | 48
-rw-r--r--  kernel/hrtimer.c | 59
-rw-r--r--  kernel/irq/autoprobe.c | 20
-rw-r--r--  kernel/irq/chip.c | 74
-rw-r--r--  kernel/irq/handle.c | 22
-rw-r--r--  kernel/irq/internals.h | 2
-rw-r--r--  kernel/irq/manage.c | 46
-rw-r--r--  kernel/irq/migration.c | 2
-rw-r--r--  kernel/irq/numa_migrate.c | 8
-rw-r--r--  kernel/irq/pm.c | 8
-rw-r--r--  kernel/irq/proc.c | 4
-rw-r--r--  kernel/irq/spurious.c | 14
-rw-r--r--  kernel/kprobes.c | 34
-rw-r--r--  kernel/lock-internals.h | 75
-rw-r--r--  kernel/perf_counter.c | 102
-rw-r--r--  kernel/posix-cpu-timers.c | 8
-rw-r--r--  kernel/printk.c | 38
-rw-r--r--  kernel/rcupreempt.c | 60
-rw-r--r--  kernel/rtmutex-debug.c | 4
-rw-r--r--  kernel/rtmutex.c | 106
-rw-r--r--  kernel/rwlock.c | 223
-rw-r--r--  kernel/sched.c | 193
-rw-r--r--  kernel/sched_cpupri.c | 10
-rw-r--r--  kernel/sched_cpupri.h | 2
-rw-r--r--  kernel/sched_debug.c | 4
-rw-r--r--  kernel/sched_idletask.c | 4
-rw-r--r--  kernel/sched_rt.c | 64
-rw-r--r--  kernel/sched_stats.h | 12
-rw-r--r--  kernel/smp.c | 52
-rw-r--r--  kernel/softlockup.c | 6
-rw-r--r--  kernel/spinlock.c | 318
-rw-r--r--  kernel/time.c | 8
-rw-r--r--  kernel/time/clockevents.c | 14
-rw-r--r--  kernel/time/clocksource.c | 38
-rw-r--r--  kernel/time/ntp.c | 8
-rw-r--r--  kernel/time/tick-broadcast.c | 42
-rw-r--r--  kernel/time/tick-common.c | 28
-rw-r--r--  kernel/time/tick-internal.h | 2
-rw-r--r--  kernel/time/tick-sched.c | 12
-rw-r--r--  kernel/time/timekeeping.c | 47
-rw-r--r--  kernel/time/timer_list.c | 6
-rw-r--r--  kernel/time/timer_stats.c | 20
-rw-r--r--  kernel/trace/ring_buffer.c | 44
-rw-r--r--  kernel/trace/trace.c | 10
-rw-r--r--  kernel/trace/trace_irqsoff.c | 6
-rw-r--r--  lib/debugobjects.c | 74
-rw-r--r--  lib/dec_and_lock.c | 8
-rw-r--r--  lib/kernel_lock.c | 4
-rw-r--r--  lib/percpu_counter.c | 18
-rw-r--r--  lib/plist.c | 8
-rw-r--r--  lib/proportions.c | 12
-rw-r--r--  lib/ratelimit.c | 8
-rw-r--r--  lib/spinlock_debug.c | 24
-rw-r--r--  sound/drivers/pcsp/pcsp.h | 2
-rw-r--r--  sound/drivers/pcsp/pcsp_input.c | 4
-rw-r--r--  sound/drivers/pcsp/pcsp_lib.c | 12
164 files changed, 2143 insertions(+), 1688 deletions(-)
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index cc7834661427..2e0b75e80696 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
#endif
if (irq < ACTUAL_NR_IRQS) {
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[irq].lock, flags);
action = irq_desc[irq].action;
if (!action)
goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index b04e2cbf23a4..991a967eadcc 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -106,7 +106,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
profile_tick(CPU_PROFILING);
#endif
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
/*
* Calculate how many ticks have passed since the last update,
@@ -136,7 +136,7 @@ irqreturn_t timer_interrupt(int irq, void *dev)
state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
while (nticks--)
@@ -416,14 +416,14 @@ do_gettimeofday(struct timeval *tv)
unsigned long delta_cycles, delta_usec, partial_tick;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
delta_cycles = rpcc() - state.last_time;
sec = xtime.tv_sec;
usec = (xtime.tv_nsec / 1000);
partial_tick = state.partial_tick;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
#ifdef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
@@ -470,7 +470,7 @@ do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/* The offset that is added into time in do_gettimeofday above
must be subtracted out here to keep a coherent view of the
@@ -496,7 +496,7 @@ do_settimeofday(struct timespec *tv)
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
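The xtime_lock hunks here and in the timekeeping files that follow share one seqlock protocol: the timer tick writes under the sequence lock, and do_gettimeofday() retries its read until no writer intervened. A condensed sketch with the atomic_seqlock primitives used above (rt-tree names; fields and error handling abbreviated):

static irqreturn_t demo_timer_interrupt(int irq, void *dev)
{
        write_atomic_seqlock(&xtime_lock);      /* writer: bumps sequence */
        do_timer(1);                            /* updates xtime */
        write_atomic_sequnlock(&xtime_lock);
        return IRQ_HANDLED;
}

static void demo_gettimeofday(struct timeval *tv)
{
        unsigned long flags, seq, sec, usec;

        do {    /* reader: retry if a writer ran in between */
                seq  = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
                sec  = xtime.tv_sec;
                usec = xtime.tv_nsec / 1000;
        } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));

        tv->tv_sec  = sec;
        tv->tv_usec = usec;
}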
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b7c3490eaa24..6ea2a033f25f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
}
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
/*
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 4cdc4a0bd02d..8a545e260903 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -244,11 +244,11 @@ void do_gettimeofday(struct timeval *tv)
unsigned long usec, sec;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = system_timer->offset();
sec = xtime.tv_sec;
usec += xtime.tv_nsec / 1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
/* usec may have gone up a lot: be safe */
while (usec >= 1000000) {
@@ -270,7 +270,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
@@ -286,7 +286,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
@@ -336,9 +336,9 @@ void timer_tick(void)
profile_tick(CPU_PROFILING);
do_leds();
do_set_rtc();
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91de..6873b7bd70d8 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
BUG_ON(desc->status & IRQ_INPROGRESS);
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
goto out_mask;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
* Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
/* ack unconditionally to unmask lower prio irqs */
desc->chip->ack(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
#define handle_irq handle_prio_irq
#endif
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 3fcd752d6146..695f8e2e6297 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -48,9 +48,9 @@ static int op_arm_setup(void)
{
int ret;
- spin_lock(&oprofilefs_lock);
+ atomic_spin_lock(&oprofilefs_lock);
ret = op_arm_model->setup_ctrs();
- spin_unlock(&oprofilefs_lock);
+ atomic_spin_unlock(&oprofilefs_lock);
return ret;
}
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4ce0f9801e2e..55764a06f99f 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -263,10 +263,10 @@ static void em_route_irq(int irq, unsigned int cpu)
struct irq_desc *desc = irq_desc + irq;
const struct cpumask *mask = cpumask_of(cpu);
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
cpumask_copy(desc->affinity, mask);
desc->chip->set_affinity(irq, mask);
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
static int em_setup(void)
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 9f572229d318..778f6efba857 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -66,7 +66,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 4b5fd36187d9..d82bcb04c156 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -46,7 +46,7 @@ void ack_bad_irq(unsigned int irq)
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(irq_desc->lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -62,7 +62,7 @@ int show_interrupts(struct seq_file *p, void *v)
unsigned long flags;
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
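Statically allocated descriptors switch their initializer macro together with the lock type, as bad_irq_desc above shows. A sketch of that pattern (demo_desc is illustrative; __ATOMIC_SPIN_LOCK_UNLOCKED is this series' replacement for __SPIN_LOCK_UNLOCKED):

struct demo_desc {
        atomic_spinlock_t       lock;
        unsigned int            status;
};

static struct demo_desc demo_desc = {
        /* compile-time init: the lock is usable before any init code runs */
        .lock   = __ATOMIC_SPIN_LOCK_UNLOCKED(demo_desc.lock),
        .status = 0,
};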
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index adb54aa7d7c8..96b676a8714d 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -128,7 +128,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
/* last time the cmos clock got updated */
static long last_rtc_update;
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
/*
@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
/* Do it again in 60s. */
last_rtc_update = xtime.tv_sec - 600;
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifdef CONFIG_IPIPE
update_root_process_times(get_irq_regs());
@@ -192,12 +192,12 @@ void do_gettimeofday(struct timeval *tv)
unsigned long usec, sec;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = gettimeoffset();
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / NSEC_PER_USEC);
}
- while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= USEC_PER_SEC) {
usec -= USEC_PER_SEC;
@@ -217,7 +217,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set the xtime.tv_usec
* correctly. However, the value in this location is
@@ -235,7 +235,7 @@ int do_settimeofday(struct timespec *tv)
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 7f642fcffbfc..45a270e733dd 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 074fe7dea96b..72408edec3be 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -87,7 +87,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
@@ -103,7 +103,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b3..7b29f55b9eeb 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index fb0ce7577225..fced1e333fcb 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -70,7 +70,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
* the irq version of write_lock because as just said we have irq
* locally disabled. -arca
*/
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
@@ -96,7 +96,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 74f8dd7b34d2..7dde350adf98 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -191,7 +191,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_puts(p, " CPU0");
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -205,7 +205,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 7f2d6cfbb4b6..cb110a42d6fa 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -35,9 +35,9 @@ void h8300_timer_tick(void)
{
if (current->pid)
profile_tick(CPU_PROFILING);
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
}
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..26f5123754e1 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..70763a087ea2 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4990495d7531..c6e8a37dc45a 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -197,10 +197,10 @@ timer_interrupt (int irq, void *dev_id)
* another CPU. We need to avoid to SMP race by acquiring the
* xtime_lock.
*/
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
local_cpu_data->itm_next = new_itm;
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
} else
local_cpu_data->itm_next = new_itm;
@@ -477,7 +477,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ write_atomic_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -500,6 +500,6 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ write_atomic_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index fb8332690179..7ec3f56cff98 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -141,10 +141,10 @@ consider_steal_time(unsigned long new_itm)
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
if (cpu == time_keeper_id) {
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(stolen + blocked);
local_cpu_data->itm_next = delta_itm + new_itm;
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 8dfd31e87c4c..351e82d6c7f4 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b990..3e00242695a5 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,7 +106,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
usec = do_gettimeoffset();
@@ -120,7 +120,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
while (usec >= 1000000) {
usec -= 1000000;
@@ -141,7 +141,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/*
* This is revolting. We need to set "xtime" correctly. However, the
* value in this location is the value at the most recent update of
@@ -157,7 +157,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
@@ -202,7 +202,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
if (ntp_synced()
&& xtime.tv_sec > last_rtc_update + 660
&& (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
@@ -213,7 +213,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
else /* do it again in 60 s */
last_rtc_update = xtime.tv_sec - 600;
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 54d980795fc4..612259cb8b3a 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -102,7 +102,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = mach_gettimeoffset();
@@ -116,7 +116,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += xtime.tv_nsec/1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
@@ -138,7 +138,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
/* This is revolting. We need to set the xtime.tv_nsec
* correctly. However, the value in this location is
* is value at the last tick.
@@ -154,7 +154,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index d182b2f72211..d3c646d05ff2 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -44,11 +44,11 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
if (current->pid)
profile_tick(CPU_PROFILING);
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index f688ee93e3b9..2c63c879b4ab 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -77,7 +77,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < nr_irq) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -98,7 +98,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 032ca73f181b..ed7899f45b4e 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,7 +12,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern atomic_spinlock_t i8253_lock;
extern void setup_pit_timer(void);
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index f7d8d5d0ddbf..4ac943db87c8 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -15,7 +15,7 @@
#include <asm/io.h>
#include <asm/time.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_ATOMIC_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock);
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
switch(mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
}
/*
@@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
outb_p(delta & 0xff , PIT_CH0); /* LSB */
outb(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
return 0;
}
@@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs)
static int old_count;
static u32 old_jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
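When a lock is shared across compilation units, the extern declaration and the definition must change type together, which is why the asm/i8253.h and kernel/i8253.c hunks travel as a pair. Condensed from the hunks above (the caller-context comment is an inference, not from the diff):

/* arch/mips/include/asm/i8253.h */
extern atomic_spinlock_t i8253_lock;

/* arch/mips/kernel/i8253.c */
DEFINE_ATOMIC_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);

static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
        atomic_spin_lock(&i8253_lock);  /* callers run with irqs disabled */
        outb_p(delta & 0xff, PIT_CH0);  /* LSB */
        outb(delta >> 8, PIT_CH0);      /* MSB */
        atomic_spin_unlock(&i8253_lock);
        return 0;
}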
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff4..50a74511d5c8 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b170..3da2ed286530 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, PCIINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, SCUINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, BCUINTR);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
pin = SYSINT1_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
intassign0 = icu1_read(INTASSIGN0);
intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
intassign1 |= (uint16_t)assign << 9;
break;
default:
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN0, intassign0);
icu1_write(INTASSIGN1, intassign1);
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
return 0;
}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
pin = SYSINT2_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
intassign2 = icu1_read(INTASSIGN2);
intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
intassign3 |= (uint16_t)assign << 12;
break;
default:
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN2, intassign2);
icu1_write(INTASSIGN3, intassign3);
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
return 0;
}
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 4c3c58ef5cda..c076cff8caeb 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)
/* display information rows, one per active CPU */
case 1 ... NR_IRQS - 1:
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
break;
/* polish off with NMI and error counters */
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 395caf01b909..b25588ca0b2c 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -99,7 +99,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
while (tsc = get_cycles(),
elapse = mn10300_last_tsc - tsc, /* time elapsed since last
@@ -114,7 +114,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
check_rtc_time();
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 330f536a9324..5bbc62b3a7dd 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index a79c6f9e7e2c..908cfde81e80 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -163,9 +163,9 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
}
if (cpu == 0) {
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
do_timer(ticks_elapsed);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
}
return IRQ_HANDLED;
@@ -268,12 +268,12 @@ void __init time_init(void)
if (pdc_tod_read(&tod_data) == 0) {
unsigned long flags;
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
xtime.tv_sec = tod_data.tod_sec;
xtime.tv_nsec = tod_data.tod_usec * 1000;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
} else {
printk(KERN_ERR "Error reading tod clock\n");
xtime.tv_sec = 0;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f7f376ea7b17..6ee328240556 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -191,7 +191,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action || !action->handler)
goto skip;
@@ -212,7 +212,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized){
@@ -1065,7 +1065,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
for (i = 1; i < NR_IRQS; i++) {
desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
seq_printf(m, "%5d ", i);
@@ -1084,7 +1084,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%s\n", p);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index eae4511ceeac..15d42919c63c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1000,7 +1000,7 @@ void __init time_init(void)
/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
boot_tb = get_tb_or_rtc();
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
@@ -1014,7 +1014,7 @@ void __init time_init(void)
vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* Register the clocksource, if we're not running on iSeries */
if (!firmware_has_feature(FW_FEATURE_ISERIES))
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 68e4f1696d14..afffc6dc0c51 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
u32 status, enable;
/* Mask off the cascaded IRQ */
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->chip->mask(virq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
}
/* Processing done; can reenable the cascade now */
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED))
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 882e47080e74..0d3022ece2fb 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -237,7 +237,7 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
goto out_eoi;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 94f444758836..af7cce0f3e94 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
struct irq_desc *desc = get_irq_desc(irq);
if (desc && desc->chip && desc->chip->startup) {
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->chip->startup(irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 0e8db6771252..82d4513aa6dc 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -70,12 +70,12 @@ static int irq_in_use(unsigned int irq)
{
int rc = 0;
unsigned long flags;
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
if (desc->action)
rc = 1;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 419f8a637ffe..d4fbbdcebdea 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -851,7 +851,7 @@ void xics_migrate_irqs_away(void)
|| desc->chip->set_affinity == NULL)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
@@ -875,7 +875,7 @@ void xics_migrate_irqs_away(void)
cpumask_setall(irq_desc[virq].affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index da38a1ff97bb..d5702c4e2445 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 intr_index;
u32 have_shift = 0;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (desc->chip->mask_ack)
desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
break;
}
unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
static int __devinit fsl_of_msi_probe(struct of_device *dev,
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 466ce9ace127..2492a4aa6cbd 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
int src;
int subvirq;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->mask(virq);
else
desc->chip->mask_ack(virq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
generic_handle_irq(subvirq);
uic_irq_ret:
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..5582ff17666d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -272,14 +272,14 @@ void __init time_init(void)
* small for /proc/uptime to be accurate.
* Reset xtime and wall_to_monotonic to sane values.
*/
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
now = get_clock();
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 3d09062f4682..b17f303cabdb 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -1,4 +1,4 @@
-/*
+1/*
* linux/arch/sh/kernel/irq.c
*
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
@@ -67,7 +67,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -88,7 +88,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f0ee79055409..0cb8a194e436 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
unsigned long flags;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
}
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
tick_ops->disable_irq();
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 85e7037429b9..bea30b24252b 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -703,10 +703,10 @@ static void pcic_clear_clock_irq(void)
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
- write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
+ write_atomic_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
@@ -766,7 +766,7 @@ static void pci_do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
/*
@@ -779,7 +779,7 @@ static void pci_do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 614ac7b4a9dd..653094262ee0 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -93,7 +93,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
#endif
/* Protect counter clear so that do_gettimeoffset works */
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
clear_clock_irq();
@@ -109,7 +109,7 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
else
last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
@@ -251,7 +251,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick = tick_usec - tickadj;
do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ seq = read_atomic_seqbegin_irqsave(&xtime_lock, flags);
usec = do_gettimeoffset();
/*
@@ -264,7 +264,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ } while (read_atomic_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
@@ -281,9 +281,9 @@ int do_settimeofday(struct timespec *tv)
{
int ret;
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
ret = bus_do_settimeofday(tv);
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
return ret;
}
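The sparc gettimeofday paths above are the reader side of the renamed seqlock: the snapshot is retried until the sequence count proves no writer ran concurrently. A sketch of that retry loop, assuming read_atomic_seqbegin_irqsave()/read_atomic_seqretry_irqrestore() keep the semantics of their seqlock counterparts (the demo_* names are illustrative):

	static atomic_seqlock_t demo_time_lock =
		__ATOMIC_SEQLOCK_UNLOCKED(demo_time_lock);
	static struct timespec demo_time;

	static struct timespec demo_get_time(void)
	{
		struct timespec ts;
		unsigned long seq, flags;

		do {
			seq = read_atomic_seqbegin_irqsave(&demo_time_lock, flags);
			ts = demo_time;	/* may race with a writer ... */
		} while (read_atomic_seqretry_irqrestore(&demo_time_lock, seq, flags));
		/* ... but the loop only exits once the snapshot was consistent */
		return ts;
	}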
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 454cdb43e351..048ca4abba99 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -33,7 +33,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..a2bd5b5ec560 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern atomic_spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..47201262b113 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
-extern spinlock_t i8259A_lock;
+extern atomic_spinlock_t i8259A_lock;
extern void init_8259A(int auto_eoi);
extern void enable_8259A_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988eee3a..a2baf26a70f5 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -83,7 +83,7 @@ struct irq_routing_table {
extern unsigned int pcibios_irq_mask;
extern int pcibios_scanned;
-extern spinlock_t pci_config_lock;
+extern atomic_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index dc27a69e5d2a..c5360008a5b6 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
#include <linux/clocksource.h>
struct vsyscall_gtod_data {
- seqlock_t lock;
+ atomic_seqlock_t lock;
/* open coded 'struct timespec' */
time_t wall_time_sec;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ce322b2b4c6e..c3495d091916 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
*/
int sis_apic_bug = -1;
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_ATOMIC_SPINLOCK(ioapic_lock);
+static DEFINE_ATOMIC_SPINLOCK(vector_lock);
/*
* # of IRQ routing registers
@@ -413,7 +413,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
struct irq_pin_list *entry;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
@@ -425,14 +425,14 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
reg = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
if (reg & IO_APIC_REDIR_REMOTE_IRR) {
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
if (!entry->next)
break;
entry = entry->next;
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return false;
}
@@ -446,10 +446,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
union entry_union eu;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return eu.entry;
}
@@ -472,9 +472,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(apic, pin, e);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -487,10 +487,10 @@ static void ioapic_mask_entry(int apic, int pin)
unsigned long flags;
union entry_union eu = { .entry.mask = 1 };
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
@@ -622,9 +622,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
BUG_ON(!cfg);
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
__mask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -632,9 +632,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void mask_IO_APIC_irq(unsigned int irq)
@@ -1158,12 +1158,12 @@ void lock_vector_lock(void)
/* Used to ensure that the online set of cpus does not change
* during assign_irq_vector.
*/
- spin_lock(&vector_lock);
+ atomic_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
- spin_unlock(&vector_lock);
+ atomic_spin_unlock(&vector_lock);
}
static int
@@ -1251,9 +1251,9 @@ assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
int err;
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ atomic_spin_lock_irqsave(&vector_lock, flags);
err = __assign_irq_vector(irq, cfg, mask);
- spin_unlock_irqrestore(&vector_lock, flags);
+ atomic_spin_unlock_irqrestore(&vector_lock, flags);
return err;
}
@@ -1623,14 +1623,14 @@ __apicdebuginit(void) print_IO_APIC(void)
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0);
reg_01.raw = io_apic_read(apic, 1);
if (reg_01.bits.version >= 0x10)
reg_02.raw = io_apic_read(apic, 2);
if (reg_01.bits.version >= 0x20)
reg_03.raw = io_apic_read(apic, 3);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1856,7 +1856,7 @@ __apicdebuginit(void) print_PIC(void)
printk(KERN_DEBUG "\nprinting PIC contents\n");
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
v = inb(0xa1) << 8 | inb(0x21);
printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1870,7 +1870,7 @@ __apicdebuginit(void) print_PIC(void)
outb(0x0a,0xa0);
outb(0x0a,0x20);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
@@ -1909,9 +1909,9 @@ void __init enable_IO_APIC(void)
* The number of IO-APIC IRQ registers (== #pins):
*/
for (apic = 0; apic < nr_ioapics; apic++) {
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(apic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
nr_ioapic_registers[apic] = reg_01.bits.entries+1;
}
for(apic = 0; apic < nr_ioapics; apic++) {
@@ -2045,9 +2045,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
/* Read the register 0 value */
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
old_id = mp_ioapics[apic_id].apicid;
@@ -2106,16 +2106,16 @@ static void __init setup_ioapic_ids_from_mpc(void)
mp_ioapics[apic_id].apicid);
reg_00.bits.ID = mp_ioapics[apic_id].apicid;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic_id, 0, reg_00.raw);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
/*
* Sanity check
*/
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic_id, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
printk("could not set ID!\n");
else
@@ -2200,7 +2200,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
unsigned long flags;
struct irq_cfg *cfg;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
if (irq < NR_IRQS_LEGACY) {
disable_8259A_irq(irq);
if (i8259A_irq_pending(irq))
@@ -2208,7 +2208,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
}
cfg = irq_cfg(irq);
__unmask_IO_APIC_irq(cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
@@ -2220,9 +2220,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
struct irq_cfg *cfg = irq_cfg(irq);
unsigned long flags;
- spin_lock_irqsave(&vector_lock, flags);
+ atomic_spin_lock_irqsave(&vector_lock, flags);
apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
- spin_unlock_irqrestore(&vector_lock, flags);
+ atomic_spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
@@ -2335,7 +2335,7 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
dest = set_desc_affinity(desc, mask);
if (dest != BAD_APICID) {
/* Only the high 8 bits are valid. */
@@ -2343,7 +2343,7 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
__target_IO_APIC_irq(irq, dest, cfg);
ret = 0;
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
@@ -2456,7 +2456,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
continue;
cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -2478,7 +2478,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
__get_cpu_var(vector_irq)[vector] = -1;
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
irq_exit();
@@ -2604,10 +2604,10 @@ static void ack_apic_level(unsigned int irq)
#ifdef CONFIG_X86_32
if (!(v & (1 << (i & 0x1f)))) {
atomic_inc(&irq_mis_count);
- spin_lock(&ioapic_lock);
+ atomic_spin_lock(&ioapic_lock);
__mask_and_edge_IO_APIC_irq(cfg);
__unmask_and_level_IO_APIC_irq(cfg);
- spin_unlock(&ioapic_lock);
+ atomic_spin_unlock(&ioapic_lock);
}
#endif
}
@@ -2641,9 +2641,9 @@ eoi_ioapic_irq(struct irq_desc *desc)
irq = desc->irq;
cfg = desc->chip_data;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ir_ack_apic_edge(unsigned int irq)
@@ -3119,13 +3119,13 @@ static int ioapic_resume(struct sys_device *dev)
data = container_of(dev, struct sysfs_ioapic_data, dev);
entry = data->entry;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
}
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
ioapic_write_entry(dev->id, i, entry[i]);
@@ -3189,7 +3189,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
if (irq_want < nr_irqs_gsi)
irq_want = nr_irqs_gsi;
- spin_lock_irqsave(&vector_lock, flags);
+ atomic_spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < nr_irqs; new++) {
desc_new = irq_to_desc_alloc_node(new, node);
if (!desc_new) {
@@ -3207,7 +3207,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
irq = new;
break;
}
- spin_unlock_irqrestore(&vector_lock, flags);
+ atomic_spin_unlock_irqrestore(&vector_lock, flags);
if (irq > 0) {
dynamic_irq_init(irq);
@@ -3248,9 +3248,9 @@ void destroy_irq(unsigned int irq)
desc->chip_data = cfg;
free_irte(irq);
- spin_lock_irqsave(&vector_lock, flags);
+ atomic_spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq, cfg);
- spin_unlock_irqrestore(&vector_lock, flags);
+ atomic_spin_unlock_irqrestore(&vector_lock, flags);
}
/*
@@ -3778,10 +3778,10 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
- spin_lock_irqsave(&vector_lock, flags);
+ atomic_spin_lock_irqsave(&vector_lock, flags);
set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_name);
- spin_unlock_irqrestore(&vector_lock, flags);
+ atomic_spin_unlock_irqrestore(&vector_lock, flags);
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
@@ -3825,9 +3825,9 @@ int __init io_apic_get_redir_entries (int ioapic)
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.entries;
}
@@ -3968,9 +3968,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
if (physids_empty(apic_id_map))
apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
if (apic_id >= get_physical_broadcast()) {
printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4004,10 +4004,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
if (reg_00.bits.ID != apic_id) {
reg_00.bits.ID = apic_id;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0, reg_00.raw);
reg_00.raw = io_apic_read(ioapic, 0);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
/* Sanity check */
if (reg_00.bits.ID != apic_id) {
@@ -4028,9 +4028,9 @@ int __init io_apic_get_version(int ioapic)
union IO_APIC_reg_01 reg_01;
unsigned long flags;
- spin_lock_irqsave(&ioapic_lock, flags);
+ atomic_spin_lock_irqsave(&ioapic_lock, flags);
reg_01.raw = io_apic_read(ioapic, 1);
- spin_unlock_irqrestore(&ioapic_lock, flags);
+ atomic_spin_unlock_irqrestore(&ioapic_lock, flags);
return reg_01.bits.version;
}
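ioapic_read_entry() above is the canonical paired-register case: both 32-bit halves of a routing entry are read under a single lock hold so the entry cannot change between the two reads. A sketch of the idiom, reusing io_apic_read() and ioapic_lock from this file (demo_read_entry() is illustrative):

	static u64 demo_read_entry(int apic, int pin)
	{
		unsigned long flags;
		u32 lo, hi;

		atomic_spin_lock_irqsave(&ioapic_lock, flags);
		lo = io_apic_read(apic, 0x10 + 2 * pin);	/* low word */
		hi = io_apic_read(apic, 0x11 + 2 * pin);	/* high word */
		atomic_spin_unlock_irqrestore(&ioapic_lock, flags);

		return ((u64)hi << 32) | lo;
	}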
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index b3025b43b63a..fa15da6d6884 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,12 +416,12 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
/* We can be called before check_nmi_watchdog, hence NULL check. */
if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
- static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+ static DEFINE_ATOMIC_SPINLOCK(lock); /* Serialise the printks */
- spin_lock(&lock);
+ atomic_spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
dump_stack();
- spin_unlock(&lock);
+ atomic_spin_unlock(&lock);
cpumask_clear_cpu(cpu, backtrace_mask);
}
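The watchdog hunk shows why the stricter lock type matters in NMI context: a sleeping lock can never be taken from an NMI, so the printk serialization has to stay a true spinlock. A condensed sketch of the same trick (demo_backtrace() is illustrative):

	static void demo_backtrace(int cpu)
	{
		static DEFINE_ATOMIC_SPINLOCK(lock);	/* one printer at a time */

		atomic_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		atomic_spin_unlock(&lock);
	}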
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 79302e9a33a4..5dc9baa43858 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1216,7 +1216,7 @@ static void reinit_timer(void)
#ifdef INIT_TIMER_AFTER_SUSPEND
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
/* set the clock to HZ */
outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
@@ -1224,7 +1224,7 @@ static void reinit_timer(void)
udelay(10);
outb_pit(LATCH >> 8, PIT_CH0); /* MSB */
udelay(10);
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0543f69f0b27..d486197c3855 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -549,7 +549,7 @@ static unsigned long set_mtrr_state(void)
static unsigned long cr4 = 0;
-static DEFINE_SPINLOCK(set_atomicity_lock);
+static DEFINE_ATOMIC_SPINLOCK(set_atomicity_lock);
/*
* Since we are disabling the cache don't allow any interrupts - they
@@ -566,7 +566,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
- spin_lock(&set_atomicity_lock);
+ atomic_spin_lock(&set_atomicity_lock);
/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | X86_CR0_CD;
@@ -603,7 +603,7 @@ static void post_set(void) __releases(set_atomicity_lock)
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
- spin_unlock(&set_atomicity_lock);
+ atomic_spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 5cf36c053ac4..84d4433bb24d 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -16,7 +16,7 @@
#include <asm/hpet.h>
#include <asm/smp.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_ATOMIC_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
#ifdef CONFIG_X86_32
@@ -39,7 +39,7 @@ struct clock_event_device *global_clock_event;
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -70,7 +70,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
}
/*
@@ -80,10 +80,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
outb_pit(delta & 0xff , PIT_CH0); /* LSB */
outb_pit(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
return 0;
}
@@ -138,7 +138,7 @@ static cycle_t pit_read(struct clocksource *cs)
int count;
u32 jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -184,7 +184,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
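pit_next_event() above is the minimal i8253 critical section: the LSB/MSB pair written to channel 0 must not interleave with another writer, or the counter is programmed with a torn value. A sketch of that sequence with the outb_pit() helper and PIT_CH0 constant visible in the hunks (demo_pit_program() is illustrative):

	static int demo_pit_program(unsigned long delta)
	{
		atomic_spin_lock(&i8253_lock);
		outb_pit(delta & 0xff, PIT_CH0);	/* LSB first */
		outb_pit(delta >> 8, PIT_CH0);		/* then MSB */
		atomic_spin_unlock(&i8253_lock);
		return 0;
	}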
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index df89102bef80..eadb89137d26 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
*/
static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_ATOMIC_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);
struct irq_chip i8259A_chip = {
@@ -68,13 +68,13 @@ void disable_8259A_irq(unsigned int irq)
unsigned int mask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void enable_8259A_irq(unsigned int irq)
@@ -82,13 +82,13 @@ void enable_8259A_irq(unsigned int irq)
unsigned int mask = ~(1 << irq);
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
@@ -97,12 +97,12 @@ int i8259A_irq_pending(unsigned int irq)
unsigned long flags;
int ret;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -150,7 +150,7 @@ static void mask_and_ack_8259A(unsigned int irq)
unsigned int irqmask = 1 << irq;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -183,7 +183,7 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* Specific EOI to master */
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -285,24 +285,24 @@ void mask_8259A(void)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void unmask_8259A(void)
{
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
}
void init_8259A(int auto_eoi)
@@ -311,7 +311,7 @@ void init_8259A(int auto_eoi)
i8259A_auto_eoi = auto_eoi;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -356,5 +356,5 @@ void init_8259A(int auto_eoi)
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
}
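disable_8259A_irq() pairs a cached-mask update with the outb() that pushes it to the PIC; both must happen under i8259A_lock so the software cache and the hardware IMR never diverge. A condensed sketch using the symbols from the hunks (demo_mask_irq() is illustrative):

	static void demo_mask_irq(unsigned int irq)
	{
		unsigned long flags;

		atomic_spin_lock_irqsave(&i8259A_lock, flags);
		cached_irq_mask |= 1 << irq;	/* update the cache ... */
		if (irq & 8)
			outb(cached_slave_mask, PIC_SLAVE_IMR);	/* ... then hardware */
		else
			outb(cached_master_mask, PIC_MASTER_IMR);
		atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
	}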
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b0cdde6932f5..c51d8e60718a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 977d8b43a0dd..1aa5228a13eb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -81,12 +81,12 @@ void fixup_irqs(void)
continue;
/* interrupts are disabled at this point */
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
continue;
}
@@ -106,7 +106,7 @@ void fixup_irqs(void)
if (desc->chip->unmask)
desc->chip->unmask(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 5c5d87f0b2e1..9e2cb0b149a9 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -84,11 +84,11 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
* manually to deassert NMI lines for the watchdog if run
* on an 82489DX-based system.
*/
- spin_lock(&i8259A_lock);
+ atomic_spin_lock(&i8259A_lock);
outb(0x0c, PIC_MASTER_OCW3);
/* Ack the IRQ; AEOI will end it automatically. */
inb(PIC_MASTER_POLL);
- spin_unlock(&i8259A_lock);
+ atomic_spin_unlock(&i8259A_lock);
}
#endif
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index c40ba3f8c259..24e6d2bea474 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -581,7 +581,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
struct irq_desc *desc;
unsigned long flags;
- spin_lock_irqsave(&i8259A_lock, flags);
+ atomic_spin_lock_irqsave(&i8259A_lock, flags);
/* Find out what's interrupting in the PIIX4 master 8259 */
outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -618,7 +618,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
outb(0x60 + realirq, 0x20);
}
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
desc = irq_to_desc(realirq);
@@ -636,7 +636,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
return IRQ_HANDLED;
out_unlock:
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8259A_lock, flags);
return IRQ_NONE;
}
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 25ee06a80aad..18d0d5526f7c 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -59,7 +59,7 @@ int __vgetcpu_mode __section_vgetcpu_mode;
struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
- .lock = SEQLOCK_UNLOCKED,
+ .lock = __ATOMIC_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
.sysctl_enabled = 1,
};
@@ -67,17 +67,17 @@ void update_vsyscall_tz(void)
{
unsigned long flags;
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ write_atomic_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* sys_tz has changed */
vsyscall_gtod_data.sys_tz = sys_tz;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ write_atomic_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
unsigned long flags;
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ write_atomic_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* copy vsyscall data */
vsyscall_gtod_data.clock.vread = clock->vread;
vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
@@ -87,7 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ write_atomic_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
/* RED-PEN may want to re-add seq locking, but then the variable should be
@@ -124,7 +124,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
unsigned long mult, shift, nsec;
cycle_t (*vread)(void);
do {
- seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+ seq = read_atomic_seqbegin(&__vsyscall_gtod_data.lock);
vread = __vsyscall_gtod_data.clock.vread;
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
@@ -140,7 +140,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
nsec = __vsyscall_gtod_data.wall_time_nsec;
- } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+ } while (read_atomic_seqretry(&__vsyscall_gtod_data.lock, seq));
/* calculate interval: */
cycle_delta = (now - base) & mask;
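update_vsyscall() is the writer half of the vsyscall seqlock: every field copy happens inside one write section, so the user-space readers either see the complete old snapshot or the complete new one, never a mix. A sketch of the writer pattern against the same structure, assuming the write_atomic_seqlock_* calls mirror their seqlock counterparts (demo_publish_time() is illustrative):

	static void demo_publish_time(struct timespec *wall)
	{
		unsigned long flags;

		write_atomic_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
		vsyscall_gtod_data.wall_time_sec = wall->tv_sec;
		vsyscall_gtod_data.wall_time_nsec = wall->tv_nsec;
		write_atomic_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
	}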
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 4d6f0d293ee2..bee99bdea5d9 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -237,11 +237,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
irq_ack_notifier);
- spin_lock(&ps->inject_lock);
+ atomic_spin_lock(&ps->inject_lock);
if (atomic_dec_return(&ps->pit_timer.pending) < 0)
atomic_inc(&ps->pit_timer.pending);
ps->irq_ack = 1;
- spin_unlock(&ps->inject_lock);
+ atomic_spin_unlock(&ps->inject_lock);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -577,7 +577,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
mutex_init(&pit->pit_state.lock);
mutex_lock(&pit->pit_state.lock);
- spin_lock_init(&pit->pit_state.inject_lock);
+ atomic_spin_lock_init(&pit->pit_state.inject_lock);
/* Initialize PIO device */
pit->dev.read = pit_ioport_read;
@@ -669,12 +669,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
/* Try to inject pending interrupts when
* last one has been acked.
*/
- spin_lock(&ps->inject_lock);
+ atomic_spin_lock(&ps->inject_lock);
if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
- spin_unlock(&ps->inject_lock);
+ atomic_spin_unlock(&ps->inject_lock);
if (inject)
__inject_pit_timer_intr(kvm);
}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index bbd863ff60b7..33e30e3be9b6 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -26,7 +26,7 @@ struct kvm_kpit_state {
u32 speaker_data_on;
struct mutex lock;
struct kvm_pit *pit;
- spinlock_t inject_lock;
+ atomic_spinlock_t inject_lock;
unsigned long irq_ack;
struct kvm_irq_ack_notifier irq_ack_notifier;
};
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1ccb50c74f18..91fcca196a69 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -34,7 +34,7 @@
static void pic_lock(struct kvm_pic *s)
__acquires(&s->lock)
{
- spin_lock(&s->lock);
+ atomic_spin_lock(&s->lock);
}
static void pic_unlock(struct kvm_pic *s)
@@ -48,7 +48,7 @@ static void pic_unlock(struct kvm_pic *s)
s->pending_acks = 0;
s->wakeup_needed = false;
- spin_unlock(&s->lock);
+ atomic_spin_unlock(&s->lock);
while (acks) {
kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
@@ -522,7 +522,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
if (!s)
return NULL;
- spin_lock_init(&s->lock);
+ atomic_spin_lock_init(&s->lock);
s->kvm = kvm;
s->pics[0].elcr_mask = 0xf8;
s->pics[1].elcr_mask = 0xde;
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 9f593188129e..fdae2efd20cb 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -62,7 +62,7 @@ struct kvm_kpic_state {
};
struct kvm_pic {
- spinlock_t lock;
+ atomic_spinlock_t lock;
bool wakeup_needed;
unsigned pending_acks;
struct kvm *kvm;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 821e97017e95..55ceed8429fa 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -40,7 +40,7 @@ union smp_flush_state {
struct {
struct mm_struct *flush_mm;
unsigned long flush_va;
- spinlock_t tlbstate_lock;
+ atomic_spinlock_t tlbstate_lock;
DECLARE_BITMAP(flush_cpumask, NR_CPUS);
};
char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
@@ -179,7 +179,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
* num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
* probably not worth checking this for a cache-hot lock.
*/
- spin_lock(&f->tlbstate_lock);
+ atomic_spin_lock(&f->tlbstate_lock);
f->flush_mm = mm;
f->flush_va = va;
@@ -198,7 +198,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
f->flush_mm = NULL;
f->flush_va = 0;
- spin_unlock(&f->tlbstate_lock);
+ atomic_spin_unlock(&f->tlbstate_lock);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
@@ -222,7 +222,7 @@ static int __cpuinit init_smp_flush(void)
int i;
for (i = 0; i < ARRAY_SIZE(flush_state); i++)
- spin_lock_init(&flush_state[i].tlbstate_lock);
+ atomic_spin_lock_init(&flush_state[i].tlbstate_lock);
return 0;
}
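In the TLB shootdown path the lock protects the flush_mm/flush_va pair for the whole IPI round trip, and the sender spins with interrupts enabled, which an atomic spinlock still permits. A condensed sketch of the sender side, using the field names from the hunk (demo_flush_others() is illustrative and elides the actual IPI send and wait):

	static void demo_flush_others(union smp_flush_state *f,
				      struct mm_struct *mm, unsigned long va)
	{
		atomic_spin_lock(&f->tlbstate_lock);
		f->flush_mm = mm;	/* visible to the IPI handlers */
		f->flush_va = va;
		/* ... send the IPI and wait for all target CPUs to ack ... */
		f->flush_mm = NULL;
		f->flush_va = 0;
		atomic_spin_unlock(&f->tlbstate_lock);
	}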
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 89b9a5cd63da..dea0100f5a19 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -125,9 +125,9 @@ static void nmi_cpu_setup(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
- spin_lock(&oprofilefs_lock);
+ atomic_spin_lock(&oprofilefs_lock);
model->setup_ctrs(msrs);
- spin_unlock(&oprofilefs_lock);
+ atomic_spin_unlock(&oprofilefs_lock);
per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2202b6257b82..fc2697cfc750 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -81,7 +81,7 @@ int pcibios_scanned;
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
-DEFINE_SPINLOCK(pci_config_lock);
+DEFINE_ATOMIC_SPINLOCK(pci_config_lock);
static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index bd13c3e4c6db..286ca0121e0b 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -27,7 +27,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
return -EINVAL;
}
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -43,7 +43,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -56,7 +56,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -72,7 +72,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -108,7 +108,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -127,7 +127,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -147,7 +147,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -166,7 +166,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 8b2d561046a3..7ca333ff0bee 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -72,7 +72,7 @@ err: *value = -1;
if (!base)
goto err;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -87,7 +87,7 @@ err: *value = -1;
*value = mmio_config_readl(mmcfg_virt_addr + reg);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -105,7 +105,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
if (!base)
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -120,7 +120,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
mmio_config_writel(mmcfg_virt_addr + reg, value);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 8eb295e116f6..f2a1f1fbd504 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -41,7 +41,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -66,7 +66,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -80,7 +80,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -105,7 +105,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 1c975cc9839e..ffebd804afbb 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -161,7 +161,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -212,7 +212,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
@@ -227,7 +227,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ atomic_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -268,7 +268,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6a40b78b46aa..cd36532096b0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -47,11 +47,11 @@ notrace static noinline int do_realtime(struct timespec *ts)
{
unsigned long seq, ns;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_atomic_seqbegin(&gtod->lock);
ts->tv_sec = gtod->wall_time_sec;
ts->tv_nsec = gtod->wall_time_nsec;
ns = vgetns();
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_atomic_seqretry(&gtod->lock, seq)));
timespec_add_ns(ts, ns);
return 0;
}
@@ -76,12 +76,12 @@ notrace static noinline int do_monotonic(struct timespec *ts)
{
unsigned long seq, ns, secs;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_atomic_seqbegin(&gtod->lock);
secs = gtod->wall_time_sec;
ns = gtod->wall_time_nsec + vgetns();
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_atomic_seqretry(&gtod->lock, seq)));
vset_normalized_timespec(ts, secs, ns);
return 0;
}
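The vDSO readers run in user space, where taking a kernel lock is impossible, so do_monotonic() keeps every mutually dependent field inside the retry loop, including the wall_to_monotonic offset; hoisting it out would let a concurrent update tear the sum. An annotated sketch of that invariant, assuming the read_atomic_seq* helpers behave like the stock seqlock readers:

	notrace static int demo_monotonic(struct timespec *ts)
	{
		unsigned long seq, ns, secs;

		do {
			seq = read_atomic_seqbegin(&gtod->lock);
			secs = gtod->wall_time_sec;
			ns = gtod->wall_time_nsec + vgetns();
			/* offset read under the same sequence as the wall time */
			secs += gtod->wall_to_monotonic.tv_sec;
			ns += gtod->wall_to_monotonic.tv_nsec;
		} while (unlikely(read_atomic_seqretry(&gtod->lock, seq)));

		vset_normalized_timespec(ts, secs, ns);
		return 0;
	}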
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a1badb32fcda..bb599c305033 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0efa59e7e3af..0f90a640f832 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -919,7 +919,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
}
static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_ATOMIC_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -994,12 +994,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* without doing anything.
*/
if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
+ atomic_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
if (c3_cpu_count == num_online_cpus())
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
- spin_unlock(&c3_lock);
+ atomic_spin_unlock(&c3_lock);
} else if (!pr->flags.bm_check) {
ACPI_FLUSH_CPU_CACHE();
}
@@ -1008,10 +1008,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
/* Re-enable bus master arbitration */
if (pr->flags.bm_check && pr->flags.bm_control) {
- spin_lock(&c3_lock);
+ atomic_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
- spin_unlock(&c3_lock);
+ atomic_spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
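The C3 hunks guard a counter plus a hardware write: bus-master arbitration is disabled only once every online CPU has entered C3, and re-enabled as soon as one leaves. A sketch of the enter side, using the symbols from the hunk (demo_enter_c3() is illustrative):

	static void demo_enter_c3(void)
	{
		atomic_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* last CPU in turns off bus-master arbitration */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		atomic_spin_unlock(&c3_lock);
	}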
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index f9d01608cbe2..cc71770034f1 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -165,12 +165,12 @@ unsigned long read_timer(void)
unsigned long t, flags;
int i;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
return(t - i);
}
#endif
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 65b0c448e4b7..29577a87daaf 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -58,11 +58,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06aa..2c52c688943b 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 21cb755a54fb..5dc0ccd0a2f1 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_ATOMIC_SPINLOCK(i8253_lock);
#endif
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
if (value > 20 && value < 32767)
count = PIT_TICK_RATE / value;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
return 0;
}
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..284814bf2256 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -72,10 +72,10 @@ int alloc_event_buffer(void)
int err = -ENOMEM;
unsigned long flags;
- spin_lock_irqsave(&oprofilefs_lock, flags);
+ atomic_spin_lock_irqsave(&oprofilefs_lock, flags);
buffer_size = oprofile_buffer_size;
buffer_watershed = oprofile_buffer_watershed;
- spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ atomic_spin_unlock_irqrestore(&oprofilefs_lock, flags);
if (buffer_watershed >= buffer_size)
return -EINVAL;
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b7e4cee24269..7cbf76d5749c 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -21,7 +21,7 @@
#define OPROFILEFS_MAGIC 0x6f70726f
-DEFINE_SPINLOCK(oprofilefs_lock);
+DEFINE_ATOMIC_SPINLOCK(oprofilefs_lock);
static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
{
@@ -75,9 +75,9 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
if (copy_from_user(tmpbuf, buf, count))
return -EFAULT;
- spin_lock_irqsave(&oprofilefs_lock, flags);
+ atomic_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
- spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ atomic_spin_unlock_irqrestore(&oprofilefs_lock, flags);
return 0;
}
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index db23200c4874..fddeb639ac09 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -12,7 +12,7 @@
* configuration space.
*/
-static DEFINE_SPINLOCK(pci_lock);
+static DEFINE_ATOMIC_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -32,10 +32,10 @@ int pci_bus_read_config_##size \
unsigned long flags; \
u32 data = 0; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ atomic_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ atomic_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -46,9 +46,9 @@ int pci_bus_write_config_##size \
int res; \
unsigned long flags; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ atomic_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ atomic_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -78,10 +78,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
struct pci_ops *old_ops;
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ atomic_spin_lock_irqsave(&pci_lock, flags);
old_ops = bus->ops;
bus->ops = ops;
- spin_unlock_irqrestore(&pci_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_lock, flags);
return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
@@ -135,9 +135,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev)
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&pci_lock);
+ atomic_spin_unlock_irq(&pci_lock);
schedule();
- spin_lock_irq(&pci_lock);
+ atomic_spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
@@ -149,11 +149,11 @@ int pci_user_read_config_##size \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ atomic_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
- spin_unlock_irq(&pci_lock); \
+ atomic_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
@@ -164,11 +164,11 @@ int pci_user_write_config_##size \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ atomic_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
- spin_unlock_irq(&pci_lock); \
+ atomic_spin_unlock_irq(&pci_lock); \
return ret; \
}
@@ -395,10 +395,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev)
unsigned long flags;
int was_blocked;
- spin_lock_irqsave(&pci_lock, flags);
+ atomic_spin_lock_irqsave(&pci_lock, flags);
was_blocked = dev->block_ucfg_access;
dev->block_ucfg_access = 1;
- spin_unlock_irqrestore(&pci_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_lock, flags);
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
@@ -416,7 +416,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ atomic_spin_lock_irqsave(&pci_lock, flags);
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
@@ -424,6 +424,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
dev->block_ucfg_access = 0;
wake_up_all(&pci_ucfg_wait);
- spin_unlock_irqrestore(&pci_lock, flags);
+ atomic_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
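drivers/pci/access.c generates its byte/word/dword accessors from templates, so one edit inside each macro body converts every generated function at once. A sketch of what a single read-side expansion looks like after the conversion (the function name and len value are illustrative; the body follows the macro shown above):

	int demo_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
				      int pos, u8 *value)
	{
		int res;
		unsigned long flags;
		u32 data = 0;

		atomic_spin_lock_irqsave(&pci_lock, flags);
		res = bus->ops->read(bus, devfn, pos, 1, &data);
		*value = (u8)data;
		atomic_spin_unlock_irqrestore(&pci_lock, flags);
		return res;
	}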
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 59d7d5ec17a4..41cc04c77afa 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -51,7 +51,7 @@
#include <video/vga.h>
#include <asm/io.h>
-static DEFINE_SPINLOCK(vga_lock);
+static DEFINE_ATOMIC_SPINLOCK(vga_lock);
static int cursor_size_lastfrom;
static int cursor_size_lastto;
static u32 vgacon_xres;
@@ -158,7 +158,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
* ddprintk might set the console position from interrupt
* handlers, thus the write has to be IRQ-atomic.
*/
- spin_lock_irqsave(&vga_lock, flags);
+ atomic_spin_lock_irqsave(&vga_lock, flags);
#ifndef SLOW_VGA
v1 = reg + (val & 0xff00);
@@ -171,7 +171,7 @@ static inline void write_vga(unsigned char reg, unsigned int val)
outb_p(reg + 1, vga_video_port_reg);
outb_p(val & 0xff, vga_video_port_val);
#endif
- spin_unlock_irqrestore(&vga_lock, flags);
+ atomic_spin_unlock_irqrestore(&vga_lock, flags);
}
static inline void vga_set_mem_top(struct vc_data *c)
@@ -662,7 +662,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
cursor_size_lastfrom = from;
cursor_size_lastto = to;
- spin_lock_irqsave(&vga_lock, flags);
+ atomic_spin_lock_irqsave(&vga_lock, flags);
if (vga_video_type >= VIDEO_TYPE_VGAC) {
outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
curs = inb_p(vga_video_port_val);
@@ -680,7 +680,7 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
outb_p(curs, vga_video_port_val);
outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
outb_p(cure, vga_video_port_val);
- spin_unlock_irqrestore(&vga_lock, flags);
+ atomic_spin_unlock_irqrestore(&vga_lock, flags);
}
static void vgacon_cursor(struct vc_data *c, int mode)
@@ -755,7 +755,7 @@ static int vgacon_doresize(struct vc_data *c,
unsigned int scanlines = height * c->vc_font.height;
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
- spin_lock_irqsave(&vga_lock, flags);
+ atomic_spin_lock_irqsave(&vga_lock, flags);
vgacon_xres = width * VGA_FONTWIDTH;
vgacon_yres = height * c->vc_font.height;
@@ -806,7 +806,7 @@ static int vgacon_doresize(struct vc_data *c,
outb_p(vsync_end, vga_video_port_val);
}
- spin_unlock_irqrestore(&vga_lock, flags);
+ atomic_spin_unlock_irqrestore(&vga_lock, flags);
return 0;
}
@@ -889,11 +889,11 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
{
/* save original values of VGA controller registers */
if (!vga_vesa_blanked) {
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
vga_state.HorizontalTotal = inb_p(vga_video_port_val);
@@ -916,7 +916,7 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
/* assure that video is enabled */
/* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
/* test for vertical retrace in process.... */
@@ -952,13 +952,13 @@ static void vga_vesa_blank(struct vgastate *state, int mode)
/* restore both index registers */
vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
}
static void vga_vesa_unblank(struct vgastate *state)
{
/* restore original values of VGA controller registers */
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
@@ -983,7 +983,7 @@ static void vga_vesa_unblank(struct vgastate *state)
/* restore index/control registers */
vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
}
static void vga_pal_blank(struct vgastate *state)
@@ -1103,7 +1103,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
#endif
unlock_kernel();
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
/* First, the Sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
/* CPU writes only to map 2 */
@@ -1119,7 +1119,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
/* map start at A000:0000 */
vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
if (arg) {
if (set)
@@ -1146,7 +1146,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
}
}
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
/* First, the sequencer, Synchronous reset */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
/* CPU writes to maps 0 and 1 */
@@ -1185,7 +1185,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
inb_p(video_port_status);
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
}
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
lock_kernel();
return 0;
}
@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
registers; they are write-only on EGA, but it appears that they
are all don't care bits on EGA, so I guess it doesn't matter. */
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
ovr = inb_p(vga_video_port_val);
outb_p(0x09, vga_video_port_reg); /* Font size register */
fsr = inb_p(vga_video_port_val);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
vde = maxscan & 0xff; /* Vertical display end reg */
ovr = (ovr & 0xbd) + /* Overflow register */
((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
- spin_lock_irq(&vga_lock);
+ atomic_spin_lock_irq(&vga_lock);
outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
outb_p(ovr, vga_video_port_val);
outb_p(0x09, vga_video_port_reg); /* Font size */
outb_p(fsr, vga_video_port_val);
outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
outb_p(vde, vga_video_port_val);
- spin_unlock_irq(&vga_lock);
+ atomic_spin_unlock_irq(&vga_lock);
vga_video_font_height = fontheight;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
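
The vgacon changes above are purely mechanical: every spin_lock*()/spin_unlock*() on vga_lock becomes the atomic_spin_*() equivalent with identical IRQ semantics, because VGA register banging must stay non-preemptible even on RT. A minimal sketch of the conversion pattern, with all names hypothetical:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    /* hypothetical lock guarding an index/data register pair */
    static DEFINE_ATOMIC_SPINLOCK(demo_hw_lock);

    static void demo_poke_crtc(u8 reg, u8 val)
    {
        unsigned long flags;

        /* always-spinning lock: never sleeps, safe around raw port I/O */
        atomic_spin_lock_irqsave(&demo_hw_lock, flags);
        outb_p(reg, 0x3d4);	/* hypothetical index port */
        outb_p(val, 0x3d5);	/* hypothetical data port */
        atomic_spin_unlock_irqrestore(&demo_hw_lock, flags);
    }
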
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 4759917adc71..f6177e6c7c9d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -170,7 +170,7 @@ struct hrtimer_clock_base {
* @nr_events: Total number of timer interrupt events
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ atomic_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..91665bd3d5e1 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -27,7 +27,7 @@ extern struct fs_struct init_fs;
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
}
@@ -159,7 +159,7 @@ extern struct cred init_cred;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..5e59d7a0de7a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,7 +184,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ atomic_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bcd9c07848be..4dffaec6d14e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -170,7 +170,7 @@ struct kretprobe {
int nmissed;
size_t data_size;
struct hlist_head free_instances;
- spinlock_t lock;
+ atomic_spinlock_t lock;
};
struct kretprobe_instance {
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c58..aa89457dbaaa 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -153,7 +153,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
/** lock for read/write safety */
-extern spinlock_t oprofilefs_lock;
+extern atomic_spinlock_t oprofilefs_lock;
/**
* Add the contents of a circular buffer to the event buffer.
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index a7684a513994..fafe0a6c7c59 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
#ifdef CONFIG_SMP
struct percpu_counter {
- spinlock_t lock;
+ atomic_spinlock_t lock;
s64 count;
#ifdef CONFIG_HOTPLUG_CPU
struct list_head list; /* All percpu_counters are on a list */
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index bd15d7a5f5ce..ba7a2bcfc243 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -584,7 +584,7 @@ struct perf_counter_context {
* Protect the states of the counters in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ atomic_spinlock_t lock;
/*
* Protect the list of counters. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..1eef0565f744 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ atomic_spinlock_t *alock;
+ spinlock_t *slock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .slock = _lock
+# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock) .alock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_ATOMIC(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_ATOMIC - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_ATOMIC(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_ATOMIC(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,7 +133,7 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->slock = lock;
+ head->alock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_atomic - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: list atomic_spinlock, remembered for debugging
+ */
+static inline void
+plist_head_init_atomic(struct plist_head *head, atomic_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->alock = lock;
+ head->slock = NULL;
#endif
}
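
With the CONFIG_DEBUG_PI_LIST pointer split into alock/slock, a plist head now records which flavour of lock protects it, and callers must pick the matching initializer. A minimal sketch, names hypothetical:

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    static DEFINE_ATOMIC_SPINLOCK(demo_lock);	/* hypothetical */
    static struct plist_head demo_head;

    static void demo_init(void)
    {
        /* stores &demo_lock in head->alock when CONFIG_DEBUG_PI_LIST=y */
        plist_head_init_atomic(&demo_head, &demo_lock);
    }
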
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bbbd05e..de2e44784a6e 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
*/
int shift;
unsigned long period;
- spinlock_t lock; /* protect the snapshot state */
+ atomic_spinlock_t lock; /* protect the snapshot state */
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
*/
unsigned long period;
int shift;
- spinlock_t lock; /* protect the snapshot state */
+ atomic_spinlock_t lock; /* protect the snapshot state */
};
#define INIT_PROP_LOCAL_SINGLE(name) \
-{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+{ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name.lock), \
}
int prop_local_init_single(struct prop_local_single *pl);
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..dd9612d436e2 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ atomic_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __ATOMIC_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_ATOMIC(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
@@ -98,7 +98,7 @@ extern void rt_mutex_unlock(struct rt_mutex *lock);
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
+ .pi_waiters = PLIST_HEAD_INIT_ATOMIC(tsk.pi_waiters, tsk.pi_lock), \
INIT_RT_MUTEX_DEBUG(tsk)
#else
# define INIT_RT_MUTEXES(tsk)
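
With the initializer changes above, a statically defined rt_mutex gets an atomic wait_lock and a wait_list whose debug pointer is set via PLIST_HEAD_INIT_ATOMIC. A minimal usage sketch, demo names hypothetical:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(demo_mutex);	/* expands to __RT_MUTEX_INITIALIZER */

    static void demo(void)
    {
        rt_mutex_lock(&demo_mutex);	/* blocks, with priority inheritance */
        /* ... critical section ... */
        rt_mutex_unlock(&demo_mutex);
    }
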
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..b4458c6fdd3a
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,151 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_read_lock(rwlock_t *lock);
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock_flags(lock, flags) \
+ __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock_flags(lock, flags) \
+ __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+
+#define write_lock(lock) _write_lock(lock)
+#define read_lock(lock) _read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _read_lock_irq(lock)
+#define read_lock_bh(lock) _read_lock_bh(lock)
+
+#define write_lock_irq(lock) _write_lock_irq(lock)
+#define write_lock_bh(lock) _write_lock_bh(lock)
+
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+ !defined(CONFIG_SMP)
+# define read_unlock(lock) _read_unlock(lock)
+# define write_unlock(lock) _write_unlock(lock)
+# define read_unlock_irq(lock) _read_unlock_irq(lock)
+# define write_unlock_irq(lock) _write_unlock_irq(lock)
+#else
+# define read_unlock(lock) \
+ do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
+# define write_unlock(lock) \
+ do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
+# define read_unlock_irq(lock) \
+do { \
+ __raw_read_unlock(&(lock)->raw_lock); \
+ __release(lock); \
+ local_irq_enable(); \
+} while (0)
+# define write_unlock_irq(lock) \
+do { \
+ __raw_write_unlock(&(lock)->raw_lock); \
+ __release(lock); \
+ local_irq_enable(); \
+} while (0)
+#endif
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
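
Splitting the rwlock methods out of spinlock.h changes nothing for users: linux/spinlock.h still pulls linux/rwlock.h in at the end, and the API is as before. A minimal reader/writer sketch, names hypothetical:

    #include <linux/spinlock.h>	/* includes linux/rwlock.h */

    static DEFINE_RWLOCK(demo_rwlock);	/* hypothetical */
    static int demo_value;

    static int demo_read(void)
    {
        int v;

        read_lock(&demo_rwlock);	/* concurrent readers allowed */
        v = demo_value;
        read_unlock(&demo_rwlock);
        return v;
    }

    static void demo_write(int v)
    {
        write_lock_irq(&demo_rwlock);	/* exclusive, IRQs off */
        demo_value = v;
        write_unlock_irq(&demo_rwlock);
    }
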
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..f8c935206a41
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ raw_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ab6b265be11b..9757414fbb12 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -520,7 +520,7 @@ struct task_cputime {
struct thread_group_cputimer {
struct task_cputime cputime;
int running;
- spinlock_t lock;
+ atomic_spinlock_t lock;
};
/*
@@ -1347,7 +1347,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ atomic_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -2322,7 +2322,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
sig->cputimer.cputime = INIT_CPUTIME;
- spin_lock_init(&sig->cputimer.lock);
+ atomic_spin_lock_init(&sig->cputimer.lock);
sig->cputimer.running = 0;
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 632205ccc25d..dc64d96aaab8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -31,6 +31,11 @@
typedef struct {
unsigned sequence;
+ atomic_spinlock_t lock;
+} atomic_seqlock_t;
+
+typedef struct {
+ unsigned sequence;
spinlock_t lock;
} seqlock_t;
@@ -38,11 +43,23 @@ typedef struct {
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
+#define __ATOMIC_SEQLOCK_UNLOCKED(lockname) \
+ { 0, __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) }
+
+#define seqlock_atomic_init(x) \
+ do { \
+ (x)->sequence = 0; \
+ atomic_spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_ATOMIC_SEQLOCK(x) \
+ atomic_seqlock_t x = __ATOMIC_SEQLOCK_UNLOCKED(x)
+
#define __SEQLOCK_UNLOCKED(lockname) \
- { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+ { 0, __SPIN_LOCK_UNLOCKED(lockname) }
#define SEQLOCK_UNLOCKED \
- __SEQLOCK_UNLOCKED(old_style_seqlock_init)
+ __SEQLOCK_UNLOCKED(old_style_seqlock_init)
#define seqlock_init(x) \
do { \
@@ -51,12 +68,19 @@ typedef struct {
} while (0)
#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
/* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
* Don't need preempt_disable() because that is in the spin_lock already.
*/
+static inline void write_atomic_seqlock(atomic_seqlock_t *sl)
+{
+ atomic_spin_lock(&sl->lock);
+ ++sl->sequence;
+ smp_wmb();
+}
+
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -64,6 +88,13 @@ static inline void write_seqlock(seqlock_t *sl)
smp_wmb();
}
+static inline void write_atomic_sequnlock(atomic_seqlock_t *sl)
+{
+ smp_wmb();
+ sl->sequence++;
+ atomic_spin_unlock(&sl->lock);
+}
+
static inline void write_sequnlock(seqlock_t *sl)
{
smp_wmb();
@@ -83,6 +114,21 @@ static inline int write_tryseqlock(seqlock_t *sl)
}
/* Start of read calculation -- fetch last complete writer token */
+static __always_inline unsigned read_atomic_seqbegin(const atomic_seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = sl->sequence;
+ smp_rmb();
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+
+ return ret;
+}
+
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret;
@@ -103,6 +149,14 @@ repeat:
*
* If sequence value changed then writer changed data while in section.
*/
+static __always_inline int
+read_atomic_seqretry(const atomic_seqlock_t *sl, unsigned start)
+{
+ smp_rmb();
+
+ return (sl->sequence != start);
+}
+
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
smp_rmb();
@@ -170,12 +224,36 @@ static inline void write_seqcount_end(seqcount_t *s)
/*
* Possible sw/hw IRQ protected versions of the interfaces.
*/
+#define write_atomic_seqlock_irqsave(lock, flags) \
+ do { local_irq_save(flags); write_atomic_seqlock(lock); } while (0)
+#define write_atomic_seqlock_irq(lock) \
+ do { local_irq_disable(); write_atomic_seqlock(lock); } while (0)
+#define write_atomic_seqlock_bh(lock) \
+ do { local_bh_disable(); write_atomic_seqlock(lock); } while (0)
+
+#define write_atomic_sequnlock_irqrestore(lock, flags) \
+ do { write_atomic_sequnlock(lock); local_irq_restore(flags); } while(0)
+#define write_atomic_sequnlock_irq(lock) \
+ do { write_atomic_sequnlock(lock); local_irq_enable(); } while(0)
+#define write_atomic_sequnlock_bh(lock) \
+ do { write_atomic_sequnlock(lock); local_bh_enable(); } while(0)
+
+#define read_atomic_seqbegin_irqsave(lock, flags) \
+ ({ local_irq_save(flags); read_atomic_seqbegin(lock); })
+
+#define read_atomic_seqretry_irqrestore(lock, iv, flags) \
+ ({ \
+ int ret = read_atomic_seqretry(lock, iv); \
+ local_irq_restore(flags); \
+ ret; \
+ })
+
#define write_seqlock_irqsave(lock, flags) \
do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock) \
do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock) \
- do { local_bh_disable(); write_seqlock(lock); } while (0)
+ do { local_bh_disable(); write_seqlock(lock); } while (0)
#define write_sequnlock_irqrestore(lock, flags) \
do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
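
The new atomic_seqlock_t mirrors seqlock_t exactly, but its writer side takes an atomic spinlock, so seqlock writers that must not be preempted on RT (like xtime_lock, converted below) keep spinning. A minimal reader/writer sketch, names hypothetical:

    #include <linux/seqlock.h>
    #include <linux/time.h>

    static DEFINE_ATOMIC_SEQLOCK(demo_seq);	/* hypothetical */
    static struct timespec demo_time;

    static void demo_update(struct timespec ts)
    {
        write_atomic_seqlock(&demo_seq);	/* exclusive writer */
        demo_time = ts;
        write_atomic_sequnlock(&demo_seq);
    }

    static struct timespec demo_get(void)
    {
        struct timespec ts;
        unsigned seq;

        do {
            seq = read_atomic_seqbegin(&demo_seq);
            ts = demo_time;
        } while (read_atomic_seqretry(&demo_seq, seq));	/* raced a writer? retry */

        return ts;
    }
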
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..4e84c621713e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -91,44 +91,32 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __spin_lock_init((lock), #lock, &__key); \
-} while (0)
+ extern void __atomic_spin_lock_init(atomic_spinlock_t *lock,
+ const char *name,
+ struct lock_class_key *key);
-#else
-# define spin_lock_init(lock) \
- do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
+# define atomic_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __rwlock_init((lock), #lock, &__key); \
+ __atomic_spin_lock_init((lock), #lock, &__key); \
} while (0)
+
#else
-# define rwlock_init(lock) \
- do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+# define atomic_spin_lock_init(lock) \
+ do { *(lock) = __ATOMIC_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define atomic_spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define atomic_spin_is_contended(lock) ((lock)->break_lock)
#else
#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#define atomic_spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
+#define atomic_spin_is_contended(lock) (((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif
@@ -141,7 +129,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define atomic_spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -153,209 +141,128 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
+ extern void _raw_spin_lock(atomic_spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern int _raw_spin_trylock(atomic_spinlock_t *lock);
+ extern void _raw_spin_unlock(atomic_spinlock_t *lock);
#else
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define atomic_spin_trylock(lock) __cond_lock(lock, _atomic_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define atomic_spin_lock(lock) _atomic_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define atomic_spin_lock_nested(lock, subclass) \
+ _atomic_spin_lock_nested(lock, subclass)
+
+# define atomic_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _atomic_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);\
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define atomic_spin_lock_nested(lock, subclass) _atomic_spin_lock(lock)
+# define atomic_spin_lock_nest_lock(lock, nest_lock) _atomic_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
+#define atomic_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _atomic_spin_lock_irqsave(lock);\
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _atomic_spin_lock_irqsave_nested(lock, subclass);\
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _atomic_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define atomic_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _atomic_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-
-#endif
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
+#define atomic_spin_lock_irqsave_nested(lock, flags, subclass) \
+ atomic_spin_lock_irqsave(lock, flags)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
+#endif
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
+#define atomic_spin_lock_irq(lock) _atomic_spin_lock_irq(lock)
+#define atomic_spin_lock_bh(lock) _atomic_spin_lock_bh(lock)
/*
* We inline the unlock functions in the nondebug case:
*/
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
!defined(CONFIG_SMP)
-# define spin_unlock(lock) _spin_unlock(lock)
-# define read_unlock(lock) _read_unlock(lock)
-# define write_unlock(lock) _write_unlock(lock)
-# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-# define read_unlock_irq(lock) _read_unlock_irq(lock)
-# define write_unlock_irq(lock) _write_unlock_irq(lock)
+# define atomic_spin_unlock(lock) _atomic_spin_unlock(lock)
+# define atomic_spin_unlock_irq(lock) _atomic_spin_unlock_irq(lock)
#else
-# define spin_unlock(lock) \
+# define atomic_spin_unlock(lock) \
do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
- do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
- do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
+
+# define atomic_spin_unlock_irq(lock) \
do { \
__raw_spin_unlock(&(lock)->raw_lock); \
__release(lock); \
local_irq_enable(); \
} while (0)
-# define read_unlock_irq(lock) \
-do { \
- __raw_read_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
- __raw_write_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
#endif
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
-
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
-
-#define write_unlock_irqrestore(lock, flags) \
+#define atomic_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+ _atomic_spin_unlock_irqrestore(lock, flags);\
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define atomic_spin_unlock_bh(lock) _atomic_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define atomic_spin_trylock_bh(lock) \
+ __cond_lock(lock, _atomic_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define atomic_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ atomic_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define atomic_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ atomic_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+/**
+ * atomic_spin_can_lock - would atomic_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define atomic_spin_can_lock(lock) (!atomic_spin_is_locked(lock))
/*
* Pull the atomic_t declaration:
@@ -370,14 +277,152 @@ do { \
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+extern int
+_atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock);
+
+#define atomic_dec_and_atomic_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_atomic_lock(atomic, lock))
+
+/*
+ * Map spin* to atomic_spin* for PREEMPT_RT=n
+ */
+static inline void spin_lockcheck(spinlock_t *lock) { }
+
+#define spin_lock_init(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_init((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_lock(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_lock_bh(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_bh((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_trylock(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_trylock((atomic_spinlock_t *)lock); \
+})
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_nested((atomic_spinlock_t *)lock, subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_nest_lock((atomic_spinlock_t *)lock, nest_lock); \
+} while (0)
+
+#define spin_lock_irq(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_irq((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_irqsave((atomic_spinlock_t *)lock, flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_lock_irqsave_nested((atomic_spinlock_t *)lock, flags, subclass); \
+} while (0)
+
+#define spin_unlock(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_unlock((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_unlock_bh(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_unlock_bh((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_unlock_irq(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_unlock_irq((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_unlock_irqrestore((atomic_spinlock_t *)lock, flags); \
+} while (0)
+
+#define spin_trylock_bh(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_trylock_bh((atomic_spinlock_t *)lock); \
+})
+
+#define spin_trylock_irq(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_trylock_irq((atomic_spinlock_t *)lock); \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_trylock_irqsave((atomic_spinlock_t *)lock, flags); \
+})
+
+#define spin_unlock_wait(lock) \
+do { \
+ spin_lockcheck(lock); \
+ atomic_spin_unlock_wait((atomic_spinlock_t *)lock); \
+} while (0)
+
+#define spin_is_locked(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_is_locked((atomic_spinlock_t *)lock); \
+})
+
+#define spin_is_contended(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_is_contended((atomic_spinlock_t *)lock); \
+})
+
+#define spin_can_lock(lock) \
+({ \
+ spin_lockcheck(lock); \
+ atomic_spin_can_lock((atomic_spinlock_t *)lock); \
+})
+
+#define assert_spin_locked(lock) \
+do { \
+ spin_lockcheck(lock); \
+ assert_atomic_spin_locked((atomic_spinlock_t *)lock); \
+} while (0)
+
#define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+({ \
+ spin_lockcheck(lock); \
+ atomic_dec_and_atomic_lock(atomic, (atomic_spinlock_t *)lock); \
+})
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
+/*
+ * Get the rwlock part
*/
-#define spin_can_lock(lock) (!spin_is_locked(lock))
+#include <linux/rwlock.h>
#endif /* __LINUX_SPINLOCK_H */
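
For PREEMPT_RT=n, spinlock_t and atomic_spinlock_t share an identical layout, so the compatibility macros above just type-check the argument with the empty spin_lockcheck() inline and cast it across. Existing users compile unchanged; a minimal sketch, names hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);	/* still spinlock_t */

    static void demo(void)
    {
        unsigned long flags;

        /* expands to atomic_spin_lock_irqsave() via the mapping macros */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);
    }

The spin_lockcheck() call is the only type safety left once the cast is applied: passing anything but a spinlock_t pointer fails there rather than silently going through the cast.
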
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..52df959a5687 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,44 +17,70 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
+#define assert_atomic_spin_locked(x) BUG_ON(!atomic_spin_is_locked(x))
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc
+_atomic_spin_lock(atomic_spinlock_t *lock) __acquires(lock);
+
+void __lockfunc
+_atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
+ __acquires(lock);
+
+void __lockfunc
+_atomic_spin_lock_nest_lock(atomic_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_atomic_spin_lock_bh(atomic_spinlock_t *lock) __acquires(lock);
+
+void __lockfunc
+_atomic_spin_lock_irq(atomic_spinlock_t *lock) __acquires(lock);
+
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave(atomic_spinlock_t *lock) __acquires(lock);
+
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
__acquires(lock);
+
+int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock);
+int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock);
+
+void __lockfunc
+_atomic_spin_unlock(atomic_spinlock_t *lock) __releases(lock);
+
+void __lockfunc
+_atomic_spin_unlock_bh(atomic_spinlock_t *lock) __releases(lock);
+
+void __lockfunc
+_atomic_spin_unlock_irq(atomic_spinlock_t *lock) __releases(lock);
+
+void __lockfunc
+_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+
void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
+
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
+
void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
+
void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
+
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..d90b0702a0b8 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_atomic_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -48,33 +48,35 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _atomic_spin_lock(lock) __LOCK(lock)
+#define _atomic_spin_lock_nested(lock, subclass) \
+ __LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
+#define _atomic_spin_lock_bh(lock) __LOCK_BH(lock)
#define _read_lock_bh(lock) __LOCK_BH(lock)
#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _atomic_spin_lock_irq(lock) __LOCK_IRQ(lock)
#define _read_lock_irq(lock) __LOCK_IRQ(lock)
#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _atomic_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _atomic_spin_trylock(lock) ({ __LOCK(lock); 1; })
#define _read_trylock(lock) ({ __LOCK(lock); 1; })
#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
+#define _atomic_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _atomic_spin_unlock(lock) __UNLOCK(lock)
#define _read_unlock(lock) __UNLOCK(lock)
#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _atomic_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _atomic_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _atomic_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..b13efc9840dd 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} atomic_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,71 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
+ (atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+# define __ATOMIC_SPIN_LOCK_UNLOCKED(lockname) \
+ (atomic_spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
#endif
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
+ */
+#define DEFINE_ATOMIC_SPINLOCK(x) \
+ atomic_spinlock_t x = __ATOMIC_SPIN_LOCK_UNLOCKED(x)
+
+/*
+ * For PREEMPT_RT=n we use the same data structures, and the spinlock
+ * functions are mapped to the atomic_spinlock functions.
+ */
+typedef struct {
+ raw_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ .magic = SPINLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#else
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
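
Both static initializers now coexist, so a file can declare each flavour explicitly. A minimal sketch, names hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_ATOMIC_SPINLOCK(demo_alock);	/* always spins, even on RT */
    static DEFINE_SPINLOCK(demo_slock);		/* may become sleeping on RT */
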
diff --git a/include/linux/time.h b/include/linux/time.h
index ea16c1a01d51..89cf57bc0c6d 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -99,7 +99,7 @@ static inline struct timespec timespec_sub(struct timespec lhs,
extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
-extern seqlock_t xtime_lock;
+extern atomic_seqlock_t xtime_lock;
extern unsigned long read_persistent_clock(void);
extern int update_persistent_clock(struct timespec now);
diff --git a/kernel/Makefile b/kernel/Makefile
index 2093a691f1c2..cbd298929475 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -45,9 +45,9 @@ obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
ifneq ($(CONFIG_SMP),y)
obj-y += up.o
endif
-obj-$(CONFIG_SMP) += spinlock.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
-obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_SMP) += spinlock.o rwlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o rwlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o rwlock.o
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3737a682cdf5..3c04b8b662ba 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -188,7 +188,7 @@ list_for_each_entry(_root, &roots, root_list)
/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_ATOMIC_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
@@ -2733,11 +2733,11 @@ again:
finish_wait(&cgroup_rmdir_waitq, &wait);
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
- spin_lock(&release_list_lock);
+ atomic_spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
if (!list_empty(&cgrp->release_list))
list_del(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ atomic_spin_unlock(&release_list_lock);
cgroup_lock_hierarchy(cgrp->root);
/* delete this cgroup from parent->children */
@@ -3273,13 +3273,13 @@ static void check_for_release(struct cgroup *cgrp)
* already queued for a userspace notification, queue
* it now */
int need_schedule_work = 0;
- spin_lock(&release_list_lock);
+ atomic_spin_lock(&release_list_lock);
if (!cgroup_is_removed(cgrp) &&
list_empty(&cgrp->release_list)) {
list_add(&cgrp->release_list, &release_list);
need_schedule_work = 1;
}
- spin_unlock(&release_list_lock);
+ atomic_spin_unlock(&release_list_lock);
if (need_schedule_work)
schedule_work(&release_agent_work);
}
@@ -3326,7 +3326,7 @@ static void cgroup_release_agent(struct work_struct *work)
{
BUG_ON(work != &release_agent_work);
mutex_lock(&cgroup_mutex);
- spin_lock(&release_list_lock);
+ atomic_spin_lock(&release_list_lock);
while (!list_empty(&release_list)) {
char *argv[3], *envp[3];
int i;
@@ -3335,7 +3335,7 @@ static void cgroup_release_agent(struct work_struct *work)
struct cgroup,
release_list);
list_del_init(&cgrp->release_list);
- spin_unlock(&release_list_lock);
+ atomic_spin_unlock(&release_list_lock);
pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pathbuf)
goto continue_free;
@@ -3365,9 +3365,9 @@ static void cgroup_release_agent(struct work_struct *work)
continue_free:
kfree(pathbuf);
kfree(agentbuf);
- spin_lock(&release_list_lock);
+ atomic_spin_lock(&release_list_lock);
}
- spin_unlock(&release_list_lock);
+ atomic_spin_unlock(&release_list_lock);
mutex_unlock(&cgroup_mutex);
}
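
The cgroup_release_agent() hunks above also show the discipline an atomic spinlock demands: release_list_lock is dropped before kmalloc(GFP_KERNEL) and retaken afterwards, since sleeping under an always-spinning lock is forbidden. A minimal sketch of that drop-and-reacquire loop, names hypothetical:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_ATOMIC_SPINLOCK(demo_list_lock);	/* hypothetical */
    static LIST_HEAD(demo_list);

    struct demo_item {
        struct list_head node;
    };

    static void demo_drain(void)
    {
        atomic_spin_lock(&demo_list_lock);
        while (!list_empty(&demo_list)) {
            struct demo_item *it =
                list_first_entry(&demo_list, struct demo_item, node);
            void *buf;

            list_del_init(&it->node);
            atomic_spin_unlock(&demo_list_lock);	/* drop: about to sleep */

            buf = kmalloc(256, GFP_KERNEL);		/* may sleep */
            /* ... process the item ... */
            kfree(buf);

            atomic_spin_lock(&demo_list_lock);	/* reacquire, continue walk */
        }
        atomic_spin_unlock(&demo_list_lock);
    }
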
diff --git a/kernel/exit.c b/kernel/exit.c
index 869dc221733e..aed4d2e8a66a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -930,7 +930,7 @@ NORET_TYPE void do_exit(long code)
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
- spin_unlock_wait(&tsk->pi_lock);
+ atomic_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
diff --git a/kernel/fork.c b/kernel/fork.c
index 03d3b8317327..714976f65c72 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -907,9 +907,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
static void rt_mutex_init_task(struct task_struct *p)
{
- spin_lock_init(&p->pi_lock);
+ atomic_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&p->pi_waiters, &p->pi_lock);
+ plist_head_init_atomic(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 0672ff88f159..974c24b985e1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -392,9 +392,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
@@ -459,18 +459,18 @@ void exit_pi_state_list(struct task_struct *curr)
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
- spin_lock_irq(&curr->pi_lock);
+ atomic_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
- spin_unlock_irq(&curr->pi_lock);
+ atomic_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ atomic_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
@@ -484,15 +484,15 @@ void exit_pi_state_list(struct task_struct *curr)
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
- spin_unlock_irq(&curr->pi_lock);
+ atomic_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ atomic_spin_lock_irq(&curr->pi_lock);
}
- spin_unlock_irq(&curr->pi_lock);
+ atomic_spin_unlock_irq(&curr->pi_lock);
}
static int
@@ -547,7 +547,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
* change of the task flags, we do this protected by
* p->pi_lock:
*/
- spin_lock_irq(&p->pi_lock);
+ atomic_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
@@ -556,7 +556,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
- spin_unlock_irq(&p->pi_lock);
+ atomic_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
@@ -575,7 +575,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
- spin_unlock_irq(&p->pi_lock);
+ atomic_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -749,7 +749,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!pi_state)
return -EINVAL;
- spin_lock(&pi_state->pi_mutex.wait_lock);
+ atomic_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
@@ -778,23 +778,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
else if (curval != uval)
ret = -EINVAL;
if (ret) {
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ atomic_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
- spin_lock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_unlock_irq(&pi_state->owner->pi_lock);
- spin_lock_irq(&new_owner->pi_lock);
+ atomic_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
- spin_unlock_irq(&new_owner->pi_lock);
+ atomic_spin_unlock_irq(&new_owner->pi_lock);
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ atomic_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
@@ -999,7 +999,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb2->lock;
+ q->list.plist.slock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1337,7 +1337,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.slock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
@@ -1474,18 +1474,18 @@ retry:
* itself.
*/
if (pi_state->owner != NULL) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ atomic_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
- spin_lock_irq(&newowner->pi_lock);
+ atomic_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
- spin_unlock_irq(&newowner->pi_lock);
+ atomic_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
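
Note the lock ordering these futex hunks establish: the hash-bucket lock stays a spinlock_t and is always taken outside the task's pi_lock, which is now an atomic spinlock; the inner lock can therefore be taken with interrupts disabled regardless of what the outer lock becomes on RT. A minimal sketch of the nesting, names hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_outer);		/* hypothetical bucket lock */
    static DEFINE_ATOMIC_SPINLOCK(demo_inner);	/* hypothetical pi lock */

    static void demo(void)
    {
        spin_lock(&demo_outer);			/* outer lock first */
        atomic_spin_lock_irq(&demo_inner);	/* inner: never sleeps */
        /* ... manipulate PI state ... */
        atomic_spin_unlock_irq(&demo_inner);
        spin_unlock(&demo_outer);
    }
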
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ab5eb7042880..82636dfd71cf 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -86,10 +86,10 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
xts = current_kernel_time();
tom = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
xtim = timespec_to_ktime(xts);
tomono = timespec_to_ktime(tom);
@@ -125,11 +125,12 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
for (;;) {
base = timer->base;
if (likely(base != NULL)) {
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ atomic_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
- spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+ atomic_spin_unlock_irqrestore(&base->cpu_base->lock,
+ *flags);
}
cpu_relax();
}
@@ -206,13 +207,13 @@ again:
/* See the comment in lock_timer_base() */
timer->base = NULL;
- spin_unlock(&base->cpu_base->lock);
- spin_lock(&new_base->cpu_base->lock);
+ atomic_spin_unlock(&base->cpu_base->lock);
+ atomic_spin_lock(&new_base->cpu_base->lock);
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
cpu = this_cpu;
- spin_unlock(&new_base->cpu_base->lock);
- spin_lock(&base->cpu_base->lock);
+ atomic_spin_unlock(&new_base->cpu_base->lock);
+ atomic_spin_lock(&base->cpu_base->lock);
timer->base = base;
goto again;
}
@@ -228,7 +229,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
struct hrtimer_clock_base *base = timer->base;
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ atomic_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
@@ -581,21 +582,21 @@ static void retrigger_next_event(void *arg)
return;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
set_normalized_timespec(&realtime_offset,
-wall_to_monotonic.tv_sec,
-wall_to_monotonic.tv_nsec);
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
base = &__get_cpu_var(hrtimer_bases);
/* Adjust CLOCK_REALTIME offset */
- spin_lock(&base->lock);
+ atomic_spin_lock(&base->lock);
base->clock_base[CLOCK_REALTIME].offset =
timespec_to_ktime(realtime_offset);
hrtimer_force_reprogram(base);
- spin_unlock(&base->lock);
+ atomic_spin_unlock(&base->lock);
}
/*
@@ -656,9 +657,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (wakeup) {
- spin_unlock(&base->cpu_base->lock);
+ atomic_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ atomic_spin_lock(&base->cpu_base->lock);
} else
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -737,7 +738,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
- spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+ atomic_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
@@ -1060,7 +1061,7 @@ ktime_t hrtimer_get_next_event(void)
unsigned long flags;
int i;
- spin_lock_irqsave(&cpu_base->lock, flags);
+ atomic_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active()) {
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1077,7 +1078,7 @@ ktime_t hrtimer_get_next_event(void)
}
}
- spin_unlock_irqrestore(&cpu_base->lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_base->lock, flags);
if (mindelta.tv64 < 0)
mindelta.tv64 = 0;
@@ -1160,9 +1161,9 @@ static void __run_hrtimer(struct hrtimer *timer)
* they get migrated to another cpu, therefore it's safe to unlock
* the timer base.
*/
- spin_unlock(&cpu_base->lock);
+ atomic_spin_unlock(&cpu_base->lock);
restart = fn(timer);
- spin_lock(&cpu_base->lock);
+ atomic_spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1226,7 +1227,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
expires_next.tv64 = KTIME_MAX;
- spin_lock(&cpu_base->lock);
+ atomic_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1282,7 +1283,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
* against it.
*/
cpu_base->expires_next = expires_next;
- spin_unlock(&cpu_base->lock);
+ atomic_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
if (expires_next.tv64 != KTIME_MAX) {
@@ -1384,7 +1385,7 @@ void hrtimer_run_queues(void)
gettime = 0;
}
- spin_lock(&cpu_base->lock);
+ atomic_spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
@@ -1396,7 +1397,7 @@ void hrtimer_run_queues(void)
__run_hrtimer(timer);
}
- spin_unlock(&cpu_base->lock);
+ atomic_spin_unlock(&cpu_base->lock);
}
}
@@ -1551,7 +1552,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- spin_lock_init(&cpu_base->lock);
+ atomic_spin_lock_init(&cpu_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1609,16 +1610,16 @@ static void migrate_hrtimers(int scpu)
* The caller is globally serialized and nobody else
* takes two locks at once, so deadlock is not possible.
*/
- spin_lock(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_lock(&new_base->lock);
+ atomic_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ atomic_spin_unlock(&old_base->lock);
+ atomic_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
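
Both xtime_lock readers converted above follow the classic seqlock read loop: sample the sequence count, copy the protected data, and retry if a writer ran in between. Condensed into one sketch (helper name hypothetical; read_atomic_seqbegin()/read_atomic_seqretry() are the primitives this series adds):

    static ktime_t sample_wall_time_sketch(void)
    {
            struct timespec xts, tom;
            unsigned long seq;

            do {
                    seq = read_atomic_seqbegin(&xtime_lock);
                    xts = current_kernel_time();
                    tom = wall_to_monotonic;
            } while (read_atomic_seqretry(&xtime_lock, seq));

            /* xts and tom now form a consistent snapshot. */
            return ktime_add(timespec_to_ktime(xts), timespec_to_ktime(tom));
    }
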
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416e..ed0377d97288 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
desc->chip->set_type(i, IRQ_TYPE_PROBE);
desc->chip->startup(i);
}
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
int i;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 13c68e71b726..474657a806d7 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
}
/* Ensure we don't have left over values from a previous use of this irq */
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
cpumask_clear(desc->pending_mask);
#endif
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
irq);
return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
desc->chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
if (!chip)
chip = &no_irq_chip;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
if (type == IRQ_TYPE_NONE)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
ret = __irq_set_trigger(desc, irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -317,7 +317,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -329,16 +329,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
/**
@@ -357,7 +357,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -374,18 +374,18 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -405,7 +405,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
@@ -427,18 +427,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
/**
@@ -460,7 +460,7 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -506,17 +506,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
}
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
}
/**
@@ -572,7 +572,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->chip = &dummy_irq_chip;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
@@ -590,7 +590,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->depth = 0;
desc->chip->startup(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -620,9 +620,9 @@ void __init set_irq_noprobe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init set_irq_probe(unsigned int irq)
@@ -635,7 +635,7 @@ void __init set_irq_probe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
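
All of the flow handlers touched above share one locking shape: take desc->lock, mark the descriptor IRQ_INPROGRESS, drop the lock across handle_IRQ_event(), then retake it to clear the flag. A stripped-down sketch of that shape (per-flow masking, ack/eoi and the in-progress/disabled checks are omitted):

    static void flow_handler_sketch(unsigned int irq, struct irq_desc *desc)
    {
            irqreturn_t action_ret;

            atomic_spin_lock(&desc->lock);
            desc->status |= IRQ_INPROGRESS;
            atomic_spin_unlock(&desc->lock);   /* never call handlers locked */

            action_ret = handle_IRQ_event(irq, desc->action);
            if (!noirqdebug)
                    note_interrupt(irq, desc, action_ret);

            atomic_spin_lock(&desc->lock);
            desc->status &= ~IRQ_INPROGRESS;
            atomic_spin_unlock(&desc->lock);
    }
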
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 065205bdd920..3d5e31a45650 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -79,7 +79,7 @@ static struct irq_desc irq_desc_init = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -107,7 +107,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- spin_lock_init(&desc->lock);
+ atomic_spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
desc->node = node;
@@ -129,7 +129,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
/*
* Protect the sparse_irqs:
*/
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_ATOMIC_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
@@ -140,7 +140,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
}
};
@@ -208,7 +208,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
if (desc)
return desc;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ atomic_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -230,7 +230,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
irq_desc_ptrs[irq] = desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ atomic_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
@@ -243,7 +243,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
@@ -469,7 +469,7 @@ unsigned int __do_IRQ(unsigned int irq)
return 1;
}
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
@@ -513,13 +513,13 @@ unsigned int __do_IRQ(unsigned int irq)
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
@@ -532,7 +532,7 @@ out:
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
return 1;
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e70ed5592eb9..d53fa526665b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern atomic_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c679db4687..8ca7b20d202f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
if (!desc->chip->set_affinity)
return -EINVAL;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
}
#endif
desc->status |= IRQ_AFFINITY_SET;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
unsigned long flags;
int ret;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
irq_set_thread_affinity(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -230,9 +230,9 @@ void disable_irq_nosync(unsigned int irq)
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -304,9 +304,9 @@ void enable_irq(unsigned int irq)
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
@@ -342,7 +342,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
@@ -363,7 +363,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_wake);
@@ -472,9 +472,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
return;
}
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
cpumask_copy(mask, desc->affinity);
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
@@ -503,7 +503,7 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
- spin_lock_irq(&desc->lock);
+ atomic_spin_lock_irq(&desc->lock);
if (unlikely(desc->status & IRQ_DISABLED)) {
/*
* CHECKME: We might need a dedicated
@@ -513,9 +513,9 @@ static int irq_thread(void *data)
* retriggers the interrupt itself --- tglx
*/
desc->status |= IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
} else {
- spin_unlock_irq(&desc->lock);
+ atomic_spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
}
@@ -613,7 +613,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
@@ -705,7 +705,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
__enable_irq(desc, irq, false);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
new->irq = irq;
register_irq_proc(irq, desc);
@@ -726,7 +726,7 @@ mismatch:
ret = -EBUSY;
out_thread:
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
struct task_struct *t = new->thread;
@@ -769,7 +769,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!desc)
return NULL;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
@@ -781,7 +781,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
@@ -812,7 +812,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
irqthread = action->thread;
action->thread = NULL;
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
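
The set_irq_wake() hunk above shows only the locking; the logic underneath is a reference count, so wakeup-capable IRQs shared between drivers compose: only the first enable and the last disable reach the hardware. A sketch of the depth counting, with the error-rollback and unbalanced-disable checks omitted (the function name is hypothetical; set_irq_wake_real() is the real low-level call):

    static int irq_wake_sketch(struct irq_desc *desc, unsigned int irq, int on)
    {
            unsigned long flags;
            int ret = 0;

            atomic_spin_lock_irqsave(&desc->lock, flags);
            if (on) {
                    if (desc->wake_depth++ == 0)    /* first enabler */
                            ret = set_irq_wake_real(irq, 1);
            } else {
                    if (--desc->wake_depth == 0)    /* last disabler */
                            ret = set_irq_wake_real(irq, 0);
            }
            atomic_spin_unlock_irqrestore(&desc->lock, flags);
            return ret;
    }
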
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f2627..b1de750f91f2 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
if (!desc->chip->set_affinity)
return;
- assert_spin_locked(&desc->lock);
+ assert_atomic_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 2f69bee57bf2..e002f4383b38 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
"for migration.\n", irq);
return false;
}
- spin_lock_init(&desc->lock);
+ atomic_spin_lock_init(&desc->lock);
desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
irq = old_desc->irq;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ atomic_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
}
irq_desc_ptrs[irq] = desc;
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ atomic_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
return desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ atomic_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 638d8bedec14..e4e378324fbf 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
if (!(desc->status & IRQ_SUSPENDED))
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 692363dd591f..d4ae675787cb 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -169,7 +169,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ atomic_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -177,7 +177,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ atomic_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 4d568294de3e..4ee89c9c031c 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
struct irqaction *action;
int ok = 0, work = 0;
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
action = desc->action;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
while (action) {
/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
}
local_irq_disable();
/* Now clean up the flags */
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
action = desc->action;
/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
* Perform real IRQ processing for the IRQ we deferred
*/
work = 1;
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
handle_IRQ_event(irq, action);
- spin_lock(&desc->lock);
+ atomic_spin_lock(&desc->lock);
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ atomic_spin_unlock(&desc->lock);
return ok;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739c516a..0147a3108b16 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -73,10 +73,10 @@ static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
- spinlock_t lock ____cacheline_aligned_in_smp;
+ atomic_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static atomic_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
return &(kretprobe_table_locks[hash].lock);
}
@@ -415,9 +415,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
hlist_del(&ri->hlist);
INIT_HLIST_NODE(&ri->hlist);
if (likely(rp)) {
- spin_lock(&rp->lock);
+ atomic_spin_lock(&rp->lock);
hlist_add_head(&ri->hlist, &rp->free_instances);
- spin_unlock(&rp->lock);
+ atomic_spin_unlock(&rp->lock);
} else
/* Unregistering */
hlist_add_head(&ri->hlist, head);
@@ -427,34 +427,34 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
struct hlist_head **head, unsigned long *flags)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ atomic_spinlock_t *hlist_lock;
*head = &kretprobe_inst_table[hash];
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ atomic_spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
unsigned long *flags)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ atomic_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ atomic_spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
unsigned long *flags)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ atomic_spinlock_t *hlist_lock;
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ atomic_spin_unlock_irqrestore(hlist_lock, *flags);
}
void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ atomic_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ atomic_spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
@@ -969,12 +969,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
/* TODO: consider swapping the RA only after the last pre_handler has fired */
hash = hash_ptr(current, KPROBE_HASH_BITS);
- spin_lock_irqsave(&rp->lock, flags);
+ atomic_spin_lock_irqsave(&rp->lock, flags);
if (!hlist_empty(&rp->free_instances)) {
ri = hlist_entry(rp->free_instances.first,
struct kretprobe_instance, hlist);
hlist_del(&ri->hlist);
- spin_unlock_irqrestore(&rp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rp->lock, flags);
ri->rp = rp;
ri->task = current;
@@ -991,7 +991,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
kretprobe_table_unlock(hash, &flags);
} else {
rp->nmissed++;
- spin_unlock_irqrestore(&rp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rp->lock, flags);
}
return 0;
}
@@ -1027,7 +1027,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->maxactive = NR_CPUS;
#endif
}
- spin_lock_init(&rp->lock);
+ atomic_spin_lock_init(&rp->lock);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1207,7 +1207,7 @@ static int __init init_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
- spin_lock_init(&(kretprobe_table_locks[i].lock));
+ atomic_spin_lock_init(&(kretprobe_table_locks[i].lock));
}
/*
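
kretprobe_table_locks, converted above, is an instance of a hashed lock table: the task pointer is hashed to select one lock out of a fixed, cacheline-aligned array, so unrelated tasks land on different locks and rarely contend. A self-contained sketch of the pattern (sizes and names hypothetical):

    #define SKETCH_TABLE_BITS 6
    #define SKETCH_TABLE_SIZE (1 << SKETCH_TABLE_BITS)

    static struct {
            atomic_spinlock_t lock ____cacheline_aligned_in_smp;
    } sketch_locks[SKETCH_TABLE_SIZE];

    static void sketch_locks_init(void)
    {
            int i;

            for (i = 0; i < SKETCH_TABLE_SIZE; i++)
                    atomic_spin_lock_init(&sketch_locks[i].lock);
    }

    static void sketch_lock_for(struct task_struct *tsk, unsigned long *flags)
    {
            /* hash_ptr() picks the bucket; one lock per bucket */
            unsigned long hash = hash_ptr(tsk, SKETCH_TABLE_BITS);

            atomic_spin_lock_irqsave(&sketch_locks[hash].lock, *flags);
    }
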
diff --git a/kernel/lock-internals.h b/kernel/lock-internals.h
new file mode 100644
index 000000000000..4f0bc8bdc444
--- /dev/null
+++ b/kernel/lock-internals.h
@@ -0,0 +1,75 @@
+/*
+ * Macros shared by spinlock.c and rwlock.c
+ */
+/*
+ * This could be a long-held lock. We both prepare to spin for a long
+ * time (making _this_ CPU preemptable if possible), and we also signal
+ * towards that other CPU that it should break the lock ASAP.
+ *
+ * (We do this in a function because inlining it would be excessive.)
+ */
+
+#define BUILD_LOCK_OPS(op, locktype) \
+void __lockfunc _##op##_lock(locktype##_t *lock) \
+{ \
+ for (;;) { \
+ preempt_disable(); \
+ if (likely(_raw_##op##_trylock(lock))) \
+ break; \
+ preempt_enable(); \
+ \
+ if (!(lock)->break_lock) \
+ (lock)->break_lock = 1; \
+ while (!op##_can_lock(lock) && (lock)->break_lock) \
+ _raw_##op##_relax(&lock->raw_lock); \
+ } \
+ (lock)->break_lock = 0; \
+} \
+ \
+EXPORT_SYMBOL(_##op##_lock); \
+ \
+unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+{ \
+ unsigned long flags; \
+ \
+ for (;;) { \
+ preempt_disable(); \
+ local_irq_save(flags); \
+ if (likely(_raw_##op##_trylock(lock))) \
+ break; \
+ local_irq_restore(flags); \
+ preempt_enable(); \
+ \
+ if (!(lock)->break_lock) \
+ (lock)->break_lock = 1; \
+ while (!op##_can_lock(lock) && (lock)->break_lock) \
+ _raw_##op##_relax(&lock->raw_lock); \
+ } \
+ (lock)->break_lock = 0; \
+ return flags; \
+} \
+ \
+EXPORT_SYMBOL(_##op##_lock_irqsave); \
+ \
+void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
+{ \
+ _##op##_lock_irqsave(lock); \
+} \
+ \
+EXPORT_SYMBOL(_##op##_lock_irq); \
+ \
+void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
+{ \
+ unsigned long flags; \
+ \
+ /* \
+  * Careful: we must exclude softirqs too, hence the \
+  * irq-disabling. We use the generic preemption-aware \
+  * function: \
+  */ \
+ flags = _##op##_lock_irqsave(lock); \
+ local_bh_disable(); \
+ local_irq_restore(flags); \
+} \
+ \
+EXPORT_SYMBOL(_##op##_lock_bh)
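
For reference, here is what the macro above produces for the plain lock operation when instantiated for the new lock type, e.g. BUILD_LOCK_OPS(atomic_spin, atomic_spinlock) in spinlock.c (a hand-expanded sketch; the actual instantiation sites are not part of this hunk):

    void __lockfunc _atomic_spin_lock(atomic_spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(_raw_atomic_spin_trylock(lock)))
                            break;
                    preempt_enable();       /* stay preemptible while spinning */

                    if (!lock->break_lock)
                            lock->break_lock = 1;   /* ask the holder to release ASAP */
                    while (!atomic_spin_can_lock(lock) && lock->break_lock)
                            _raw_atomic_spin_relax(&lock->raw_lock);
            }
            lock->break_lock = 0;
    }
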
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 950931041954..d740e431cfab 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -192,14 +192,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- spin_lock_irqsave(&ctx->lock, *flags);
+ atomic_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
@@ -220,7 +220,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
@@ -229,9 +229,9 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
{
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ atomic_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}
@@ -358,7 +358,7 @@ static void __perf_counter_remove_from_context(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* counters on a global level.
@@ -380,7 +380,7 @@ static void __perf_counter_remove_from_context(void *info)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
@@ -419,12 +419,12 @@ retry:
task_oncpu_function_call(task, __perf_counter_remove_from_context,
counter);
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&counter->list_entry)) {
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -436,7 +436,7 @@ retry:
if (!list_empty(&counter->list_entry)) {
list_del_counter(counter, ctx);
}
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
}
static inline u64 perf_clock(void)
@@ -504,7 +504,7 @@ static void __perf_counter_disable(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
/*
* If the counter is on, turn it off.
@@ -520,7 +520,7 @@ static void __perf_counter_disable(void *info)
counter->state = PERF_COUNTER_STATE_OFF;
}
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -553,12 +553,12 @@ static void perf_counter_disable(struct perf_counter *counter)
retry:
task_oncpu_function_call(task, __perf_counter_disable, counter);
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
/*
* If the counter is still active, we need to retry the cross-call.
*/
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -571,7 +571,7 @@ static void perf_counter_disable(struct perf_counter *counter)
counter->state = PERF_COUNTER_STATE_OFF;
}
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
}
static int
@@ -739,7 +739,7 @@ static void __perf_install_in_context(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -789,7 +789,7 @@ static void __perf_install_in_context(void *info)
unlock:
perf_enable();
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -825,12 +825,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
counter);
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&counter->list_entry)) {
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -841,7 +841,7 @@ retry:
*/
if (list_empty(&counter->list_entry))
add_counter_to_ctx(counter, ctx);
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
}
/*
@@ -865,7 +865,7 @@ static void __perf_counter_enable(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -908,7 +908,7 @@ static void __perf_counter_enable(void *info)
}
unlock:
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -934,7 +934,7 @@ static void perf_counter_enable(struct perf_counter *counter)
return;
}
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
goto out;
@@ -949,10 +949,10 @@ static void perf_counter_enable(struct perf_counter *counter)
counter->state = PERF_COUNTER_STATE_OFF;
retry:
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_counter_enable, counter);
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the counter is still off,
@@ -971,7 +971,7 @@ static void perf_counter_enable(struct perf_counter *counter)
ctx->time - counter->total_time_enabled;
}
out:
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
}
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
@@ -993,7 +993,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
{
struct perf_counter *counter;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_counters))
goto out;
@@ -1010,7 +1010,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -1150,8 +1150,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
- spin_lock(&ctx->lock);
- spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_lock(&ctx->lock);
+ atomic_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
@@ -1165,8 +1165,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
perf_counter_sync_stat(ctx, next_ctx);
}
- spin_unlock(&next_ctx->lock);
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&next_ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
@@ -1208,7 +1208,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
struct perf_counter *counter;
int can_add_hw = 1;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_counters))
goto out;
@@ -1273,7 +1273,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -1337,7 +1337,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
struct hw_perf_counter *hwc;
u64 interrupts, freq;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
continue;
@@ -1392,7 +1392,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
perf_enable();
}
}
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
/*
@@ -1405,7 +1405,7 @@ static void rotate_ctx(struct perf_counter_context *ctx)
if (!ctx->nr_counters)
return;
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group counters too):
*/
@@ -1416,7 +1416,7 @@ static void rotate_ctx(struct perf_counter_context *ctx)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
}
void perf_counter_task_tick(struct task_struct *curr, int cpu)
@@ -1465,7 +1465,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
__perf_counter_task_sched_out(ctx);
- spin_lock(&ctx->lock);
+ atomic_spin_lock(&ctx->lock);
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (!counter->attr.enable_on_exec)
@@ -1485,7 +1485,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
if (enabled)
unclone_ctx(ctx);
- spin_unlock(&ctx->lock);
+ atomic_spin_unlock(&ctx->lock);
perf_counter_task_sched_in(task, smp_processor_id());
out:
@@ -1533,7 +1533,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
struct task_struct *task)
{
memset(ctx, 0, sizeof(*ctx));
- spin_lock_init(&ctx->lock);
+ atomic_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->counter_list);
INIT_LIST_HEAD(&ctx->event_list);
@@ -1603,7 +1603,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
@@ -1814,7 +1814,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
if (!value)
return -EINVAL;
- spin_lock_irq(&ctx->lock);
+ atomic_spin_lock_irq(&ctx->lock);
if (counter->attr.freq) {
if (value > sysctl_perf_counter_sample_rate) {
ret = -EINVAL;
@@ -1827,7 +1827,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
counter->hw.sample_period = value;
}
unlock:
- spin_unlock_irq(&ctx->lock);
+ atomic_spin_unlock_irq(&ctx->lock);
return ret;
}
@@ -3415,14 +3415,14 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
* which protects the existence of *ctx.
*/
ctx = counter->ctx;
- spin_lock_irqsave(&ctx->lock, flags);
+ atomic_spin_lock_irqsave(&ctx->lock, flags);
count = 1;
/* Re-check state now we have the lock */
if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
counter->ctx->is_active ||
counter->tstamp_stopped < ctx->time)
count = 0;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ atomic_spin_unlock_irqrestore(&ctx->lock, flags);
return count;
}
@@ -4254,7 +4254,7 @@ void perf_counter_exit_task(struct task_struct *child)
* reading child->perf_counter_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
- spin_lock(&child_ctx->lock);
+ atomic_spin_lock(&child_ctx->lock);
child->perf_counter_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -4262,7 +4262,7 @@ void perf_counter_exit_task(struct task_struct *child)
* the counters from it.
*/
unclone_ctx(child_ctx);
- spin_unlock(&child_ctx->lock);
+ atomic_spin_unlock(&child_ctx->lock);
local_irq_restore(flags);
/*
@@ -4535,11 +4535,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
- spin_lock_irq(&cpuctx->ctx.lock);
+ atomic_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
perf_max_counters - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
- spin_unlock_irq(&cpuctx->ctx.lock);
+ atomic_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
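
Several perf paths converted above use the same lock/check/unlock/retry idiom: the cross-CPU call is issued unlocked, then ctx->lock is taken to see whether it actually took effect, and the whole step is repeated if it raced. A sketch of the removal case (function name hypothetical; the callback and list helper are the ones in this file):

    static void remove_counter_sketch(struct task_struct *task,
                                      struct perf_counter_context *ctx,
                                      struct perf_counter *counter)
    {
    retry:
            task_oncpu_function_call(task, __perf_counter_remove_from_context,
                                     counter);

            atomic_spin_lock_irq(&ctx->lock);
            if (ctx->nr_active && !list_empty(&counter->list_entry)) {
                    atomic_spin_unlock_irq(&ctx->lock);
                    goto retry;             /* the cross-call lost the race */
            }
            if (!list_empty(&counter->list_entry))
                    list_del_counter(counter, ctx);
            atomic_spin_unlock_irq(&ctx->lock);
    }
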
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b2..95ec5a0eb116 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -279,7 +279,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
struct task_cputime sum;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
+ atomic_spin_lock_irqsave(&cputimer->lock, flags);
if (!cputimer->running) {
cputimer->running = 1;
/*
@@ -292,7 +292,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
update_gt_cputime(&cputimer->cputime, &sum);
}
*times = cputimer->cputime;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ atomic_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
@@ -1065,9 +1065,9 @@ static void stop_process_timers(struct task_struct *tsk)
if (!cputimer->running)
return;
- spin_lock_irqsave(&cputimer->lock, flags);
+ atomic_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
- spin_unlock_irqrestore(&cputimer->lock, flags);
+ atomic_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
diff --git a/kernel/printk.c b/kernel/printk.c
index b4d97b54c1ec..e0e794b87542 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -92,7 +92,7 @@ static int console_locked, console_suspended;
* It is also used in interesting ways to provide interlocking in
* release_console_sem().
*/
-static DEFINE_SPINLOCK(logbuf_lock);
+static DEFINE_ATOMIC_SPINLOCK(logbuf_lock);
#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
@@ -171,7 +171,7 @@ static int __init log_buf_len_setup(char *str)
goto out;
}
- spin_lock_irqsave(&logbuf_lock, flags);
+ atomic_spin_lock_irqsave(&logbuf_lock, flags);
log_buf_len = size;
log_buf = new_log_buf;
@@ -185,7 +185,7 @@ static int __init log_buf_len_setup(char *str)
log_start -= offset;
con_start -= offset;
log_end -= offset;
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ atomic_spin_unlock_irqrestore(&logbuf_lock, flags);
printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len);
}
@@ -297,18 +297,18 @@ int do_syslog(int type, char __user *buf, int len)
if (error)
goto out;
i = 0;
- spin_lock_irq(&logbuf_lock);
+ atomic_spin_lock_irq(&logbuf_lock);
while (!error && (log_start != log_end) && i < len) {
c = LOG_BUF(log_start);
log_start++;
- spin_unlock_irq(&logbuf_lock);
+ atomic_spin_unlock_irq(&logbuf_lock);
error = __put_user(c,buf);
buf++;
i++;
cond_resched();
- spin_lock_irq(&logbuf_lock);
+ atomic_spin_lock_irq(&logbuf_lock);
}
- spin_unlock_irq(&logbuf_lock);
+ atomic_spin_unlock_irq(&logbuf_lock);
if (!error)
error = i;
break;
@@ -329,7 +329,7 @@ int do_syslog(int type, char __user *buf, int len)
count = len;
if (count > log_buf_len)
count = log_buf_len;
- spin_lock_irq(&logbuf_lock);
+ atomic_spin_lock_irq(&logbuf_lock);
if (count > logged_chars)
count = logged_chars;
if (do_clear)
@@ -346,12 +346,12 @@ int do_syslog(int type, char __user *buf, int len)
if (j + log_buf_len < log_end)
break;
c = LOG_BUF(j);
- spin_unlock_irq(&logbuf_lock);
+ atomic_spin_unlock_irq(&logbuf_lock);
error = __put_user(c,&buf[count-1-i]);
cond_resched();
- spin_lock_irq(&logbuf_lock);
+ atomic_spin_lock_irq(&logbuf_lock);
}
- spin_unlock_irq(&logbuf_lock);
+ atomic_spin_unlock_irq(&logbuf_lock);
if (error)
break;
error = i;
@@ -527,7 +527,7 @@ static void zap_locks(void)
oops_timestamp = jiffies;
/* If a crash is occurring, make sure we can't deadlock */
- spin_lock_init(&logbuf_lock);
+ atomic_spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
init_MUTEX(&console_sem);
}
@@ -631,7 +631,7 @@ static int acquire_console_semaphore_for_printk(unsigned int cpu)
}
}
printk_cpu = UINT_MAX;
- spin_unlock(&logbuf_lock);
+ atomic_spin_unlock(&logbuf_lock);
return retval;
}
static const char recursion_bug_msg [] =
@@ -674,7 +674,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
}
lockdep_off();
- spin_lock(&logbuf_lock);
+ atomic_spin_lock(&logbuf_lock);
printk_cpu = this_cpu;
if (recursion_bug) {
@@ -1023,14 +1023,14 @@ void release_console_sem(void)
console_may_schedule = 0;
for ( ; ; ) {
- spin_lock_irqsave(&logbuf_lock, flags);
+ atomic_spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
if (con_start == log_end)
break; /* Nothing to print */
_con_start = con_start;
_log_end = log_end;
con_start = log_end; /* Flush */
- spin_unlock(&logbuf_lock);
+ atomic_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(_con_start, _log_end);
start_critical_timings();
@@ -1038,7 +1038,7 @@ void release_console_sem(void)
}
console_locked = 0;
up(&console_sem);
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ atomic_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
@@ -1240,9 +1240,9 @@ void register_console(struct console *console)
* release_console_sem() will print out the buffered messages
* for us.
*/
- spin_lock_irqsave(&logbuf_lock, flags);
+ atomic_spin_lock_irqsave(&logbuf_lock, flags);
con_start = log_start;
- spin_unlock_irqrestore(&logbuf_lock, flags);
+ atomic_spin_unlock_irqrestore(&logbuf_lock, flags);
}
release_console_sem();
}
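
The do_syslog() changes above keep a constraint that predates the conversion: logbuf_lock must not be held across __put_user(), which can fault and sleep, so the lock is dropped and retaken around every byte copied out. A condensed sketch of the read path (helper name hypothetical):

    static int copy_log_sketch(char __user *buf, int len)
    {
            int i = 0, error = 0;
            char c;

            atomic_spin_lock_irq(&logbuf_lock);
            while (!error && log_start != log_end && i < len) {
                    c = LOG_BUF(log_start);
                    log_start++;
                    atomic_spin_unlock_irq(&logbuf_lock);

                    error = __put_user(c, buf++);   /* may fault and sleep */
                    i++;
                    cond_resched();

                    atomic_spin_lock_irq(&logbuf_lock);
            }
            atomic_spin_unlock_irq(&logbuf_lock);
            return error ? error : i;
    }
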
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index beb0e659adcc..e52017699e55 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -71,7 +71,7 @@
*/
#define GP_STAGES 2
struct rcu_data {
- spinlock_t lock; /* Protect rcu_data fields. */
+ atomic_spinlock_t lock; /* Protect rcu_data fields. */
long completed; /* Number of last completed batch. */
int waitlistcount;
struct rcu_head *nextlist;
@@ -138,7 +138,7 @@ enum rcu_sched_sleep_states {
};
struct rcu_ctrlblk {
- spinlock_t fliplock; /* Protect state-machine transitions. */
+ atomic_spinlock_t fliplock; /* Protect state-machine transitions. */
long completed; /* Number of last completed batch. */
enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
the rcu state machine */
@@ -193,7 +193,7 @@ void rcu_exit_nohz(void)
static DEFINE_PER_CPU(struct rcu_data, rcu_data);
static struct rcu_ctrlblk rcu_ctrlblk = {
- .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
+ .fliplock = __ATOMIC_SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
.completed = 0,
.rcu_try_flip_state = rcu_try_flip_idle_state,
.schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
@@ -910,7 +910,7 @@ static void rcu_try_flip(void)
unsigned long flags;
RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
- if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
+ if (unlikely(!atomic_spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
return;
}
@@ -941,7 +941,7 @@ static void rcu_try_flip(void)
rcu_ctrlblk.rcu_try_flip_state =
rcu_try_flip_idle_state;
}
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+ atomic_spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}
/*
@@ -986,13 +986,13 @@ void rcu_check_callbacks(int cpu, int user)
rcu_check_mb(cpu);
if (rcu_ctrlblk.completed == rdp->completed)
rcu_try_flip();
- spin_lock_irqsave(&rdp->lock, flags);
+ atomic_spin_lock_irqsave(&rdp->lock, flags);
RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
__rcu_advance_callbacks(rdp);
if (rdp->donelist == NULL) {
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
} else {
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
raise_softirq(RCU_SOFTIRQ);
}
}
@@ -1011,10 +1011,10 @@ void rcu_advance_callbacks(int cpu, int user)
if (rcu_ctrlblk.completed == rdp->completed)
return;
}
- spin_lock_irqsave(&rdp->lock, flags);
+ atomic_spin_lock_irqsave(&rdp->lock, flags);
RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
__rcu_advance_callbacks(rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1042,7 +1042,7 @@ void rcu_offline_cpu(int cpu)
* Otherwise rcu_barrier() will fail
*/
- spin_lock_irqsave(&rdp->lock, flags);
+ atomic_spin_lock_irqsave(&rdp->lock, flags);
rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
for (i = GP_STAGES - 1; i >= 0; i--)
rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
@@ -1053,12 +1053,12 @@ void rcu_offline_cpu(int cpu)
rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
schedlist, schedtail);
rdp->rcu_sched_sleeping = 0;
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
rdp->waitlistcount = 0;
/* Disengage the newly dead CPU from the grace-period computation. */
- spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+ atomic_spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
rcu_check_mb(cpu);
if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
smp_mb(); /* Subsequent counter accesses must see new value */
@@ -1075,7 +1075,7 @@ void rcu_offline_cpu(int cpu)
cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+ atomic_spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
/*
* Place the removed callbacks on the current CPU's queue.
@@ -1089,14 +1089,14 @@ void rcu_offline_cpu(int cpu)
local_irq_save(flags); /* disable preempt till we know what lock. */
rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
+ atomic_spin_lock(&rdp->lock);
*rdp->nexttail = list;
if (list)
rdp->nexttail = tail;
*rdp->nextschedtail = schedlist;
if (schedlist)
rdp->nextschedtail = schedtail;
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1112,9 +1112,9 @@ void __cpuinit rcu_online_cpu(int cpu)
unsigned long flags;
struct rcu_data *rdp;
- spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
+ atomic_spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+ atomic_spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
/*
* The rcu_sched grace-period processing might have bypassed
@@ -1126,9 +1126,9 @@ void __cpuinit rcu_online_cpu(int cpu)
*/
rdp = RCU_DATA_CPU(cpu);
- spin_lock_irqsave(&rdp->lock, flags);
+ atomic_spin_lock_irqsave(&rdp->lock, flags);
rdp->rcu_sched_sleeping = 1;
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
}
static void rcu_process_callbacks(struct softirq_action *unused)
@@ -1139,16 +1139,16 @@ static void rcu_process_callbacks(struct softirq_action *unused)
local_irq_save(flags);
rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
+ atomic_spin_lock(&rdp->lock);
list = rdp->donelist;
if (list == NULL) {
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
return;
}
rdp->donelist = NULL;
rdp->donetail = &rdp->donelist;
RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
while (list) {
next = list->next;
list->func(list);
@@ -1166,12 +1166,12 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
head->next = NULL;
local_irq_save(flags);
rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
+ atomic_spin_lock(&rdp->lock);
__rcu_advance_callbacks(rdp);
*rdp->nexttail = head;
rdp->nexttail = &head->next;
RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
@@ -1185,7 +1185,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
head->next = NULL;
local_irq_save(flags);
rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
+ atomic_spin_lock(&rdp->lock);
*rdp->nextschedtail = head;
rdp->nextschedtail = &head->next;
if (rdp->rcu_sched_sleeping) {
@@ -1195,7 +1195,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
rdp->rcu_sched_sleeping = 0;
wake_gp = 1;
}
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
if (wake_gp) {
/* Wake up grace-period processing, unless someone beat us. */
@@ -1291,7 +1291,7 @@ static int rcu_sched_grace_period(void *arg)
for_each_online_cpu(cpu) {
rdp = RCU_DATA_CPU(cpu);
- spin_lock_irqsave(&rdp->lock, flags);
+ atomic_spin_lock_irqsave(&rdp->lock, flags);
/*
* We are running on this CPU irq-disabled, so no
@@ -1330,7 +1330,7 @@ static int rcu_sched_grace_period(void *arg)
rdp->rcu_sched_sleeping = couldsleep;
- spin_unlock_irqrestore(&rdp->lock, flags);
+ atomic_spin_unlock_irqrestore(&rdp->lock, flags);
}
/* If we saw callbacks on the last scan, go deal with them. */
@@ -1452,7 +1452,7 @@ void __init __rcu_init(void)
printk(KERN_NOTICE "Preemptible RCU implementation.\n");
for_each_possible_cpu(cpu) {
rdp = RCU_DATA_CPU(cpu);
- spin_lock_init(&rdp->lock);
+ atomic_spin_lock_init(&rdp->lock);
rdp->completed = 0;
rdp->waitlistcount = 0;
rdp->nextlist = NULL;
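
The hunks above are the heart of the series: every lock that must remain a real busy-wait lock when PREEMPT_RT turns spinlock_t into a sleeping lock is retyped as atomic_spinlock_t, with each spin_* call replaced by its atomic_spin_* counterpart. The irq-disabled RCU bookkeeping here is exactly such a context, since sleeping with interrupts off is not an option. A minimal userspace model of the split, with pthread primitives standing in for the kernel locks and every name below purely illustrative:

        #include <pthread.h>
        #include <stdio.h>

        /* Model: under PREEMPT_RT a spinlock_t would behave like this
         * mutex (it can sleep), while atomic_spinlock_t keeps busy-waiting
         * like the pthread spinlock. Both are stand-ins, not kernel code. */
        static pthread_mutex_t sleeping_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_spinlock_t raw_lock;
        static long rcu_state;  /* stands in for the per-CPU rcu_data fields */

        static void *touch_state(void *unused)
        {
                (void)unused;

                /* Paths that may block can take the sleeping flavor... */
                pthread_mutex_lock(&sleeping_lock);
                pthread_mutex_unlock(&sleeping_lock);

                /* ...but irq-off paths must busy-wait and never sleep. */
                pthread_spin_lock(&raw_lock);
                rcu_state++;
                pthread_spin_unlock(&raw_lock);
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_spin_init(&raw_lock, PTHREAD_PROCESS_PRIVATE);
                pthread_create(&t, NULL, touch_state, NULL);
                pthread_join(t, NULL);
                printf("state=%ld\n", rcu_state);
                return 0;
        }

The conversion is mechanically one-to-one, which is why these hunks are so repetitive: only the type and the function prefix change, never the locking protocol.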
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e2..c97ffa45317a 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
if (rt_trace_on) { \
rt_trace_on = 0; \
console_verbose(); \
- if (spin_is_locked(&current->pi_lock)) \
- spin_unlock(&current->pi_lock); \
+ if (atomic_spin_is_locked(&current->pi_lock)) \
+ atomic_spin_unlock(&current->pi_lock); \
} \
} while (0)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fcd107a78c5a..3210dfd0000c 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -131,16 +131,16 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
*
* (Note: We do this outside of the protection of lock->wait_lock to
* allow the lock to be taken while or before we readjust the priority
- * of task. We do not use the spin_xx_mutex() variants here as we are
+ * of the task. We do not use the atomic_spin_xx_mutex() variants here as we are
* outside of the debug path.)
*/
static void rt_mutex_adjust_prio(struct task_struct *task)
{
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/*
* Task cannot go away as we did a get_task_struct() before!
*/
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
/*
@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
lock = waiter->lock;
- if (!spin_trylock(&lock->wait_lock)) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!atomic_spin_trylock(&lock->wait_lock)) {
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
}
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
plist_add(&waiter->list_entry, &lock->wait_list);
/* Release the task */
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/* Grab the next task */
task = rt_mutex_owner(lock);
get_task_struct(task);
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */
@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto again;
out_unlock_pi:
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
if (pendowner == task)
return 1;
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ atomic_spin_lock_irqsave(&pendowner->pi_lock, flags);
if (task->prio >= pendowner->prio) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* priority.
*/
if (likely(!rt_mutex_has_waiters(lock))) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 1;
}
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
next = rt_mutex_top_waiter(lock);
plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
__rt_mutex_adjust_prio(pendowner);
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
/*
* We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* might be task:
*/
if (likely(next->task != task)) {
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
plist_add(&next->pi_list_entry, &task->pi_waiters);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0, res;
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
task->pi_blocked_on = waiter;
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ atomic_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
chain_walk = 1;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
*/
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
task);
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
return res;
}
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
struct task_struct *pendowner;
unsigned long flags;
- spin_lock_irqsave(&current->pi_lock, flags);
+ atomic_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&current->pi_lock, flags);
/*
* Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* waiter with higher priority than pending-owner->normal_prio
* is blocked on the unboosted (pending) owner.
*/
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ atomic_spin_lock_irqsave(&pendowner->pi_lock, flags);
WARN_ON(!pendowner->pi_blocked_on);
WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
}
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
wake_up_process(pendowner);
}
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0;
- spin_lock_irqsave(&current->pi_lock, flags);
+ atomic_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
current->pi_blocked_on = NULL;
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ atomic_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
}
/*
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
struct rt_mutex_waiter *waiter;
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ atomic_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || waiter->list_entry.prio == task->prio) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
if (waiter->task)
schedule_rt_mutex(lock);
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
set_current_state(state);
}
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock)) {
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
/* Remove pending timer: */
if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
{
int ret = 0;
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
fixup_rt_mutex_waiters(lock);
}
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
return ret;
}
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
debug_rt_mutex_unlock(lock);
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
return;
}
wakeup_next_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
/* Undo pi boosting if necessary: */
rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- spin_lock_init(&lock->wait_lock);
- plist_head_init(&lock->wait_list, &lock->wait_lock);
+ atomic_spin_lock_init(&lock->wait_lock);
+ plist_head_init_atomic(&lock->wait_list, &lock->wait_lock);
debug_rt_mutex_init(lock, name);
}
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
mark_rt_mutex_waiters(lock);
@@ -1058,7 +1058,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*/
ret = 0;
}
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1108,7 +1108,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ atomic_spin_lock(&lock->wait_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1126,7 +1126,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ atomic_spin_unlock(&lock->wait_lock);
/*
* Readjust priority, when we did not get the lock. We might have been
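
A recurring shape in the rtmutex hunks above is the chain walk in rt_mutex_adjust_prio_chain(): it takes task->pi_lock first and may then only trylock lock->wait_lock, because the fast path nests the two locks the other way around; on failure it drops everything, relaxes, and retries. Reduced to a runnable userspace sketch (pthread spinlocks model the kernel locks; all names are illustrative):

        #include <pthread.h>
        #include <sched.h>
        #include <stdio.h>

        static pthread_spinlock_t pi_lock, wait_lock;

        /* Model of the retry loop: pi_lock is taken first here, the reverse
         * of the fast path's order, so wait_lock may only be trylocked; on
         * failure drop everything and retry. */
        static void chain_step(void)
        {
                for (;;) {
                        pthread_spin_lock(&pi_lock);
                        if (pthread_spin_trylock(&wait_lock) == 0)
                                break;
                        pthread_spin_unlock(&pi_lock);
                        sched_yield();          /* stands in for cpu_relax() */
                }
                /* ... requeue the waiter and boost the owner under both ... */
                pthread_spin_unlock(&wait_lock);
                pthread_spin_unlock(&pi_lock);
        }

        int main(void)
        {
                pthread_spin_init(&pi_lock, PTHREAD_PROCESS_PRIVATE);
                pthread_spin_init(&wait_lock, PTHREAD_PROCESS_PRIVATE);
                chain_step();
                puts("chain step completed");
                return 0;
        }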
diff --git a/kernel/rwlock.c b/kernel/rwlock.c
new file mode 100644
index 000000000000..35460b37b815
--- /dev/null
+++ b/kernel/rwlock.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (2004) Linus Torvalds
+ *
+ * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
+ *
+ * Copyright (2004, 2005) Ingo Molnar
+ *
+ * This file contains the rwlock implementations for the
+ * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
+ *
+ * Note that some architectures have special knowledge about the
+ * stack frames of these functions in their profile_pc. If you
+ * change anything significant here that could change the stack
+ * frame, contact the architecture maintainers.
+ */
+
+#include <linux/linkage.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
+#include <linux/module.h>
+
+#include "lock-internals.h"
+
+int __lockfunc _read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+
+ preempt_enable();
+ return 0;
+}
+EXPORT_SYMBOL(_read_trylock);
+
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+
+ preempt_enable();
+ return 0;
+}
+EXPORT_SYMBOL(_write_trylock);
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+void __lockfunc _read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+EXPORT_SYMBOL(_read_lock);
+
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
+ return flags;
+}
+EXPORT_SYMBOL(_read_lock_irqsave);
+
+void __lockfunc _read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+EXPORT_SYMBOL(_read_lock_irq);
+
+void __lockfunc _read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+EXPORT_SYMBOL(_read_lock_bh);
+
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
+ return flags;
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+EXPORT_SYMBOL(_write_lock);
+
+#else /* CONFIG_PREEMPT: */
+
+/*
+ * Build preemption-friendly versions of the following
+ * lock-spinning functions:
+ *
+ * _[read|write]_lock()
+ * _[read|write]_lock_irq()
+ * _[read|write]_lock_irqsave()
+ * _[read|write]_lock_bh()
+ */
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
+
+#endif /* CONFIG_PREEMPT */
+
+void __lockfunc _write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable();
+}
+EXPORT_SYMBOL(_write_unlock);
+
+void __lockfunc _read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable();
+}
+EXPORT_SYMBOL(_read_unlock);
+
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+EXPORT_SYMBOL(_read_unlock_irqrestore);
+
+void __lockfunc _read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(_read_unlock_irq);
+
+void __lockfunc _read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+EXPORT_SYMBOL(_read_unlock_bh);
+
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+EXPORT_SYMBOL(_write_unlock_irqrestore);
+
+void __lockfunc _write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(_write_unlock_irq);
+
+void __lockfunc _write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+EXPORT_SYMBOL(_write_unlock_bh);
+
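
All the entry points in this new file follow one template: disable preemption (plus irqs or bottom halves for the _irq/_bh variants), tell lockdep, then acquire via LOCK_CONTENDED, which tries the lock once so lock statistics can record contention before falling back to the spinning acquire (with lock statistics compiled out it collapses to the plain acquire). A hedged userspace model of that shape, all names illustrative:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_spinlock_t lk;
        static long contended_events;   /* what CONFIG_LOCK_STAT would account */

        /* Shape of LOCK_CONTENDED with lock statistics enabled: one trylock
         * to detect contention, then the real spinning acquire. */
        static void lock_contended_model(pthread_spinlock_t *lock)
        {
                if (pthread_spin_trylock(lock) == 0)
                        return;                 /* uncontended fast path */
                contended_events++;             /* lock_contended() would fire here */
                pthread_spin_lock(lock);
        }

        int main(void)
        {
                pthread_spin_init(&lk, PTHREAD_PROCESS_PRIVATE);
                lock_contended_model(&lk);
                pthread_spin_unlock(&lk);
                printf("contended=%ld\n", contended_events);
                return 0;
        }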
diff --git a/kernel/sched.c b/kernel/sched.c
index b8f0262a5105..156fff19920a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -166,7 +166,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ atomic_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -203,7 +203,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
- spin_lock_init(&rt_b->rt_runtime_lock);
+ atomic_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -225,7 +225,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
if (hrtimer_active(&rt_b->rt_period_timer))
return;
- spin_lock(&rt_b->rt_runtime_lock);
+ atomic_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
@@ -242,7 +242,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ atomic_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -501,7 +501,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ atomic_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -564,7 +564,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
- spinlock_t lock;
+ atomic_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -721,7 +721,7 @@ int runqueue_is_locked(void)
struct rq *rq = cpu_rq(cpu);
int ret;
- ret = spin_is_locked(&rq->lock);
+ ret = atomic_spin_is_locked(&rq->lock);
put_cpu();
return ret;
}
@@ -922,7 +922,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -978,10 +978,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
}
}
@@ -998,10 +998,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock_irqrestore(&rq->lock, *flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
@@ -1010,19 +1010,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
- spin_unlock_wait(&rq->lock);
+ atomic_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
@@ -1035,7 +1035,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
return rq;
}
@@ -1082,10 +1082,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
@@ -1098,10 +1098,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
}
/*
@@ -1208,7 +1208,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
- assert_spin_locked(&task_rq(p)->lock);
+ assert_atomic_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -1230,10 +1230,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!spin_trylock_irqsave(&rq->lock, flags))
+ if (!atomic_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
@@ -1281,7 +1281,7 @@ void wake_up_idle_cpu(int cpu)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
- assert_spin_locked(&task_rq(p)->lock);
+ assert_atomic_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */
@@ -1544,11 +1544,11 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->shares = shares;
__set_se_shares(tg->se[cpu], shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
}
@@ -1627,9 +1627,9 @@ static void update_shares(struct sched_domain *sd)
static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
{
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
update_shares(sd);
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
@@ -1664,7 +1664,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- spin_unlock(&this_rq->lock);
+ atomic_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
@@ -1685,14 +1685,14 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
- if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (unlikely(!atomic_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
- spin_unlock(&this_rq->lock);
- spin_lock(&busiest->lock);
- spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_unlock(&this_rq->lock);
+ atomic_spin_lock(&busiest->lock);
+ atomic_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
}
return ret;
}
@@ -1706,7 +1706,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work well under rq->lock */
- spin_unlock(&this_rq->lock);
+ atomic_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
@@ -1716,7 +1716,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- spin_unlock(&busiest->lock);
+ atomic_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
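
_double_lock_balance() above avoids ABBA deadlock between two runqueue locks by ordering acquisitions by lock address: if the opportunistic trylock fails, the lower-addressed lock is always taken first, so two CPUs balancing against each other can never each hold one lock while waiting for the other. The same rule as a self-contained userspace sketch (pointer comparison done via uintptr_t; names illustrative):

        #include <pthread.h>
        #include <stdint.h>
        #include <stdio.h>

        static pthread_mutex_t rq_a = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t rq_b = PTHREAD_MUTEX_INITIALIZER;

        /* Always acquire the lower-addressed lock first; two threads running
         * this on the same pair can then never hold one each and block. */
        static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
        {
                if (a == b) {
                        pthread_mutex_lock(a);
                        return;
                }
                if ((uintptr_t)a < (uintptr_t)b) {
                        pthread_mutex_lock(a);
                        pthread_mutex_lock(b);
                } else {
                        pthread_mutex_lock(b);
                        pthread_mutex_lock(a);
                }
        }

        static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
        {
                pthread_mutex_unlock(a);
                if (a != b)
                        pthread_mutex_unlock(b);
        }

        int main(void)
        {
                double_lock(&rq_b, &rq_a);      /* argument order is irrelevant */
                double_unlock(&rq_b, &rq_a);
                puts("both runqueue models locked and unlocked");
                return 0;
        }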
@@ -3091,15 +3091,17 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
- spin_lock(&rq1->lock);
+ atomic_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
- spin_lock(&rq1->lock);
- spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_lock(&rq1->lock);
+ atomic_spin_lock_nested(&rq2->lock,
+ SINGLE_DEPTH_NESTING);
} else {
- spin_lock(&rq2->lock);
- spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ atomic_spin_lock(&rq2->lock);
+ atomic_spin_lock_nested(&rq1->lock,
+ SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -3116,9 +3118,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ atomic_spin_unlock(&rq1->lock);
if (rq1 != rq2)
- spin_unlock(&rq2->lock);
+ atomic_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
@@ -4105,14 +4107,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
- spin_lock_irqsave(&busiest->lock, flags);
+ atomic_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
- spin_unlock_irqrestore(&busiest->lock, flags);
+ atomic_spin_unlock_irqrestore(&busiest->lock,
+ flags);
all_pinned = 1;
goto out_one_pinned;
}
@@ -4122,7 +4125,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- spin_unlock_irqrestore(&busiest->lock, flags);
+ atomic_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
@@ -4304,10 +4307,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
- spin_unlock(&this_rq->lock);
+ atomic_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
- spin_lock(&this_rq->lock);
+ atomic_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -5139,11 +5142,11 @@ void scheduler_tick(void)
sched_clock_tick();
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
perf_counter_task_tick(curr, cpu);
@@ -5337,7 +5340,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
@@ -5376,7 +5379,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
@@ -6201,7 +6204,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
- spin_lock_irqsave(&p->pi_lock, flags);
+ atomic_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
@@ -6211,7 +6214,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);
@@ -6233,7 +6236,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ atomic_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
@@ -6872,7 +6875,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();
@@ -6885,7 +6888,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
@@ -7071,10 +7074,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
break;
}
@@ -7086,7 +7089,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
@@ -7094,7 +7097,7 @@ static int migration_thread(void *data)
req = list_entry(head->next, struct migration_req, list);
list_del_init(head->next);
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
local_irq_enable();
@@ -7216,14 +7219,14 @@ void sched_idle_next(void)
* Strictly not necessary, since the rest of the CPUs are stopped by now
* and interrupts are disabled on the current CPU.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -7259,9 +7262,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7528,13 +7531,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -7559,14 +7562,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -7576,30 +7579,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
complete(&req->done);
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
@@ -7820,7 +7823,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -7846,7 +7849,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
set_rq_online(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
@@ -9099,13 +9102,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init_atomic(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
- spin_lock_init(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -9265,7 +9268,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ atomic_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9360,7 +9363,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init_atomic(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
@@ -9476,13 +9479,13 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock(&p->pi_lock);
+ atomic_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock(&p->pi_lock);
+ atomic_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
@@ -9841,9 +9844,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -10028,18 +10031,18 @@ static int tg_set_bandwidth(struct task_group *tg,
if (err)
goto unlock;
- spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ atomic_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ atomic_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -10144,7 +10147,7 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
- spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ atomic_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
@@ -10152,7 +10155,7 @@ static int sched_rt_global_constraints(void)
rt_rq->rt_runtime = global_rt_runtime();
spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+ atomic_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
@@ -10414,9 +10417,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ atomic_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ atomic_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
@@ -10432,9 +10435,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ atomic_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ atomic_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
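
The cpuacct hunks keep the comment's rationale intact: on 32-bit platforms a u64 load or store is two machine words, so an unlocked reader could observe half an update; taking rq->lock (now an atomic spinlock) makes the access appear atomic. A small userspace model of the same discipline, names illustrative:

        #include <pthread.h>
        #include <stdint.h>
        #include <stdio.h>

        static pthread_spinlock_t usage_lock;   /* models rq->lock here */
        static uint64_t cpuusage;

        static uint64_t usage_read(void)
        {
                uint64_t v;

                pthread_spin_lock(&usage_lock);
                v = cpuusage;           /* both 32-bit halves observed together */
                pthread_spin_unlock(&usage_lock);
                return v;
        }

        static void usage_write(uint64_t v)
        {
                pthread_spin_lock(&usage_lock);
                cpuusage = v;
                pthread_spin_unlock(&usage_lock);
        }

        int main(void)
        {
                pthread_spin_init(&usage_lock, PTHREAD_PROCESS_PRIVATE);
                usage_write(1ULL << 40);
                printf("%llu\n", (unsigned long long)usage_read());
                return 0;
        }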
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dde..8971e14f9d69 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -119,27 +119,27 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
- spin_lock_irqsave(&vec->lock, flags);
+ atomic_spin_lock_irqsave(&vec->lock, flags);
vec->count--;
if (!vec->count)
clear_bit(oldpri, cp->pri_active);
cpumask_clear_cpu(cpu, vec->mask);
- spin_unlock_irqrestore(&vec->lock, flags);
+ atomic_spin_unlock_irqrestore(&vec->lock, flags);
}
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
- spin_lock_irqsave(&vec->lock, flags);
+ atomic_spin_lock_irqsave(&vec->lock, flags);
cpumask_set_cpu(cpu, vec->mask);
vec->count++;
if (vec->count == 1)
set_bit(newpri, cp->pri_active);
- spin_unlock_irqrestore(&vec->lock, flags);
+ atomic_spin_unlock_irqrestore(&vec->lock, flags);
}
*currpri = newpri;
@@ -165,7 +165,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
- spin_lock_init(&vec->lock);
+ atomic_spin_lock_init(&vec->lock);
vec->count = 0;
if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9a7e859b8fbf..9a4c0f37d119 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
/* values 2-101 are RT priorities 0-99 */
struct cpupri_vec {
- spinlock_t lock;
+ atomic_spinlock_t lock;
int count;
cpumask_var_t mask;
};
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 70c7e0b79946..899d71ff8bdf 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 499672c10cbd..467d6d22011b 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..54779ebe48cc 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -314,7 +314,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
weight = cpumask_weight(rd->span);
- spin_lock(&rt_b->rt_runtime_lock);
+ atomic_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -323,7 +323,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
if (iter == rt_rq)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ atomic_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
@@ -345,14 +345,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
rt_rq->rt_runtime += diff;
more = 1;
if (rt_rq->rt_runtime == rt_period) {
- spin_unlock(&iter->rt_runtime_lock);
+ atomic_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
- spin_unlock(&iter->rt_runtime_lock);
+ atomic_spin_unlock(&iter->rt_runtime_lock);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ atomic_spin_unlock(&rt_b->rt_runtime_lock);
return more;
}
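
do_balance_runtime() above lets a throttled rt_rq borrow bandwidth: each donor CPU gives away an equal share, spare/weight, of its unused runtime, and the taker stops once its budget reaches the full period. The arithmetic in isolation, with purely illustrative numbers:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t rt_period  = 1000000000ULL;    /* 1s period */
                uint64_t my_runtime = 300000000ULL;     /* taker's current budget */
                uint64_t donor_runtime = 950000000ULL;
                uint64_t donor_time    = 600000000ULL;  /* donor already consumed */
                unsigned int weight = 4;                /* CPUs in the root domain */

                int64_t diff = (int64_t)(donor_runtime - donor_time);
                if (diff > 0) {
                        diff /= weight;                 /* donor's equal share */
                        if ((uint64_t)diff > rt_period - my_runtime)
                                diff = rt_period - my_runtime;  /* cap at period */
                        donor_runtime -= diff;
                        my_runtime    += diff;
                }
                printf("borrowed %lld ns, budget now %llu ns\n",
                       (long long)diff, (unsigned long long)my_runtime);
                return 0;
        }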
@@ -373,8 +373,8 @@ static void __disable_runtime(struct rq *rq)
s64 want;
int i;
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_b->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
@@ -383,7 +383,7 @@ static void __disable_runtime(struct rq *rq)
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
@@ -405,7 +405,7 @@ static void __disable_runtime(struct rq *rq)
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ atomic_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
@@ -414,13 +414,13 @@ static void __disable_runtime(struct rq *rq)
iter->rt_runtime -= want;
want -= want;
}
- spin_unlock(&iter->rt_runtime_lock);
+ atomic_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
@@ -432,8 +432,8 @@ balanced:
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -441,9 +441,9 @@ static void disable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
@@ -459,13 +459,13 @@ static void __enable_runtime(struct rq *rq)
for_each_leaf_rt_rq(rt_rq, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_b->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -473,9 +473,9 @@ static void enable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ atomic_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ atomic_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
@@ -483,9 +483,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
int more = 0;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
- spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
}
return more;
@@ -511,11 +511,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
- spin_lock(&rq->lock);
+ atomic_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
@@ -526,13 +526,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running)
idle = 0;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
- spin_unlock(&rq->lock);
+ atomic_spin_unlock(&rq->lock);
}
return idle;
@@ -609,11 +609,11 @@ static void update_curr_rt(struct rq *rq)
rt_rq = rt_rq_of_se(rt_se);
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
- spin_lock(&rt_rq->rt_runtime_lock);
+ atomic_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
resched_task(curr);
- spin_unlock(&rt_rq->rt_runtime_lock);
+ atomic_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
}
@@ -1244,7 +1244,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->se.on_rq)) {
- spin_unlock(&lowest_rq->lock);
+ atomic_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
@@ -1480,9 +1480,9 @@ static void post_schedule_rt(struct rq *rq)
* This is only called if needs_post_schedule_rt() indicates that
* we need to push tasks away
*/
- spin_lock_irq(&rq->lock);
+ atomic_spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
- spin_unlock_irq(&rq->lock);
+ atomic_spin_unlock_irq(&rq->lock);
}
/*
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 32d2bd4061b0..7f69cea0b6dd 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -306,10 +306,10 @@ static inline void account_group_user_time(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ atomic_spin_lock(&cputimer->lock);
cputimer->cputime.utime =
cputime_add(cputimer->cputime.utime, cputime);
- spin_unlock(&cputimer->lock);
+ atomic_spin_unlock(&cputimer->lock);
}
/**
@@ -336,10 +336,10 @@ static inline void account_group_system_time(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ atomic_spin_lock(&cputimer->lock);
cputimer->cputime.stime =
cputime_add(cputimer->cputime.stime, cputime);
- spin_unlock(&cputimer->lock);
+ atomic_spin_unlock(&cputimer->lock);
}
/**
@@ -369,7 +369,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
if (!cputimer->running)
return;
- spin_lock(&cputimer->lock);
+ atomic_spin_lock(&cputimer->lock);
cputimer->cputime.sum_exec_runtime += ns;
- spin_unlock(&cputimer->lock);
+ atomic_spin_unlock(&cputimer->lock);
}
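
The account_group_*() helpers above all share one discipline: cputimer->running is checked without the lock as a cheap filter, and only the 64-bit accumulation itself takes cputimer->lock. A userspace model of that fast-path check, names illustrative:

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_spinlock_t ct_lock;      /* models cputimer->lock */
        static bool running = true;             /* models cputimer->running */
        static unsigned long long utime;

        static void account_user_time(unsigned long long delta)
        {
                if (!running)                   /* unlocked fast-path filter */
                        return;
                pthread_spin_lock(&ct_lock);
                utime += delta;                 /* 64-bit add, under the lock */
                pthread_spin_unlock(&ct_lock);
        }

        int main(void)
        {
                pthread_spin_init(&ct_lock, PTHREAD_PROCESS_PRIVATE);
                account_user_time(1000);
                printf("utime=%llu\n", utime);
                return 0;
        }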
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..4239f17f8803 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static struct {
struct list_head queue;
- spinlock_t lock;
+ atomic_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
{
- .queue = LIST_HEAD_INIT(call_function.queue),
- .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ .queue = LIST_HEAD_INIT(call_function.queue),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(call_function.lock),
};
enum {
@@ -29,18 +29,18 @@ enum {
struct call_function_data {
struct call_single_data csd;
- spinlock_t lock;
+ atomic_spinlock_t lock;
unsigned int refs;
cpumask_var_t cpumask;
};
struct call_single_queue {
struct list_head list;
- spinlock_t lock;
+ atomic_spinlock_t lock;
};
static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
- .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
+ .lock = __ATOMIC_SPIN_LOCK_UNLOCKED(cfd_data.lock),
};
static int
@@ -83,7 +83,7 @@ static int __cpuinit init_call_single_data(void)
for_each_possible_cpu(i) {
struct call_single_queue *q = &per_cpu(call_single_queue, i);
- spin_lock_init(&q->lock);
+ atomic_spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
@@ -144,10 +144,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
unsigned long flags;
int ipi;
- spin_lock_irqsave(&dst->lock, flags);
+ atomic_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&data->list, &dst->list);
- spin_unlock_irqrestore(&dst->lock, flags);
+ atomic_spin_unlock_irqrestore(&dst->lock, flags);
/*
* The list addition should be visible before sending the IPI
@@ -191,25 +191,25 @@ void generic_smp_call_function_interrupt(void)
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- spin_lock(&data->lock);
+ atomic_spin_lock(&data->lock);
if (!cpumask_test_cpu(cpu, data->cpumask)) {
- spin_unlock(&data->lock);
+ atomic_spin_unlock(&data->lock);
continue;
}
cpumask_clear_cpu(cpu, data->cpumask);
- spin_unlock(&data->lock);
+ atomic_spin_unlock(&data->lock);
data->csd.func(data->csd.info);
- spin_lock(&data->lock);
+ atomic_spin_lock(&data->lock);
WARN_ON(data->refs == 0);
refs = --data->refs;
if (!refs) {
- spin_lock(&call_function.lock);
+ atomic_spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
- spin_unlock(&call_function.lock);
+ atomic_spin_unlock(&call_function.lock);
}
- spin_unlock(&data->lock);
+ atomic_spin_unlock(&data->lock);
if (refs)
continue;
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_interrupt(void)
unsigned int data_flags;
LIST_HEAD(list);
- spin_lock(&q->lock);
+ atomic_spin_lock(&q->lock);
list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
+ atomic_spin_unlock(&q->lock);
while (!list_empty(&list)) {
struct call_single_data *data;
@@ -391,23 +391,23 @@ void smp_call_function_many(const struct cpumask *mask,
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
- spin_lock_irqsave(&data->lock, flags);
+ atomic_spin_lock_irqsave(&data->lock, flags);
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
data->refs = cpumask_weight(data->cpumask);
- spin_lock(&call_function.lock);
+ atomic_spin_lock(&call_function.lock);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- spin_unlock(&call_function.lock);
+ atomic_spin_unlock(&call_function.lock);
- spin_unlock_irqrestore(&data->lock, flags);
+ atomic_spin_unlock_irqrestore(&data->lock, flags);
/*
* Make the list addition visible before sending the ipi.
@@ -453,20 +453,20 @@ EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function.lock);
+ atomic_spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function.lock);
+ atomic_spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function.lock);
+ atomic_spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function.lock);
+ atomic_spin_unlock_irq(&call_function.lock);
}
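
generic_exec_single() above queues the request under the destination lock and sends the IPI only when the list was previously empty: a non-empty list means an interrupt is already on its way and will drain every queued entry, so extra IPIs would be wasted. The same first-enqueuer-kicks rule as a runnable sketch (names illustrative; the kernel appends with list_add_tail where this model prepends):

        #include <pthread.h>
        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct req { struct req *next; };

        static pthread_spinlock_t q_lock;       /* models dst->lock */
        static struct req *head;

        /* Returns true when the caller must send the "IPI": only the
         * enqueuer that found the list empty kicks; later ones piggyback. */
        static bool enqueue(struct req *r)
        {
                bool was_empty;

                pthread_spin_lock(&q_lock);
                was_empty = (head == NULL);
                r->next = head;
                head = r;
                pthread_spin_unlock(&q_lock);
                return was_empty;
        }

        int main(void)
        {
                struct req a, b;

                pthread_spin_init(&q_lock, PTHREAD_PROCESS_PRIVATE);
                printf("kick=%d\n", enqueue(&a));   /* 1: send the IPI */
                printf("kick=%d\n", enqueue(&b));   /* 0: one is already pending */
                return 0;
        }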
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 88796c330838..62996170311d 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -20,7 +20,7 @@
#include <asm/irq_regs.h>
-static DEFINE_SPINLOCK(print_lock);
+static DEFINE_ATOMIC_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
@@ -149,7 +149,7 @@ void softlockup_tick(void)
per_cpu(print_timestamp, this_cpu) = touch_timestamp;
- spin_lock(&print_lock);
+ atomic_spin_lock(&print_lock);
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
this_cpu, now - touch_timestamp,
current->comm, task_pid_nr(current));
@@ -159,7 +159,7 @@ void softlockup_tick(void)
show_regs(regs);
else
dump_stack();
- spin_unlock(&print_lock);
+ atomic_spin_unlock(&print_lock);
if (softlockup_panic)
panic("softlockup: hung tasks");
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..6a3c0c474316 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,44 +21,19 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
-int __lockfunc _spin_trylock(spinlock_t *lock)
+#include "lock-internals.h"
+
+int __lockfunc _atomic_spin_trylock(atomic_spinlock_t *lock)
{
preempt_disable();
if (_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
-
- preempt_enable();
- return 0;
-}
-EXPORT_SYMBOL(_spin_trylock);
-
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
preempt_enable();
return 0;
}
-EXPORT_SYMBOL(_read_trylock);
-
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
-}
-EXPORT_SYMBOL(_write_trylock);
+EXPORT_SYMBOL(_atomic_spin_trylock);
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -67,15 +42,7 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-void __lockfunc _read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-EXPORT_SYMBOL(_read_lock);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _atomic_spin_lock_irqsave(atomic_spinlock_t *lock)
{
unsigned long flags;
@@ -94,207 +61,61 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
#endif
return flags;
}
-EXPORT_SYMBOL(_spin_lock_irqsave);
+EXPORT_SYMBOL(_atomic_spin_lock_irqsave);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock_irq(atomic_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_irq);
+EXPORT_SYMBOL(_atomic_spin_lock_irq);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_bh);
-
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
+EXPORT_SYMBOL(_atomic_spin_lock_bh);
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-
-void __lockfunc _spin_lock(spinlock_t *lock)
+void __lockfunc _atomic_spin_lock(atomic_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-
-EXPORT_SYMBOL(_spin_lock);
-
-void __lockfunc _write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-EXPORT_SYMBOL(_write_lock);
+EXPORT_SYMBOL(_atomic_spin_lock);
#else /* CONFIG_PREEMPT: */
/*
- * This could be a long-held lock. We both prepare to spin for a long
- * time (making _this_ CPU preemptable if possible), and we also signal
- * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
- */
-
-#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc _##op##_lock(locktype##_t *lock) \
-{ \
- for (;;) { \
- preempt_disable(); \
- if (likely(_raw_##op##_trylock(lock))) \
- break; \
- preempt_enable(); \
- \
- if (!(lock)->break_lock) \
- (lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
- } \
- (lock)->break_lock = 0; \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock); \
- \
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
-{ \
- unsigned long flags; \
- \
- for (;;) { \
- preempt_disable(); \
- local_irq_save(flags); \
- if (likely(_raw_##op##_trylock(lock))) \
- break; \
- local_irq_restore(flags); \
- preempt_enable(); \
- \
- if (!(lock)->break_lock) \
- (lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
- } \
- (lock)->break_lock = 0; \
- return flags; \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_irqsave); \
- \
-void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
-{ \
- _##op##_lock_irqsave(lock); \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_irq); \
- \
-void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
-{ \
- unsigned long flags; \
- \
- /* */ \
- /* Careful: we must exclude softirqs too, hence the */ \
- /* irq-disabling. We use the generic preemption-aware */ \
- /* function: */ \
- /**/ \
- flags = _##op##_lock_irqsave(lock); \
- local_bh_disable(); \
- local_irq_restore(flags); \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_bh)
-
-/*
* Build preemption-friendly versions of the following
* lock-spinning functions:
*
- * _[spin|read|write]_lock()
- * _[spin|read|write]_lock_irq()
- * _[spin|read|write]_lock_irqsave()
- * _[spin|read|write]_lock_bh()
+ * _atomic_spin_lock()
+ * _atomic_spin_lock_irq()
+ * _atomic_spin_lock_irqsave()
+ * _atomic_spin_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock);
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
+BUILD_LOCK_OPS(atomic_spin, atomic_spinlock);
#endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _atomic_spin_lock_nested(atomic_spinlock_t *lock, int subclass)
{
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_atomic_spin_lock_nested);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_atomic_spin_lock_irqsave_nested(atomic_spinlock_t *lock, int subclass)
{
unsigned long flags;
@@ -305,125 +126,56 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
_raw_spin_lock_flags, &flags);
return flags;
}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_atomic_spin_lock_irqsave_nested);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+void __lockfunc _atomic_spin_lock_nest_lock(atomic_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
+EXPORT_SYMBOL(_atomic_spin_lock_nest_lock);
#endif
-void __lockfunc _spin_unlock(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_atomic_spin_unlock);
-void __lockfunc _write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-EXPORT_SYMBOL(_write_unlock);
-
-void __lockfunc _read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable();
-}
-EXPORT_SYMBOL(_read_unlock);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc
+_atomic_spin_unlock_irqrestore(atomic_spinlock_t *lock, unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_atomic_spin_unlock_irqrestore);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock_irq(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_atomic_spin_unlock_irq);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+void __lockfunc _atomic_spin_unlock_bh(atomic_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-EXPORT_SYMBOL(_spin_unlock_bh);
-
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-EXPORT_SYMBOL(_read_unlock_irqrestore);
-
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(_read_unlock_irq);
-
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-EXPORT_SYMBOL(_read_unlock_bh);
-
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-EXPORT_SYMBOL(_write_unlock_irqrestore);
-
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(_write_unlock_irq);
-
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-EXPORT_SYMBOL(_write_unlock_bh);
+EXPORT_SYMBOL(_atomic_spin_unlock_bh);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+int __lockfunc _atomic_spin_trylock_bh(atomic_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
@@ -436,7 +188,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
return 0;
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_atomic_spin_trylock_bh);
notrace int in_lock_functions(unsigned long addr)
{
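
With the rwlock variants deleted, BUILD_LOCK_OPS() is kept (now via lock-internals.h) only to generate the CONFIG_PREEMPT spin-wait implementations for atomic_spin. Expanding BUILD_LOCK_OPS(atomic_spin, atomic_spinlock) along the lines of the deleted macro body gives roughly the following; mapping the generated helpers back onto _raw_spin_trylock()/_raw_spin_relax() and an atomic_spin_can_lock() test is my assumption, since the real macro now lives in lock-internals.h, which is not part of this hunk:

/* approximate expansion, preemption-friendly build */
void __lockfunc _atomic_spin_lock(atomic_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(_raw_spin_trylock(lock)))
			break;
		preempt_enable();		/* let this CPU be preempted */

		if (!lock->break_lock)
			lock->break_lock = 1;	/* ask the holder to bail out */
		while (!atomic_spin_can_lock(lock) && lock->break_lock)
			_raw_spin_relax(&lock->raw_lock);	/* cpu_relax() */
	}
	lock->break_lock = 0;
}
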
diff --git a/kernel/time.c b/kernel/time.c
index 29511943871a..4d80c5d7f9b0 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -133,11 +133,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
*/
static inline void warp_clock(void)
{
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
update_xtime_cache(0);
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
clock_was_set();
}
@@ -662,9 +662,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
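
get_jiffies_64() above is the canonical read side of the renamed seqlock: sample the sequence counter, read the payload, retry if a writer raced. The same loop covers multi-word reads, which is the whole point of the primitive; a sketch using the tick variables this series protects with xtime_lock:

static void my_read_tick(ktime_t *period, ktime_t *next)
{
	unsigned long seq;

	do {
		seq = read_atomic_seqbegin(&xtime_lock);
		*period = tick_period;		/* both values must come */
		*next = tick_next_period;	/* from one writer epoch */
	} while (read_atomic_seqretry(&xtime_lock, seq));
}
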
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a6dcd67b041d..a21d577867be 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -28,7 +28,7 @@ static LIST_HEAD(clockevents_released);
static RAW_NOTIFIER_HEAD(clockevents_chain);
/* Protection for the above */
-static DEFINE_SPINLOCK(clockevents_lock);
+static DEFINE_ATOMIC_SPINLOCK(clockevents_lock);
/**
* clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
@@ -139,9 +139,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
{
int ret;
- spin_lock(&clockevents_lock);
+ atomic_spin_lock(&clockevents_lock);
ret = raw_notifier_chain_register(&clockevents_chain, nb);
- spin_unlock(&clockevents_lock);
+ atomic_spin_unlock(&clockevents_lock);
return ret;
}
@@ -181,13 +181,13 @@ void clockevents_register_device(struct clock_event_device *dev)
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
BUG_ON(!dev->cpumask);
- spin_lock(&clockevents_lock);
+ atomic_spin_lock(&clockevents_lock);
list_add(&dev->list, &clockevent_devices);
clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
clockevents_notify_released();
- spin_unlock(&clockevents_lock);
+ atomic_spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);
@@ -236,7 +236,7 @@ void clockevents_notify(unsigned long reason, void *arg)
{
struct list_head *node, *tmp;
- spin_lock(&clockevents_lock);
+ atomic_spin_lock(&clockevents_lock);
clockevents_do_notify(reason, arg);
switch (reason) {
@@ -251,7 +251,7 @@ void clockevents_notify(unsigned long reason, void *arg)
default:
break;
}
- spin_unlock(&clockevents_lock);
+ atomic_spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7466cb811251..8a4273199476 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -127,7 +127,7 @@ static struct clocksource *curr_clocksource = &clocksource_jiffies;
static struct clocksource *next_clocksource;
static struct clocksource *clocksource_override;
static LIST_HEAD(clocksource_list);
-static DEFINE_SPINLOCK(clocksource_lock);
+static DEFINE_ATOMIC_SPINLOCK(clocksource_lock);
static char override_name[32];
static int finished_booting;
@@ -296,7 +296,7 @@ void clocksource_resume(void)
struct clocksource *cs;
unsigned long flags;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
list_for_each_entry(cs, &clocksource_list, list) {
if (cs->resume)
@@ -305,7 +305,7 @@ void clocksource_resume(void)
clocksource_resume_watchdog();
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
}
/**
@@ -328,12 +328,12 @@ struct clocksource *clocksource_get_next(void)
{
unsigned long flags;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
if (next_clocksource && finished_booting) {
curr_clocksource = next_clocksource;
next_clocksource = NULL;
}
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
return curr_clocksource;
}
@@ -402,11 +402,11 @@ int clocksource_register(struct clocksource *c)
unsigned long flags;
int ret;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
ret = clocksource_enqueue(c);
if (!ret)
next_clocksource = select_clocksource();
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
if (!ret)
clocksource_check_watchdog(c);
return ret;
@@ -421,12 +421,12 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
{
unsigned long flags;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
list_del(&cs->list);
cs->rating = rating;
clocksource_enqueue(cs);
next_clocksource = select_clocksource();
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
}
/**
@@ -436,12 +436,12 @@ void clocksource_unregister(struct clocksource *cs)
{
unsigned long flags;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
list_del(&cs->list);
if (clocksource_override == cs)
clocksource_override = NULL;
next_clocksource = select_clocksource();
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
}
#ifdef CONFIG_SYSFS
@@ -458,9 +458,9 @@ sysfs_show_current_clocksources(struct sys_device *dev,
{
ssize_t count = 0;
- spin_lock_irq(&clocksource_lock);
+ atomic_spin_lock_irq(&clocksource_lock);
count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
- spin_unlock_irq(&clocksource_lock);
+ atomic_spin_unlock_irq(&clocksource_lock);
return count;
}
@@ -490,7 +490,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
if (buf[count-1] == '\n')
count--;
- spin_lock_irq(&clocksource_lock);
+ atomic_spin_lock_irq(&clocksource_lock);
if (count > 0)
memcpy(override_name, buf, count);
@@ -527,7 +527,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
next_clocksource = select_clocksource();
}
- spin_unlock_irq(&clocksource_lock);
+ atomic_spin_unlock_irq(&clocksource_lock);
return ret;
}
@@ -547,7 +547,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
struct clocksource *src;
ssize_t count = 0;
- spin_lock_irq(&clocksource_lock);
+ atomic_spin_lock_irq(&clocksource_lock);
list_for_each_entry(src, &clocksource_list, list) {
/*
* Don't show non-HRES clocksource if the tick code is
@@ -559,7 +559,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
"%s ", src->name);
}
- spin_unlock_irq(&clocksource_lock);
+ atomic_spin_unlock_irq(&clocksource_lock);
count += snprintf(buf + count,
max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
@@ -615,10 +615,10 @@ device_initcall(init_clocksource_sysfs);
static int __init boot_override_clocksource(char* str)
{
unsigned long flags;
- spin_lock_irqsave(&clocksource_lock, flags);
+ atomic_spin_lock_irqsave(&clocksource_lock, flags);
if (str)
strlcpy(override_name, str, sizeof(override_name));
- spin_unlock_irqrestore(&clocksource_lock, flags);
+ atomic_spin_unlock_irqrestore(&clocksource_lock, flags);
return 1;
}
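
clocksource_register() now serializes against these sysfs and boot-time paths with clocksource_lock as an atomic spinlock, but the driver-facing API is unchanged. A hedged registration sketch using only struct clocksource fields that predate this series (all values illustrative):

#include <linux/clocksource.h>

static cycle_t my_cs_read(struct clocksource *cs)
{
	return 0;	/* read the hardware counter here */
}

static struct clocksource my_clocksource = {
	.name	= "my-timer",
	.rating	= 200,				/* mid-grade source */
	.read	= my_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_cs_init(void)
{
	/* scale a 1 MHz counter to nanoseconds (illustrative rate) */
	my_clocksource.mult = clocksource_khz2mult(1000, my_clocksource.shift);
	return clocksource_register(&my_clocksource);
}
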
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7fc64375ff43..c195edef09e3 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -188,7 +188,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
enum hrtimer_restart res = HRTIMER_NORESTART;
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
switch (time_state) {
case TIME_OK:
@@ -221,7 +221,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
}
update_vsyscall(&xtime, clock);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
return res;
}
@@ -479,7 +479,7 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
- write_seqlock_irq(&xtime_lock);
+ write_atomic_seqlock_irq(&xtime_lock);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -527,7 +527,7 @@ int do_adjtimex(struct timex *txc)
txc->errcnt = 0;
txc->stbcnt = 0;
- write_sequnlock_irq(&xtime_lock);
+ write_atomic_sequnlock_irq(&xtime_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
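
do_adjtimex() here and warp_clock() earlier show the matching write side: writers take xtime_lock exclusively, using the _irq flavour because readers also run from interrupt context. A minimal writer sketch built from the calls in these hunks:

static void my_step_wall_clock(long delta_sec)
{
	write_atomic_seqlock_irq(&xtime_lock);	/* exclude readers and IRQs */
	xtime.tv_sec += delta_sec;		/* update the protected data */
	update_xtime_cache(0);			/* keep the cached copy coherent */
	write_atomic_sequnlock_irq(&xtime_lock);
}
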
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 877dbedc3118..9736ccb55bdf 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
-static DEFINE_SPINLOCK(tick_broadcast_lock);
+static DEFINE_ATOMIC_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Devices might be registered with both periodic and oneshot
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
tick_broadcast_clear_oneshot(cpu);
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
*/
static void tick_do_periodic_broadcast(void)
{
- spin_lock(&tick_broadcast_lock);
+ atomic_spin_lock(&tick_broadcast_lock);
cpumask_and(to_cpumask(tmpmask),
cpu_online_mask, tick_get_broadcast_mask());
tick_do_broadcast(to_cpumask(tmpmask));
- spin_unlock(&tick_broadcast_lock);
+ atomic_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(void *why)
unsigned long flags, *reason = why;
int cpu, bc_stopped;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(void *why)
tick_broadcast_setup_oneshot(bc);
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -300,7 +300,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
@@ -310,7 +310,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
clockevents_shutdown(bc);
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
void tick_suspend_broadcast(void)
@@ -318,13 +318,13 @@ void tick_suspend_broadcast(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
if (bc)
clockevents_shutdown(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
int tick_resume_broadcast(void)
@@ -333,7 +333,7 @@ int tick_resume_broadcast(void)
unsigned long flags;
int broadcast = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
@@ -352,7 +352,7 @@ int tick_resume_broadcast(void)
break;
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return broadcast;
}
@@ -406,7 +406,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
ktime_t now, next_event;
int cpu;
- spin_lock(&tick_broadcast_lock);
+ atomic_spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
@@ -444,7 +444,7 @@ again:
if (tick_broadcast_set_event(next_event, 0))
goto again;
}
- spin_unlock(&tick_broadcast_lock);
+ atomic_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -458,7 +458,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
unsigned long flags;
int cpu;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Periodic mode does not care about the enter/exit of power
@@ -493,7 +493,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -564,13 +564,13 @@ void tick_broadcast_switch_to_oneshot(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
@@ -582,7 +582,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ atomic_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Clear the broadcast mask flag for the dead cpu, but do not
@@ -590,7 +590,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
*/
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3c..1d3068ae5cf1 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-DEFINE_SPINLOCK(tick_device_lock);
+DEFINE_ATOMIC_SPINLOCK(tick_device_lock);
/*
* Debugging: see timer_list.c
@@ -60,13 +60,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -127,9 +127,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
int cpu, ret = NOTIFY_OK;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ atomic_spin_lock_irqsave(&tick_device_lock, flags);
cpu = smp_processor_id();
if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_device_lock, flags);
return NOTIFY_STOP;
out_bc:
@@ -278,7 +278,7 @@ out_bc:
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
}
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
struct clock_event_device *dev = td->evtdev;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ atomic_spin_lock_irqsave(&tick_device_lock, flags);
td->mode = TICKDEV_MODE_PERIODIC;
if (dev) {
/*
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
clockevents_exchange_device(dev, NULL);
td->evtdev = NULL;
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_suspend(void)
@@ -330,9 +330,9 @@ static void tick_suspend(void)
struct tick_device *td = &__get_cpu_var(tick_cpu_device);
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ atomic_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_shutdown(td->evtdev);
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_resume(void)
@@ -341,7 +341,7 @@ static void tick_resume(void)
unsigned long flags;
int broadcast = tick_resume_broadcast();
- spin_lock_irqsave(&tick_device_lock, flags);
+ atomic_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
if (!broadcast) {
@@ -350,7 +350,7 @@ static void tick_resume(void)
else
tick_resume_oneshot();
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ atomic_spin_unlock_irqrestore(&tick_device_lock, flags);
}
/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee0..e0726c714a55 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,7 @@
#define TICK_DO_TIMER_BOOT -2
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
-extern spinlock_t tick_device_lock;
+extern atomic_spinlock_t tick_device_lock;
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..fea1bc74ca94 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -57,7 +57,7 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Re-evaluate with xtime_lock held */
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -80,7 +80,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
}
/*
@@ -90,12 +90,12 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ write_atomic_seqlock(&xtime_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_atomic_sequnlock(&xtime_lock);
return period;
}
@@ -267,10 +267,10 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
/* Get the next timer wheel timer */
next_jiffies = get_next_timer_interrupt(last_jiffies);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 02c0b2c9c674..9d1bac7e11c2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,8 +24,7 @@
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
+__cacheline_aligned_in_smp DEFINE_ATOMIC_SEQLOCK(xtime_lock);
/*
* The current time
@@ -102,7 +101,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
*ts = xtime;
@@ -118,7 +117,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -134,7 +133,7 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
@@ -147,7 +146,7 @@ ktime_t ktime_get(void)
/* convert to nanoseconds: */
nsecs += cyc2ns(clock, cycle_delta);
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -174,7 +173,7 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
*ts = xtime;
tomono = wall_to_monotonic;
@@ -187,7 +186,7 @@ void ktime_get_ts(struct timespec *ts)
/* convert to nanoseconds: */
nsecs = cyc2ns(clock, cycle_delta);
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -224,7 +223,7 @@ int do_settimeofday(struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
clocksource_forward_now();
@@ -241,7 +240,7 @@ int do_settimeofday(struct timespec *tv)
update_vsyscall(&xtime, clock);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -323,11 +322,11 @@ void ktime_get_ts(struct timespec *ts)
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
getnstimeofday(ts);
tomono = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
@@ -363,7 +362,7 @@ void getrawmonotonic(struct timespec *ts)
cycle_t cycle_now, cycle_delta;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
/* read clocksource: */
cycle_now = clocksource_read(clock);
@@ -376,7 +375,7 @@ void getrawmonotonic(struct timespec *ts)
*ts = clock->raw_time;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -392,11 +391,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
return ret;
}
@@ -423,7 +422,7 @@ void __init timekeeping_init(void)
unsigned long flags;
unsigned long sec = read_persistent_clock();
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
ntp_init();
@@ -438,7 +437,7 @@ void __init timekeeping_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
update_xtime_cache(0);
total_sleep_time = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
@@ -459,7 +458,7 @@ static int timekeeping_resume(struct sys_device *dev)
clocksource_resume();
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
if (now && (now > timekeeping_suspend_time)) {
unsigned long sleep_length = now - timekeeping_suspend_time;
@@ -474,7 +473,7 @@ static int timekeeping_resume(struct sys_device *dev)
clock->cycle_last = clocksource_read(clock);
clock->error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
@@ -492,10 +491,10 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
timekeeping_suspend_time = read_persistent_clock();
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_atomic_seqlock_irqsave(&xtime_lock, flags);
clocksource_forward_now();
timekeeping_suspended = 1;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_atomic_sequnlock_irqrestore(&xtime_lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
@@ -734,10 +733,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_atomic_seqbegin(&xtime_lock);
now = xtime_cache;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_atomic_seqretry(&xtime_lock, seq));
return now;
}
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a999b92a1277..b794d0275c2a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
next_one:
i = 0;
- spin_lock_irqsave(&base->cpu_base->lock, flags);
+ atomic_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = base->first;
/*
@@ -100,13 +100,13 @@ next_one:
timer = rb_entry(curr, struct hrtimer, node);
tmp = *timer;
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ atomic_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ atomic_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
}
static void
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..0654f9464c37 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,12 +81,12 @@ struct entry {
/*
* Spinlock protecting the tables - not taken during lookup:
*/
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_ATOMIC_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
*/
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(atomic_spinlock_t, lookup_lock);
/*
* Mutex to serialize state changes with show-stats activities:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
prev = NULL;
curr = *head;
- spin_lock(&table_lock);
+ atomic_spin_lock(&table_lock);
/*
* Make sure we have not raced with another CPU:
*/
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
*head = curr;
}
out_unlock:
- spin_unlock(&table_lock);
+ atomic_spin_unlock(&table_lock);
return curr;
}
@@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
/*
* It doesn't matter which lock we take:
*/
- spinlock_t *lock;
+ atomic_spinlock_t *lock;
struct entry *entry, input;
unsigned long flags;
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.pid = pid;
input.timer_flag = timer_flag;
- spin_lock_irqsave(lock, flags);
+ atomic_spin_lock_irqsave(lock, flags);
if (!timer_stats_active)
goto out_unlock;
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
atomic_inc(&overflow_count);
out_unlock:
- spin_unlock_irqrestore(lock, flags);
+ atomic_spin_unlock_irqrestore(lock, flags);
}
static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,9 +348,9 @@ static void sync_access(void)
int cpu;
for_each_online_cpu(cpu) {
- spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+ atomic_spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
/* nothing */
- spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+ atomic_spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
}
}
@@ -408,7 +408,7 @@ void __init init_timer_stats(void)
int cpu;
for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(lookup_lock, cpu));
+ atomic_spin_lock_init(&per_cpu(lookup_lock, cpu));
}
static int __init init_tstats_procfs(void)
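
timer_stats keeps a global table_lock plus per-CPU lookup locks, all now atomic spinlocks. Per-CPU lock storage has no DEFINE_ATOMIC_SPINLOCK-style static initializer, hence the explicit init loop above; the pattern in isolation:

#include <linux/percpu.h>

static DEFINE_PER_CPU(atomic_spinlock_t, my_pcpu_lock);

static void __init my_pcpu_locks_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)		/* init every instance once */
		atomic_spin_lock_init(&per_cpu(my_pcpu_lock, cpu));
}

static void my_pcpu_update(void)
{
	atomic_spinlock_t *lock = &per_cpu(my_pcpu_lock, raw_smp_processor_id());
	unsigned long flags;

	atomic_spin_lock_irqsave(lock, flags);
	/* touch this CPU's slot of the shared table */
	atomic_spin_unlock_irqrestore(lock, flags);
}
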
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2..c94dd1477f61 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -403,7 +403,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
- spinlock_t reader_lock; /* serialize readers */
+ atomic_spinlock_t reader_lock; /* serialize readers */
raw_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head pages;
@@ -570,7 +570,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
- spin_lock_init(&cpu_buffer->reader_lock);
+ atomic_spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&cpu_buffer->pages);
@@ -2117,9 +2117,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
cpu_buffer = iter->cpu_buffer;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -2517,10 +2517,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
again:
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ atomic_spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(buffer, cpu, ts);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ atomic_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING) {
@@ -2547,9 +2547,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
unsigned long flags;
again:
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING) {
cpu_relax();
@@ -2587,7 +2587,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ atomic_spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(buffer, cpu, ts);
if (!event)
@@ -2597,7 +2597,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
out_unlock:
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ atomic_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
out:
@@ -2645,11 +2645,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
__raw_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
__raw_spin_unlock(&cpu_buffer->lock);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
}
@@ -2687,14 +2687,14 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
unsigned long flags;
again:
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
if (!event)
goto out;
rb_advance_iter(iter);
out:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING) {
cpu_relax();
@@ -2762,7 +2762,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled);
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
__raw_spin_lock(&cpu_buffer->lock);
@@ -2770,7 +2770,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
__raw_spin_unlock(&cpu_buffer->lock);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
}
@@ -2808,10 +2808,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ atomic_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ atomic_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
if (!ret)
@@ -2842,10 +2842,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
if (dolock)
- spin_lock(&cpu_buffer->reader_lock);
+ atomic_spin_lock(&cpu_buffer->reader_lock);
ret = rb_per_cpu_empty(cpu_buffer);
if (dolock)
- spin_unlock(&cpu_buffer->reader_lock);
+ atomic_spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
return ret;
@@ -3031,7 +3031,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -3106,7 +3106,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
ret = read;
out_unlock:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
out:
return ret;
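
ring_buffer's read paths take reader_lock only when dolock says it is safe (upstream, the NMI and oops dump paths skip it), while interrupts are disabled either way. The shape of that pattern, reusing the structures from the hunks above:

static int my_buffer_empty(struct ring_buffer_per_cpu *cpu_buffer, int dolock)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);			/* IRQs off regardless */
	if (dolock)
		atomic_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		atomic_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
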
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bc8d8afea6a..8ab991cc6cd4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -668,7 +668,7 @@ static void trace_init_cmdlines(void)
}
static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_ATOMIC_SPINLOCK(tracing_start_lock);
/**
* ftrace_off_permanent - disable all ftrace code permanently
@@ -699,7 +699,7 @@ void tracing_start(void)
if (tracing_disabled)
return;
- spin_lock_irqsave(&tracing_start_lock, flags);
+ atomic_spin_lock_irqsave(&tracing_start_lock, flags);
if (--trace_stop_count) {
if (trace_stop_count < 0) {
/* Someone screwed up their debugging */
@@ -720,7 +720,7 @@ void tracing_start(void)
ftrace_start();
out:
- spin_unlock_irqrestore(&tracing_start_lock, flags);
+ atomic_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
/**
@@ -735,7 +735,7 @@ void tracing_stop(void)
unsigned long flags;
ftrace_stop();
- spin_lock_irqsave(&tracing_start_lock, flags);
+ atomic_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;
@@ -748,7 +748,7 @@ void tracing_stop(void)
ring_buffer_record_disable(buffer);
out:
- spin_unlock_irqrestore(&tracing_start_lock, flags);
+ atomic_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
void trace_stop_cmdline_recording(void);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b923d13e2fad..ea555b584857 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
static DEFINE_PER_CPU(int, tracing_cpu);
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_ATOMIC_SPINLOCK(max_trace_lock);
enum {
TRACER_IRQS_OFF = (1 << 1),
@@ -149,7 +149,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out;
- spin_lock_irqsave(&max_trace_lock, flags);
+ atomic_spin_lock_irqsave(&max_trace_lock, flags);
/* check if we are still the max latency */
if (!report_latency(delta))
@@ -173,7 +173,7 @@ check_critical_timing(struct trace_array *tr,
max_sequence++;
out_unlock:
- spin_unlock_irqrestore(&max_trace_lock, flags);
+ atomic_spin_unlock_irqrestore(&max_trace_lock, flags);
out:
data->critical_sequence = max_sequence;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2755a3bd16a1..b8e69ee1322d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,14 +25,14 @@
struct debug_bucket {
struct hlist_head list;
- spinlock_t lock;
+ atomic_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_ATOMIC_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -95,10 +95,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock_irqsave(&pool_lock, flags);
+ atomic_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock_irqrestore(&pool_lock, flags);
+ atomic_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -132,7 +132,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ atomic_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -151,7 +151,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ atomic_spin_unlock(&pool_lock);
return obj;
}
@@ -164,7 +164,7 @@ static void free_obj_work(struct work_struct *work)
struct debug_obj *obj;
unsigned long flags;
- spin_lock_irqsave(&pool_lock, flags);
+ atomic_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -173,11 +173,11 @@ static void free_obj_work(struct work_struct *work)
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
- spin_unlock_irqrestore(&pool_lock, flags);
+ atomic_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
- spin_lock_irqsave(&pool_lock, flags);
+ atomic_spin_lock_irqsave(&pool_lock, flags);
}
- spin_unlock_irqrestore(&pool_lock, flags);
+ atomic_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -189,7 +189,7 @@ static void free_object(struct debug_obj *obj)
unsigned long flags;
int sched = 0;
- spin_lock_irqsave(&pool_lock, flags);
+ atomic_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj)
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock_irqrestore(&pool_lock, flags);
+ atomic_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -220,9 +220,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -302,14 +302,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -326,7 +326,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -337,7 +337,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -384,7 +384,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -397,7 +397,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -407,11 +407,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
default:
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
return;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -437,7 +437,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -462,7 +462,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
debug_print_object(&o, "deactivate");
}
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -482,7 +482,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -497,7 +497,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -508,7 +508,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
break;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -528,7 +528,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -538,17 +538,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -574,7 +574,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -586,7 +586,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -596,7 +596,7 @@ repeat:
break;
}
}
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -782,7 +782,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ atomic_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -806,7 +806,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
}
res = 0;
out:
- spin_unlock_irqrestore(&db->lock, flags);
+ atomic_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -906,7 +906,7 @@ void __init debug_objects_early_init(void)
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
- spin_lock_init(&obj_hash[i].lock);
+ atomic_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index e73822aa6e9a..6a4ec2b65fb5 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -17,18 +17,18 @@
* because the spin-lock and the decrement must be
* "atomic".
*/
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+int _atomic_dec_and_atomic_lock(atomic_t *atomic, atomic_spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
- spin_lock(lock);
+ atomic_spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
- spin_unlock(lock);
+ atomic_spin_unlock(lock);
return 0;
}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
+EXPORT_SYMBOL(_atomic_dec_and_atomic_lock);
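
The renamed _atomic_dec_and_atomic_lock() keeps the classic contract: it returns 1 with the lock held only when the decrement hits zero, and avoids the lock entirely otherwise. Assuming the series wraps it in an atomic_dec_and_atomic_lock() caller macro, as upstream does for atomic_dec_and_lock(), a typical teardown looks like this (my_obj is illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_ATOMIC_SPINLOCK(my_obj_list_lock);	/* guards the list */

static void my_obj_put(struct my_obj *obj)
{
	/* the lock is taken only on the final put */
	if (atomic_dec_and_atomic_lock(&obj->refcount, &my_obj_list_lock)) {
		list_del(&obj->node);
		atomic_spin_unlock(&my_obj_list_lock);
		kfree(obj);
	}
}
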
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..67b7217d14a0 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -20,7 +20,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_ATOMIC_SPINLOCK(kernel_flag);
/*
@@ -79,7 +79,7 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (atomic_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
} while (!_raw_spin_trylock(&kernel_flag));
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index aeaa6d734447..e63af9a35298 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -16,13 +16,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
- spin_lock(&fbc->lock);
+ atomic_spin_lock(&fbc->lock);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
- spin_unlock(&fbc->lock);
+ atomic_spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
@@ -35,10 +35,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
pcount = per_cpu_ptr(fbc->counters, cpu);
count = *pcount + amount;
if (count >= batch || count <= -batch) {
- spin_lock(&fbc->lock);
+ atomic_spin_lock(&fbc->lock);
fbc->count += count;
*pcount = 0;
- spin_unlock(&fbc->lock);
+ atomic_spin_unlock(&fbc->lock);
} else {
*pcount = count;
}
@@ -55,13 +55,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
- spin_lock(&fbc->lock);
+ atomic_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
- spin_unlock(&fbc->lock);
+ atomic_spin_unlock(&fbc->lock);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
@@ -69,7 +69,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key)
{
- spin_lock_init(&fbc->lock);
+ atomic_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
fbc->counters = alloc_percpu(s32);
@@ -126,11 +126,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
s32 *pcount;
unsigned long flags;
- spin_lock_irqsave(&fbc->lock, flags);
+ atomic_spin_lock_irqsave(&fbc->lock, flags);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
- spin_unlock_irqrestore(&fbc->lock, flags);
+ atomic_spin_unlock_irqrestore(&fbc->lock, flags);
}
mutex_unlock(&percpu_counters_lock);
#endif
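percpu_counter keeps per-CPU deltas and only takes fbc->lock when a delta crosses the batch threshold or an exact sum is needed, so the converted lock is on the cold path. The public API is unchanged by this patch; a usage sketch (my_events is a hypothetical counter):

	static struct percpu_counter my_events;

	percpu_counter_init(&my_events, 0);	/* one-time setup */

	/* hot path: per-cpu add; the lock is taken only when the local
	 * delta reaches the batch size */
	percpu_counter_add(&my_events, 1);

	/* cold path: takes the lock and folds in every cpu's delta */
	s64 total = percpu_counter_sum(&my_events);
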
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..beff294ad342 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->alock && !head->slock);
+ if (head->alock)
+ WARN_ON_SMP(!atomic_spin_is_locked(head->alock));
+ if (head->slock)
+ WARN_ON_SMP(!spin_is_locked(head->slock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
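plist_head is shared between code converted in this series and code still using regular spinlocks, so instead of one lock pointer it now carries two, alock and slock, and the debug check accepts whichever one is set. Initialization would then pick the flavour matching the protecting lock; a sketch under that assumption (the atomic init helper name is hypothetical, only the alock/slock fields are visible in this hunk):

	static DEFINE_ATOMIC_SPINLOCK(my_alock);
	static DEFINE_SPINLOCK(my_slock);

	struct plist_head my_rt_head, my_head;

	plist_head_init_atomic(&my_rt_head, &my_alock);	/* hypothetical helper */
	plist_head_init(&my_head, &my_slock);		/* existing helper */
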
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..ff347dc3e32c 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
int prop_local_init_percpu(struct prop_local_percpu *pl)
{
- spin_lock_init(&pl->lock);
+ atomic_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ atomic_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
percpu_counter_set(&pl->events, 0);
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ atomic_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
int prop_local_init_single(struct prop_local_single *pl)
{
- spin_lock_init(&pl->lock);
+ atomic_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
if (pl->period == global_period)
return;
- spin_lock_irqsave(&pl->lock, flags);
+ atomic_spin_lock_irqsave(&pl->lock, flags);
prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
/*
* For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
else
pl->events = 0;
pl->period = global_period;
- spin_unlock_irqrestore(&pl->lock, flags);
+ atomic_spin_unlock_irqrestore(&pl->lock, flags);
}
/*
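The part of prop_norm_single() worth calling out is the catch-up decay: for every global period the local counter missed, its event count is halved, which collapses to a single shift once the number of missed periods is known. Reduced to just that step (missed_periods is hypothetical shorthand for the value computed from pl->period and global_period):

	if (missed_periods < BITS_PER_LONG)
		pl->events >>= missed_periods;	/* events *= (1/2)^missed */
	else
		pl->events = 0;			/* fully decayed away */
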
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..5488990fbfce 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -14,7 +14,7 @@
#include <linux/jiffies.h>
#include <linux/module.h>
-static DEFINE_SPINLOCK(ratelimit_lock);
+static DEFINE_ATOMIC_SPINLOCK(ratelimit_lock);
/*
* __ratelimit - rate limiting
@@ -30,7 +30,7 @@ int __ratelimit(struct ratelimit_state *rs)
if (!rs->interval)
return 1;
- spin_lock_irqsave(&ratelimit_lock, flags);
+ atomic_spin_lock_irqsave(&ratelimit_lock, flags);
if (!rs->begin)
rs->begin = jiffies;
@@ -46,12 +46,12 @@ int __ratelimit(struct ratelimit_state *rs)
goto print;
rs->missed++;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
+ atomic_spin_unlock_irqrestore(&ratelimit_lock, flags);
return 0;
print:
rs->printed++;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
+ atomic_spin_unlock_irqrestore(&ratelimit_lock, flags);
return 1;
}
EXPORT_SYMBOL(__ratelimit);
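__ratelimit() serializes all callers through the one global ratelimit_lock in this version, so the conversion is a straight type swap with no behavioral change. Callers are unaffected; a usage sketch (my_rs is a hypothetical state):

	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	/* allow at most 10 messages per 5-second window */
	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "something noisy happened\n");
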
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..5f41fad4d302 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+void __atomic_spin_lock_init(atomic_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__atomic_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(atomic_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(atomic_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(atomic_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+ SPIN_BUG_ON(!atomic_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(atomic_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void _raw_spin_lock(atomic_spinlock_t *lock)
{
debug_spin_lock_before(lock);
if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int _raw_spin_trylock(atomic_spinlock_t *lock)
{
int ret = __raw_spin_trylock(&lock->raw_lock);
@@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void _raw_spin_unlock(atomic_spinlock_t *lock)
{
debug_spin_unlock(lock);
__raw_spin_unlock(&lock->raw_lock);
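The debug core now takes atomic_spinlock_t throughout while the checks themselves are unchanged; they rely on the extra state the debug build carries in the lock. Roughly, as a simplified sketch (the real definition also carries lockdep state under CONFIG_DEBUG_LOCK_ALLOC):

	typedef struct {
		raw_spinlock_t	raw_lock;	/* the real lock word */
	#ifdef CONFIG_DEBUG_SPINLOCK
		unsigned int	magic;		/* SPINLOCK_MAGIC when valid */
		unsigned int	owner_cpu;	/* -1 while unlocked */
		void		*owner;		/* current while held */
	#endif
	} atomic_spinlock_t;

The "bad magic" check catches uninitialized or corrupted locks, while owner/owner_cpu catch lock recursion and unlocks from the wrong task or CPU.
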
diff --git a/sound/drivers/pcsp/pcsp.h b/sound/drivers/pcsp/pcsp.h
index 174dd2ff0f22..be7228ce4426 100644
--- a/sound/drivers/pcsp/pcsp.h
+++ b/sound/drivers/pcsp/pcsp.h
@@ -16,7 +16,7 @@
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_ATOMIC_SPINLOCK(i8253_lock);
#endif
#define PCSP_SOUND_VERSION 0x400 /* read 4.00 */
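The fallback definition only exists for architectures without asm/i8253.h; elsewhere the driver uses the lock the arch exports, which this same series converts (see the asm/i8253.h changes in the diffstat). A sketch of what the arch header is expected to declare after the conversion (based on those hunks, not shown here):

	/* arch-provided PIT lock, shared with the clocksource/clockevent code */
	extern atomic_spinlock_t i8253_lock;
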
diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c
index 0444cdeb4bec..9dfa285832e1 100644
--- a/sound/drivers/pcsp/pcsp_input.c
+++ b/sound/drivers/pcsp/pcsp_input.c
@@ -21,7 +21,7 @@ static void pcspkr_do_sound(unsigned int count)
{
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -36,7 +36,7 @@ static void pcspkr_do_sound(unsigned int count)
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
}
void pcspkr_stop_sound(void)
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 84cc2658c05b..88f73881b1ca 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -70,7 +70,7 @@ static unsigned long pcsp_timer_update(struct hrtimer *handle)
timer_cnt = val * CUR_DIV() / 256;
if (timer_cnt && chip->enable) {
- spin_lock_irqsave(&i8253_lock, flags);
+ atomic_spin_lock_irqsave(&i8253_lock, flags);
if (!nforce_wa) {
outb_p(chip->val61, 0x61);
outb_p(timer_cnt, 0x42);
@@ -79,7 +79,7 @@ static unsigned long pcsp_timer_update(struct hrtimer *handle)
outb(chip->val61 ^ 2, 0x61);
chip->thalf = 1;
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ atomic_spin_unlock_irqrestore(&i8253_lock, flags);
}
chip->ns_rem = PCSP_PERIOD_NS();
@@ -152,10 +152,10 @@ static int pcsp_start_playing(struct snd_pcsp *chip)
return -EIO;
}
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
chip->val61 = inb(0x61) | 0x03;
outb_p(0x92, 0x43); /* binary, mode 1, LSB only, ch 2 */
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
atomic_set(&chip->timer_active, 1);
chip->thalf = 0;
@@ -176,11 +176,11 @@ static void pcsp_stop_playing(struct snd_pcsp *chip)
return;
atomic_set(&chip->timer_active, 0);
- spin_lock(&i8253_lock);
+ atomic_spin_lock(&i8253_lock);
/* restore the timer */
outb_p(0xb6, 0x43); /* binary, mode 3, LSB/MSB, ch 2 */
outb(chip->val61 & 0xFC, 0x61);
- spin_unlock(&i8253_lock);
+ atomic_spin_unlock(&i8253_lock);
}
/*
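
All three pcsp call sites follow the same discipline: every access to the PIT ports (0x42/0x43 for counter 2, 0x61 for the speaker gate) happens under i8253_lock, using the _irqsave form where the caller may run in interrupt context. The lock must be an atomic spinlock because the PIT is shared with timekeeping code that runs with interrupts hard-disabled even on -rt. A condensed sketch of the tone-on sequence, assembled from the hunks above (count is the computed divisor):

	unsigned long flags;

	atomic_spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0xB6, 0x43);		/* ch 2: binary, mode 3, LSB/MSB */
	outb_p(count & 0xff, 0x42);	/* divisor, low byte */
	outb(count >> 8, 0x42);		/* divisor, high byte */
	outb(inb_p(0x61) | 0x03, 0x61);	/* enable gate + speaker output */
	atomic_spin_unlock_irqrestore(&i8253_lock, flags);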