path: root/lib/percpu_counter.c
author	Al Viro <viro@zeniv.linux.org.uk>	2012-07-31 09:28:31 +0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-07-31 09:28:31 +0400
commit	d87aae2f3c8e90bd0fe03f5309b4d066b712b8ec (patch)
tree	2e5cd5f0ffaf5b725dd5bcc948a5f8cd06e13663 /lib/percpu_counter.c
parent	4a55c1017b8dcfd0554734ce3f19374d5b522d59 (diff)
switch the protection of percpu_counter list to spinlock
... making percpu_counter_destroy() non-blocking

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
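For context, a minimal sketch of the pattern the patch below installs (the helper example_register_counter is illustrative and not part of the commit): mutex_lock() may sleep while waiting for the lock, so the list bookkeeping in percpu_counter_destroy() could block its caller; spin_lock() busy-waits instead, which is what makes the destroy path non-blocking.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>

/* Global registry of counters, as in lib/percpu_counter.c
 * (assumes CONFIG_HOTPLUG_CPU, which is when fbc->list exists). */
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);

/* Illustrative only: put a counter on the global list under the new
 * spinlock. The critical section is just a list_add(), so busy-waiting
 * is cheap and the caller never sleeps. */
static void example_register_counter(struct percpu_counter *fbc)
{
	INIT_LIST_HEAD(&fbc->list);
	spin_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock(&percpu_counters_lock);
}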
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--	lib/percpu_counter.c	14	+++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index f8a3f1a829b8..ba6085d9c741 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -12,7 +12,7 @@
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
-static DEFINE_MUTEX(percpu_counters_lock);
+static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
@@ -123,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
return 0;
}
@@ -139,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_del(&fbc->list);
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -170,7 +170,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -181,7 +181,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
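For reference, a sketch of the CPU-hotplug fold-back loop that the last two hunks now guard with the spinlock (reconstructed from the context shown above, so details may differ slightly from the full upstream function): the walk over the global list no longer sleeps, while each counter's own fbc->lock remains a raw spinlock taken with interrupts disabled while the departing CPU's per-cpu slot is folded into fbc->count.

	spin_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		/* Fold the dead CPU's contribution into the shared count
		 * and clear its per-cpu slot. */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock(&percpu_counters_lock);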