summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2010-06-16 16:58:34 +0200
committerThomas Gleixner <tglx@linutronix.de>2010-06-16 19:55:36 +0200
commitfaf35813f204901f85dd0c6b3c5092e0064c6c2f (patch)
treec314b00a687c486bb4971ceb81f4b84618acb6be /kernel
parent654e404e55d3b4eee35b7def0193e5510904528f (diff)
downloadlwn-faf35813f204901f85dd0c6b3c5092e0064c6c2f.tar.gz
lwn-faf35813f204901f85dd0c6b3c5092e0064c6c2f.zip
timer_stats: Convert table_lock to raw_spin_lock
table_lock is taken in atomic contexts and therefore cannot be converted to a sleeping spinlock on rt. Make it raw.

Reported-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Andreas Sundebo <kernel@sundebo.dk>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/time/timer_stats.c6
1 files changed, 3 insertions, 3 deletions
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 2f3b585b8d7d..30cb955e6c02 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,7 +81,7 @@ struct entry {
/*
* Spinlock protecting the tables - not taken during lookup:
*/
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_RAW_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
prev = NULL;
curr = *head;
- spin_lock(&table_lock);
+ raw_spin_lock(&table_lock);
/*
* Make sure we have not raced with another CPU:
*/
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
*head = curr;
}
out_unlock:
- spin_unlock(&table_lock);
+ raw_spin_unlock(&table_lock);
return curr;
}