author     Roland Dreier <rdreier@cisco.com>      2006-07-14 00:24:23 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-14 21:53:54 -0700
commit     c259cc281255bdb30ceba190bfd7f37e3ae3fc85 (patch)
tree       395028450ca91c441eab243186f5015fe5d6e3d4  /lib/idr.c
parent     6fbe82a952790c634ea6035c223a01a81377daf1 (diff)
[PATCH] Convert idr's internal locking to _irqsave variant
Currently, the code in lib/idr.c uses a bare spin_lock(&idp->lock) to do internal locking. This is a nasty trap for code that might call idr functions from different contexts; for example, it seems perfectly reasonable to call idr_get_new() from process context and idr_remove() from interrupt context -- but with the current locking this would lead to a potential deadlock.

The simplest fix for this is to just convert the idr locking to use spin_lock_irqsave().

In particular, this fixes a very complicated locking issue detected by lockdep, involving the ib_ipoib driver's priv->lock and dev->_xmit_lock, which get involved with the ib_sa module's query_idr.lock.

Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
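To make the deadlock concrete, here is a minimal, hypothetical sketch (not part of the patch; the lock and function names are illustrative, with example_lock standing in for idp->lock) of the before-and-after locking pattern the commit message describes:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stands in for idp->lock */

/* Runs in process context, e.g. a driver path that ends up in idr_get_new(). */
static void broken_process_path(void)
{
	spin_lock(&example_lock);	/* old idr code: local IRQs stay enabled */
	/*
	 * If an interrupt arrives on this CPU here and its handler reaches
	 * broken_irq_path() below, the handler spins on example_lock forever:
	 * this code cannot resume until the handler returns, and the handler
	 * cannot return until this code drops the lock.
	 */
	spin_unlock(&example_lock);
}

/* Runs in hardirq context, e.g. a completion handler that calls idr_remove(). */
static void broken_irq_path(void)
{
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
}

/* The pattern this patch switches to: mask local IRQs while the lock is held. */
static void fixed_process_path(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* broken_irq_path() cannot run on this CPU until the lock is dropped. */
	spin_unlock_irqrestore(&example_lock, flags);
}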
Diffstat (limited to 'lib/idr.c')
-rw-r--r--  lib/idr.c | 16
1 file changed, 10 insertions, 6 deletions
diff --git a/lib/idr.c b/lib/idr.c
index 4d096819511a..16d2143fea48 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache;
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
 	struct idr_layer *p;
+	unsigned long flags;
 
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	if ((p = idp->id_free)) {
 		idp->id_free = p->ary[0];
 		idp->id_free_cnt--;
 		p->ary[0] = NULL;
 	}
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 	return(p);
 }
 
@@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p)
 
 static void free_layer(struct idr *idp, struct idr_layer *p)
 {
+	unsigned long flags;
+
 	/*
 	 * Depends on the return element being zeroed.
 	 */
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	__free_layer(idp, p);
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
 /**
@@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
+	unsigned long flags;
 
 	id = starting_id;
 build_up:
@@ -191,14 +195,14 @@ build_up:
 			 * The allocation failed. If we built part of
 			 * the structure tear it down.
 			 */
-			spin_lock(&idp->lock);
+			spin_lock_irqsave(&idp->lock, flags);
 			for (new = p; p && p != idp->top; new = p) {
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
 				__free_layer(idp, new);
 			}
-			spin_unlock(&idp->lock);
+			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
 		}
 		new->ary[0] = p;
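For context, the mixed-context usage that the commit message calls reasonable looks roughly like the hypothetical sketch below, using the idr interface of this era (idr_pre_get()/idr_get_new()/idr_remove()); the surrounding names are illustrative only. With this patch applied, the interrupt-context idr_remove() can no longer deadlock against the process-context allocation:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct idr example_idr;

/* One-time setup, e.g. from module init code. */
static void example_setup(void)
{
	idr_init(&example_idr);
}

/* Process context: allocate an id for @ptr; GFP_KERNEL allocations allowed. */
static int example_alloc_id(void *ptr)
{
	int id;

	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;
	if (idr_get_new(&example_idr, ptr, &id))
		return -EAGAIN;
	return id;
}

/* Interrupt context: drop the id when the request completes. */
static void example_complete(int id)
{
	idr_remove(&example_idr, id);
}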