author	Benjamin LaHaise <bcrl@kvack.org>	2006-01-03 14:10:46 -0800
committer	David S. Miller <davem@davemloft.net>	2006-01-03 14:10:46 -0800
commit	fd19f329a32bdc4eb07885e0b3889567cfe00aa7 (patch)
tree	c69943e8294cae13df6af3dc8b8c25abab8c9cfb /net/unix
parent	4947d3ef8de7b4f42aed6ea9ba689dc8fb45b5a5 (diff)
[AF_UNIX]: Convert to use a spinlock instead of rwlock
From: Benjamin LaHaise <bcrl@kvack.org>

In af_unix, an rwlock is used to protect internal state. At least on my P4 with HT it is faster to use a spinlock, due to the simpler memory barrier used to unlock. This patch raises bw_unix to ~690K/s.

Signed-off-by: David S. Miller <davem@davemloft.net>
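The hunk below shows only the initialisation site; the matching change to the field's type in struct unix_sock and to the lock/unlock call sites falls outside this diffstat, which is limited to net/unix. As a rough illustration of the caller side of such a conversion, here is a minimal, hypothetical kernel-style sketch, assuming u->lock has already been converted to a spinlock_t; example_touch_state() is an illustrative name, not a function from the tree. The performance point in the message is that, on x86 of that era, spin_unlock() is essentially a plain store, whereas the rwlock unlock paths need a locked atomic operation.

/*
 * Hypothetical sketch only -- not part of this patch.  It shows the
 * shape of a call site after the conversion: every access path takes
 * u->lock with the same spin_lock()/spin_unlock() pair instead of the
 * previous read_lock()/write_lock() variants on an rwlock_t.
 */
#include <linux/spinlock.h>
#include <net/af_unix.h>

static void example_touch_state(struct unix_sock *u)
{
	/* Before the conversion this would have been
	 * read_lock(&u->lock) or write_lock(&u->lock). */
	spin_lock(&u->lock);
	/* ... inspect or update fields guarded by u->lock ... */
	spin_unlock(&u->lock);
}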
Diffstat (limited to 'net/unix')
 net/unix/af_unix.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 7d3fe6aebcdb..1ddd36d50091 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -564,7 +564,7 @@ static struct sock * unix_create1(struct socket *sock)
 	u = unix_sk(sk);
 	u->dentry = NULL;
 	u->mnt = NULL;
-	rwlock_init(&u->lock);
+	spin_lock_init(&u->lock);
 	atomic_set(&u->inflight, sock ? 0 : -1);
 	init_MUTEX(&u->readsem); /* single task reading lock */
 	init_waitqueue_head(&u->peer_wait);