author    Nick Piggin <npiggin@suse.de>          2010-01-29 15:38:32 -0800
committer Thomas Gleixner <tglx@linutronix.de>   2010-04-27 17:32:50 +0200
commit    7e835af8566b68c0396256cdcb371f62e9cdb5fa
tree      d1d44193381034ed938125cca99817fe7aeaa3de
parent    a119db1dc3f3a6da86148960937383ce0c162ad4
fs-inode_lock-scale-11
Walk the per-superblock inode list (sb->s_inodes) under RCU rather than
under sb_inode_list_lock, taking the lock only where the list is actually
modified. This enables locking to be reduced and simplified.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
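[The read-side pattern this patch introduces in drop_pagecache_sb(),
add_dquot_ref() and remove_dquot_ref() below is: enter rcu_read_lock(),
walk the list with list_for_each_entry_rcu(), pin each inode with its
per-inode i_lock and a reference, then drop the RCU read section around
blocking work and re-enter it before continuing the walk. The following
is a minimal userspace sketch of that pattern using liburcu; the struct
layout and the names walk_inodes, sb_inodes and visit are illustrative
stand-ins, not the kernel's.

/*
 * Userspace analogue of the RCU list walk in this patch (a sketch,
 * not kernel code).  Build with: gcc -pthread rcu_walk.c -lurcu
 * Each thread must call rcu_register_thread() before its first
 * rcu_read_lock() with the default urcu flavor.
 */
#include <pthread.h>
#include <urcu.h>           /* rcu_read_lock(), synchronize_rcu() */
#include <urcu/rculist.h>   /* cds_list_for_each_entry_rcu() */

struct inode {                      /* stand-in for struct inode */
	pthread_mutex_t i_lock;     /* plays the role of inode->i_lock;
	                               assumed pthread_mutex_init()ed at
	                               inode creation */
	int i_count;
	struct cds_list_head i_sb_list;
};

static CDS_LIST_HEAD(sb_inodes);    /* plays the role of sb->s_inodes */

/*
 * Walk the list under rcu_read_lock() instead of a list lock.  The
 * reference taken under i_lock keeps the current node valid across the
 * rcu_read_unlock()/rcu_read_lock() window, so the walk can safely
 * continue from it afterwards.
 */
static void walk_inodes(void (*visit)(struct inode *))
{
	struct inode *inode;

	rcu_read_lock();
	cds_list_for_each_entry_rcu(inode, &sb_inodes, i_sb_list) {
		pthread_mutex_lock(&inode->i_lock);
		inode->i_count++;            /* like __iget() under i_lock */
		pthread_mutex_unlock(&inode->i_lock);

		rcu_read_unlock();           /* blocking work allowed here */
		visit(inode);
		rcu_read_lock();             /* re-enter before continuing */
	}
	rcu_read_unlock();
}

That held reference is also what the toput_inode/old_inode dance in the
hunks below relies on: the previous iteration's inode is only iput() once
a reference on the next one already pins the walk.]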
-rw-r--r--  fs/drop_caches.c             | 10
-rw-r--r--  fs/inode.c                   | 53
-rw-r--r--  fs/notify/inode_mark.c       | 10
-rw-r--r--  fs/notify/inotify/inotify.c  | 10
-rw-r--r--  fs/quota/dquot.c             | 16
5 files changed, 32 insertions(+), 67 deletions(-)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 6947cb2ef50c..9962d37d69fc 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -16,8 +16,8 @@ static void drop_pagecache_sb(struct super_block *sb)
{
struct inode *inode, *toput_inode = NULL;
- spin_lock(&sb_inode_list_lock);
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)
|| inode->i_mapping->nrpages == 0) {
@@ -26,13 +26,13 @@ static void drop_pagecache_sb(struct super_block *sb)
}
__iget(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&sb_inode_list_lock);
+ rcu_read_unlock();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
- spin_lock(&sb_inode_list_lock);
+ rcu_read_lock();
}
- spin_unlock(&sb_inode_list_lock);
+ rcu_read_unlock();
iput(toput_inode);
}
diff --git a/fs/inode.c b/fs/inode.c
index fd57c8371686..deb1087816ef 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -347,12 +347,12 @@ static void dispose_list(struct list_head *head)
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
__remove_inode_hash(inode);
- list_del_init(&inode->i_sb_list);
- spin_unlock(&inode->i_lock);
+ spin_lock(&sb_inode_list_lock);
+ list_del_rcu(&inode->i_sb_list);
spin_unlock(&sb_inode_list_lock);
+ spin_unlock(&inode->i_lock);
wake_up_inode(inode);
destroy_inode(inode);
@@ -374,14 +374,6 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
struct list_head *tmp = next;
struct inode *inode;
- /*
- * We can reschedule here without worrying about the list's
- * consistency because the per-sb list of inodes must not
- * change during umount anymore, and because iprune_sem keeps
- * shrink_icache_memory() away.
- */
- cond_resched_lock(&sb_inode_list_lock);
-
next = next->next;
if (tmp == head)
break;
@@ -424,12 +416,17 @@ int invalidate_inodes(struct super_block *sb)
int busy;
LIST_HEAD(throw_away);
+ /*
+ * Don't need to worry about the list's consistency because the per-sb
+ * list of inodes must not change during umount anymore, and because
+ * iprune_sem keeps shrink_icache_memory() away.
+ */
down_write(&iprune_sem);
- spin_lock(&sb_inode_list_lock);
+// spin_lock(&sb_inode_list_lock); XXX: is this safe?
inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
- spin_unlock(&sb_inode_list_lock);
+// spin_unlock(&sb_inode_list_lock);
dispose_list(&throw_away);
up_write(&iprune_sem);
@@ -655,7 +652,8 @@ __inode_add_to_lists(struct super_block *sb, struct inode_hash_bucket *b,
struct inode *inode)
{
atomic_inc(&inodes_stat.nr_inodes);
- list_add(&inode->i_sb_list, &sb->s_inodes);
+ spin_lock(&sb_inode_list_lock);
+ list_add_rcu(&inode->i_sb_list, &sb->s_inodes);
spin_unlock(&sb_inode_list_lock);
if (b) {
spin_lock(&b->lock);
@@ -680,7 +678,6 @@ void inode_add_to_lists(struct super_block *sb, struct inode *inode)
{
struct inode_hash_bucket *b = inode_hashtable + hash(sb, inode->i_ino);
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
__inode_add_to_lists(sb, b, inode);
spin_unlock(&inode->i_lock);
@@ -711,7 +708,6 @@ struct inode *new_inode(struct super_block *sb)
inode = alloc_inode(sb);
if (inode) {
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
inode->i_ino = atomic_inc_return(&last_ino);
inode->i_state = 0;
@@ -778,7 +774,6 @@ static struct inode *get_new_inode(struct super_block *sb,
/* We released the lock, so.. */
old = find_inode(sb, b, test, data);
if (!old) {
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
if (set(inode, data))
goto set_failed;
@@ -808,7 +803,6 @@ static struct inode *get_new_inode(struct super_block *sb,
set_failed:
spin_unlock(&inode->i_lock);
- spin_unlock(&sb_inode_list_lock);
destroy_inode(inode);
return NULL;
}
@@ -829,7 +823,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
/* We released the lock, so.. */
old = find_inode_fast(sb, b, ino);
if (!old) {
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
inode->i_ino = ino;
inode->i_state = I_NEW;
@@ -1310,7 +1303,8 @@ void generic_delete_inode(struct inode *inode)
list_del_init(&inode->i_list);
spin_unlock(&wb_inode_list_lock);
}
- list_del_init(&inode->i_sb_list);
+ spin_lock(&sb_inode_list_lock);
+ list_del_rcu(&inode->i_sb_list);
spin_unlock(&sb_inode_list_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
@@ -1369,15 +1363,12 @@ int generic_detach_inode(struct inode *inode)
}
if (sb->s_flags & MS_ACTIVE) {
spin_unlock(&inode->i_lock);
- spin_unlock(&sb_inode_list_lock);
return 0;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_WILL_FREE;
spin_unlock(&inode->i_lock);
- spin_unlock(&sb_inode_list_lock);
write_inode_now(inode, 1);
- spin_lock(&sb_inode_list_lock);
spin_lock(&inode->i_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_WILL_FREE;
@@ -1389,7 +1380,8 @@ int generic_detach_inode(struct inode *inode)
spin_unlock(&wb_inode_list_lock);
atomic_dec(&inodes_stat.nr_unused);
}
- list_del_init(&inode->i_sb_list);
+ spin_lock(&sb_inode_list_lock);
+ list_del_rcu(&inode->i_sb_list);
spin_unlock(&sb_inode_list_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_FREEING;
@@ -1459,19 +1451,12 @@ void iput(struct inode *inode)
if (inode) {
BUG_ON(inode->i_state == I_CLEAR);
-retry:
spin_lock(&inode->i_lock);
- if (inode->i_count == 1) {
- if (!spin_trylock(&sb_inode_list_lock)) {
- spin_unlock(&inode->i_lock);
- goto retry;
- }
- inode->i_count--;
+ inode->i_count--;
+ if (inode->i_count == 0)
iput_final(inode);
- } else {
- inode->i_count--;
+ else
spin_unlock(&inode->i_lock);
- }
}
}
EXPORT_SYMBOL(iput);
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 81b5bbb3a7ee..c705b7934034 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -413,14 +413,6 @@ void fsnotify_unmount_inodes(struct list_head *list)
spin_unlock(&next_i->i_lock);
}
- /*
- * We can safely drop inode_lock here because we hold
- * references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
- */
- spin_unlock(&sb_inode_list_lock);
-
if (need_iput_tmp)
iput(need_iput_tmp);
@@ -430,7 +422,5 @@ void fsnotify_unmount_inodes(struct list_head *list)
fsnotify_inode_delete(inode);
iput(inode);
-
- spin_lock(&sb_inode_list_lock);
}
}
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 7846758db7da..1c0bc7b681f7 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -438,14 +438,6 @@ void inotify_unmount_inodes(struct list_head *list)
spin_unlock(&next_i->i_lock);
}
- /*
- * We can safely drop inode_lock here because we hold
- * references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
- */
- spin_unlock(&sb_inode_list_lock);
-
if (need_iput_tmp)
iput(need_iput_tmp);
@@ -463,8 +455,6 @@ void inotify_unmount_inodes(struct list_head *list)
}
mutex_unlock(&inode->inotify_mutex);
iput(inode);
-
- spin_lock(&sb_inode_list_lock);
}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5305c71ccea5..7d5ecefc4091 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -844,8 +844,8 @@ static void add_dquot_ref(struct super_block *sb, int type)
struct inode *inode, *old_inode = NULL;
int reserved = 0;
- spin_lock(&sb_inode_list_lock);
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) {
spin_unlock(&inode->i_lock);
@@ -867,7 +867,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
__iget(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&sb_inode_list_lock);
+ rcu_read_unlock();
iput(old_inode);
sb->dq_op->initialize(inode, type);
@@ -877,9 +877,9 @@ static void add_dquot_ref(struct super_block *sb, int type)
* reference and we cannot iput it under inode_lock. So we
* keep the reference and iput it later. */
old_inode = inode;
- spin_lock(&sb_inode_list_lock);
+ rcu_read_lock();
}
- spin_unlock(&sb_inode_list_lock);
+ rcu_read_unlock();
iput(old_inode);
if (reserved) {
@@ -955,8 +955,8 @@ static void remove_dquot_ref(struct super_block *sb, int type,
{
struct inode *inode;
- spin_lock(&sb_inode_list_lock);
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(inode, &sb->s_inodes, i_sb_list) {
/*
* We have to scan also I_NEW inodes because they can already
* have quota pointer initialized. Luckily, we need to touch
@@ -966,7 +966,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
if (!IS_NOQUOTA(inode))
remove_inode_dquot_ref(inode, type, tofree_head);
}
- spin_unlock(&sb_inode_list_lock);
+ rcu_read_unlock();
}
/* Gather all references from inodes and drop them */
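[On the update side, the patch confines sb_inode_list_lock to the points
that actually modify the list (__inode_add_to_lists(), dispose_list(),
generic_delete_inode(), generic_detach_inode()), switching to
list_add_rcu()/list_del_rcu() so concurrent RCU walkers always see a
consistent list. Continuing the userspace sketch from above; inode_add
and inode_remove_and_free are hypothetical names, and the explicit
synchronize_rcu() before free() stands in for the grace period the
kernel arranges through its own RCU machinery:

#include <stdlib.h>         /* free() */

static pthread_mutex_t sb_inode_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writers still serialize on the list lock; only readers go lock-free. */
static void inode_add(struct inode *inode)
{
	pthread_mutex_lock(&sb_inode_list_lock);
	cds_list_add_rcu(&inode->i_sb_list, &sb_inodes);
	pthread_mutex_unlock(&sb_inode_list_lock);
}

static void inode_remove_and_free(struct inode *inode)
{
	pthread_mutex_lock(&sb_inode_list_lock);
	cds_list_del_rcu(&inode->i_sb_list);
	pthread_mutex_unlock(&sb_inode_list_lock);

	synchronize_rcu();  /* wait out readers still walking the old list */
	free(inode);
}

Note the lock-ordering consequence visible in dispose_list() and
iput() above: with modification confined this way, sb_inode_list_lock
nests inside i_lock, which is what lets iput() drop its old
trylock-and-retry loop.]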