author | Nick Piggin <npiggin@suse.de> | 2010-01-29 15:38:20 -0800 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2010-04-27 17:32:30 +0200 |
commit | 65d7028efab8b36c2e458b0869b0d2696632136a (patch) | |
tree | 6e4c1225ccc3afa3f994d8892aab2c6e21775c8f | |
parent | a937f6d2a1bfdaff02ce6d131af5976080b569bd (diff) | |
download | lwn-65d7028efab8b36c2e458b0869b0d2696632136a.tar.gz lwn-65d7028efab8b36c2e458b0869b0d2696632136a.zip |
fs-dcache-scale-d_lru
Add a new lock, dcache_lru_lock, to protect the dcache LRU list and its
counters from concurrent modification. d_lru is also protected by d_lock.
Move lru scanning out from underneath dcache_lock.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | fs/dcache.c | 105 |
1 file changed, 85 insertions, 20 deletions
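The locking rule the patch introduces is that d_lru membership is stabilised by the per-dentry d_lock, while the LRU list itself and its counters are serialised by the new dcache_lru_lock, which nests inside d_lock. The following is a minimal user-space sketch of that pattern, not kernel code: the pthread mutexes and the on_lru/nr_unused variables are stand-ins for the real dentry fields and statistics, chosen only to make the nesting order and the list_empty()-style check concrete.

```c
/*
 * Sketch of the d_lock -> dcache_lru_lock nesting from this patch.
 * Assumptions: pthread mutexes model the kernel spinlocks; on_lru models
 * !list_empty(&dentry->d_lru); nr_unused models dentry_stat.nr_unused.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;          /* per-dentry lock (one dentry here) */
static pthread_mutex_t dcache_lru_lock = PTHREAD_MUTEX_INITIALIZER; /* global LRU lock */

static int on_lru;     /* stand-in for dentry being on the LRU list */
static int nr_unused;  /* stand-in for the unused-dentry counter */

/* Caller holds d_lock; the helper takes dcache_lru_lock internally. */
static void dentry_lru_add(void)
{
	pthread_mutex_lock(&dcache_lru_lock);
	on_lru = 1;
	nr_unused++;
	pthread_mutex_unlock(&dcache_lru_lock);
}

static void dentry_lru_del_init(void)
{
	if (on_lru) {		/* check is stable because d_lock is held */
		pthread_mutex_lock(&dcache_lru_lock);
		on_lru = 0;
		nr_unused--;
		pthread_mutex_unlock(&dcache_lru_lock);
	}
}

int main(void)
{
	pthread_mutex_lock(&d_lock);	/* lock order: d_lock, then dcache_lru_lock */
	dentry_lru_add();
	dentry_lru_del_init();
	pthread_mutex_unlock(&d_lock);

	printf("nr_unused = %d\n", nr_unused);
	return 0;
}
```

The point of the nesting is that a caller already holding d_lock can test whether the dentry is on the LRU without touching the global lock; only the actual list manipulation and counter updates need dcache_lru_lock, which is why the patch requires the dentry_lru_* helpers to be called with d_lock held.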
diff --git a/fs/dcache.c b/fs/dcache.c
index be530d297a44..12e223ac3a90 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,17 +37,26 @@
 /*
  * Usage:
- * dcache_hash_lock protects dcache hash table
+ * dcache_hash_lock protects:
+ *   - the dcache hash table
+ * dcache_lru_lock protects:
+ *   - the dcache lru lists and counters
+ * d_lock protects:
+ *   - d_flags
+ *   - d_name
+ *   - d_lru
  *
  * Ordering:
  * dcache_lock
  *   dentry->d_lock
+ *     dcache_lru_lock
  *     dcache_hash_lock
  */
 
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
@@ -134,37 +143,56 @@ static void dentry_iput(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ * dentry_lru_(add|add_tail|del|del_init) must be called with d_lock held
+ * to protect list_empty(d_lru) condition.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
+	spin_lock(&dcache_lru_lock);
 	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 	dentry->d_sb->s_nr_dentry_unused++;
 	dentry_stat.nr_unused++;
+	spin_unlock(&dcache_lru_lock);
 }
 
 static void dentry_lru_add_tail(struct dentry *dentry)
 {
+	spin_lock(&dcache_lru_lock);
 	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 	dentry->d_sb->s_nr_dentry_unused++;
 	dentry_stat.nr_unused++;
+	spin_unlock(&dcache_lru_lock);
+}
+
+static void __dentry_lru_del(struct dentry *dentry)
+{
+	list_del(&dentry->d_lru);
+	dentry->d_sb->s_nr_dentry_unused--;
+	dentry_stat.nr_unused--;
+}
+
+static void __dentry_lru_del_init(struct dentry *dentry)
+{
+	list_del_init(&dentry->d_lru);
+	dentry->d_sb->s_nr_dentry_unused--;
+	dentry_stat.nr_unused--;
 }
 
 static void dentry_lru_del(struct dentry *dentry)
 {
 	if (!list_empty(&dentry->d_lru)) {
-		list_del(&dentry->d_lru);
-		dentry->d_sb->s_nr_dentry_unused--;
-		dentry_stat.nr_unused--;
+		spin_lock(&dcache_lru_lock);
+		__dentry_lru_del(dentry);
+		spin_unlock(&dcache_lru_lock);
 	}
 }
 
 static void dentry_lru_del_init(struct dentry *dentry)
 {
 	if (likely(!list_empty(&dentry->d_lru))) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_sb->s_nr_dentry_unused--;
-		dentry_stat.nr_unused--;
+		spin_lock(&dcache_lru_lock);
+		__dentry_lru_del_init(dentry);
+		spin_unlock(&dcache_lru_lock);
 	}
 }
 
@@ -175,6 +203,8 @@ static void dentry_lru_del_init(struct dentry *dentry)
  * The dentry must already be unhashed and removed from the LRU.
  *
  * If this is the root of the dentry tree, return NULL.
+ *
+ * dcache_lock and d_lock must be held by caller, are dropped by d_kill.
  */
 static struct dentry *d_kill(struct dentry *dentry)
 	__releases(dentry->d_lock)
@@ -327,11 +357,19 @@ int d_invalidate(struct dentry * dentry)
 }
 
 /* This should be called _only_ with dcache_lock held */
+static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
+{
+	atomic_inc(&dentry->d_count);
+	dentry_lru_del_init(dentry);
+	return dentry;
+}
 
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
 	atomic_inc(&dentry->d_count);
+	spin_lock(&dentry->d_lock);
 	dentry_lru_del_init(dentry);
+	spin_unlock(&dentry->d_lock);
 	return dentry;
 }
 
@@ -408,7 +446,7 @@ restart:
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!atomic_read(&dentry->d_count)) {
-			__dget_locked(dentry);
+			__dget_locked_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
 			spin_unlock(&dcache_lock);
@@ -440,17 +478,18 @@ static void prune_one_dentry(struct dentry * dentry)
 	 * Prune ancestors.  Locking is simpler than in dput(),
 	 * because dcache_lock needs to be taken anyway.
 	 */
-	spin_lock(&dcache_lock);
 	while (dentry) {
-		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+		spin_lock(&dcache_lock);
+		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) {
+			spin_unlock(&dcache_lock);
 			return;
+		}
 
 		if (dentry->d_op && dentry->d_op->d_delete)
 			dentry->d_op->d_delete(dentry);
 		dentry_lru_del_init(dentry);
 		__d_drop(dentry);
 		dentry = d_kill(dentry);
-		spin_lock(&dcache_lock);
 	}
 }
 
@@ -471,10 +510,11 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
 
 	BUG_ON(!sb);
 	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
-	spin_lock(&dcache_lock);
 	if (count != NULL)
 		/* called from prune_dcache() and shrink_dcache_parent() */
 		cnt = *count;
+relock:
+	spin_lock(&dcache_lru_lock);
 restart:
 	if (count == NULL)
 		list_splice_init(&sb->s_dentry_lru, &tmp);
@@ -484,7 +524,10 @@ restart:
 					struct dentry, d_lru);
 			BUG_ON(dentry->d_sb != sb);
-			spin_lock(&dentry->d_lock);
+			if (!spin_trylock(&dentry->d_lock)) {
+				spin_unlock(&dcache_lru_lock);
+				goto relock;
+			}
 			/*
 			 * If we are honouring the DCACHE_REFERENCED flag and
 			 * the dentry has this flag set, don't free it. Clear
@@ -502,13 +545,22 @@ restart:
 				if (!cnt)
 					break;
 			}
-			cond_resched_lock(&dcache_lock);
+			cond_resched_lock(&dcache_lru_lock);
 		}
 	}
+	spin_unlock(&dcache_lru_lock);
+
+	spin_lock(&dcache_lock);
+again:
+	spin_lock(&dcache_lru_lock); /* lru_lock also protects tmp list */
 	while (!list_empty(&tmp)) {
 		dentry = list_entry(tmp.prev, struct dentry, d_lru);
-		dentry_lru_del_init(dentry);
-		spin_lock(&dentry->d_lock);
+
+		if (!spin_trylock(&dentry->d_lock)) {
+			spin_unlock(&dcache_lru_lock);
+			goto again;
+		}
+		__dentry_lru_del_init(dentry);
 		/*
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup.  Do not free
@@ -518,17 +570,22 @@ restart:
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
+
+		spin_unlock(&dcache_lru_lock);
 		prune_one_dentry(dentry);
-		/* dentry->d_lock was dropped in prune_one_dentry() */
-		cond_resched_lock(&dcache_lock);
+		/* dcache_lock and dentry->d_lock dropped */
+		spin_lock(&dcache_lock);
+		spin_lock(&dcache_lru_lock);
 	}
+	spin_unlock(&dcache_lock);
+
 	if (count == NULL && !list_empty(&sb->s_dentry_lru))
 		goto restart;
 	if (count != NULL)
 		*count = cnt;
 	if (!list_empty(&referenced))
 		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dcache_lru_lock);
 }
 
 /**
@@ -636,7 +693,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
 	/* detach this root from the system */
 	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	dentry_lru_del_init(dentry);
+	spin_unlock(&dentry->d_lock);
 	__d_drop(dentry);
 	spin_unlock(&dcache_lock);
 
@@ -650,7 +709,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 		spin_lock(&dcache_lock);
 		list_for_each_entry(loop, &dentry->d_subdirs,
 				    d_u.d_child) {
+			spin_lock(&loop->d_lock);
 			dentry_lru_del_init(loop);
+			spin_unlock(&loop->d_lock);
 			__d_drop(loop);
 			cond_resched_lock(&dcache_lock);
 		}
@@ -834,13 +895,17 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
+		spin_lock(&dentry->d_lock);
 		dentry_lru_del_init(dentry);
+		spin_unlock(&dentry->d_lock);
 		/*
 		 * move only zero ref count dentries to the end
 		 * of the unused list for prune_dcache
 		 */
		if (!atomic_read(&dentry->d_count)) {
+			spin_lock(&dentry->d_lock);
 			dentry_lru_add_tail(dentry);
+			spin_unlock(&dentry->d_lock);
 			found++;
 		}