author    | Al Viro <viro@zeniv.linux.org.uk> | 2016-04-14 19:52:13 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2016-05-02 19:47:51 -0400
commit    | 85c7f81041d57cfe9dc97f4680d5586b54534a39 (patch)
tree      | 16415f1241c9fdc7b279d860d53e60c9c8a9e3fb /fs/dcache.c
parent    | 0568d705b0087751f0c085c0a665aa3e954c810d (diff)
beginning of transition to parallel lookups - marking in-lookup dentries
Dentries are marked as such when a (would-be) parallel lookup is about to
pass them to the actual ->lookup(); they are unmarked when
	* __d_add() is about to make the dentry hashed, positive or not
	* __d_move() (from d_splice_alias(), directly or via __d_unalias())
	  puts a preexisting dentry in its place
	* the caller of ->lookup() finds that the dentry has escaped all of
	  the above
It is a bug (a WARN_ON, actually) if a dentry reaches the final dput() or
d_instantiate() while still marked in-lookup; a caller-side sketch of this
lifecycle is given below.
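As an illustration of that lifecycle, here is a minimal caller-side sketch,
assuming the marking itself gets wired up by later commits in this series.
lookup_one_sketch() is a hypothetical function written only for this example;
d_in_lookup(), d_lookup_done(), dput() and ->lookup() are the interfaces the
commit message refers to.

/*
 * Hypothetical example only -- not part of this patch.
 */
static struct dentry *lookup_one_sketch(struct inode *dir, struct dentry *dentry)
{
	/* dentry is passed to ->lookup() while marked in-lookup */
	struct dentry *old = dir->i_op->lookup(dir, dentry, 0);

	/*
	 * If ->lookup() did not end the in-lookup state itself (via
	 * d_splice_alias() -> __d_add()/__d_move()), the caller must do
	 * it before the dentry can reach dput() or d_instantiate().
	 */
	d_lookup_done(dentry);

	if (old) {
		/* ->lookup() returned a preexisting dentry; use it instead */
		dput(dentry);
		dentry = old;
	}
	return dentry;
}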
As a result, we are guaranteed that for as long as the flag is set, the
dentry will
	* remain negative, unhashed, and with a positive refcount
	* never have its ->d_alias looked at
	* never have its ->d_lru looked at
	* never have its ->d_parent and ->d_name changed
Right now we have at most one such dentry for any given parent directory.
With parallel lookups that restriction will weaken: an in-lookup dentry will
	* only exist while the parent is locked shared
	* be the only one for a given (parent,name) pair (comparison of
	  names is according to ->d_compare())
	* only exist when there is no hashed dentry with the same
	  (parent,name)
The transition will take the next several commits; unfortunately, we will
only be able to switch to an rwsem at the end of this series. The reason
for not doing it as a single patch is to simplify review.
New primitives: d_in_lookup() (a predicate checking whether a dentry is in
the in-lookup state) and d_lookup_done() (tells the system that we are done
with the lookup and that, if the dentry is still marked in-lookup, it should
cease to be such). A sketch of both is given below.
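Only the fs/dcache.c side appears in the diff below, so the following is
purely an assumption about how the header-side helpers could look;
DCACHE_PAR_LOOKUP and __d_lookup_done() are taken from the patch itself.

/*
 * Sketch of the header-side helpers (assumed, not shown in this diff).
 */
static inline bool d_in_lookup(struct dentry *dentry)
{
	return dentry->d_flags & DCACHE_PAR_LOOKUP;
}

static inline void d_lookup_done(struct dentry *dentry)
{
	if (unlikely(d_in_lookup(dentry))) {
		spin_lock(&dentry->d_lock);
		__d_lookup_done(dentry);	/* clears DCACHE_PAR_LOOKUP */
		spin_unlock(&dentry->d_lock);
	}
}

Note that __d_lookup_done() expects ->d_lock to be held, which matches the
diff below: __d_add() calls it right after taking dentry->d_lock, and
__d_move() calls it after dentry_lock_for_move() has locked both dentries.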
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r-- | fs/dcache.c | 13
1 file changed, 13 insertions, 0 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 20394fb6f967..0f1d93866e69 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -761,6 +761,8 @@ repeat:
 	/* Slow case: now with the dentry lock held */
 	rcu_read_unlock();
 
+	WARN_ON(d_in_lookup(dentry));
+
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
 		goto kill_it;
@@ -1746,6 +1748,7 @@ type_determined:
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
 	unsigned add_flags = d_flags_for_inode(inode);
+	WARN_ON(d_in_lookup(dentry));
 
 	spin_lock(&dentry->d_lock);
 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2361,12 +2364,20 @@ void d_rehash(struct dentry * entry)
 }
 EXPORT_SYMBOL(d_rehash);
 
+void __d_lookup_done(struct dentry *dentry)
+{
+	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+	/* more stuff will land here */
+}
+EXPORT_SYMBOL(__d_lookup_done);
 /* inode->i_lock held if inode is non-NULL */
 static inline void __d_add(struct dentry *dentry, struct inode *inode)
 {
 	spin_lock(&dentry->d_lock);
+	if (unlikely(d_in_lookup(dentry)))
+		__d_lookup_done(dentry);
 	if (inode) {
 		unsigned add_flags = d_flags_for_inode(inode);
 
 		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2612,6 +2623,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
 	BUG_ON(d_ancestor(target, dentry));
 
 	dentry_lock_for_move(dentry, target);
+	if (unlikely(d_in_lookup(target)))
+		__d_lookup_done(target);
 
 	write_seqcount_begin(&dentry->d_seq);
 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);