author     Al Viro <viro@zeniv.linux.org.uk>    2020-01-08 20:37:23 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>    2020-02-27 14:43:56 -0500
commit     bd7c4b508344680c843e2d2436d56b9fc8aedc9d
tree       0a04fa39c8e16178e9665e6d412e4b2a4b470fdb /fs/namei.c
parent     31d1726d7250021c66c9f16d8a128444676db782
handle_mounts(): start building a sane wrapper for follow_managed()
All callers of follow_managed() follow it on success with the same steps:
d_backing_inode(path->dentry) is calculated and stored into some struct inode *
variable and, in all but one case, an unsigned variable (the future nd->seq) is
zeroed. The single exception is lookup_fast(), and there zeroing would be the
correct thing to do as well - not doing it is a pointless micro-optimization.

Add a wrapper for follow_managed() that does that combination. It's mostly a
vehicle for code massage - it will be changing quite a bit, and the current
calling conventions are by no means final. Right now it takes the path, the
nameidata and (as out parameters) inode and seq, similar to
__follow_mount_rcu(), which will soon get folded into it.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
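
For illustration only, here is a minimal, self-contained userspace sketch of the pattern this patch factors out: each caller of follow_managed() repeated the same post-processing (fetch the backing inode, zero the would-be seq count), and handle_mounts() pulls that into one helper. The struct definitions, the stub follow_managed() and d_backing_inode(), and main() below are mock stand-ins invented so the example compiles and runs; they are not the kernel's definitions.

#include <stdio.h>

/* Mock types standing in for the kernel structures (assumption, not real). */
struct inode { int i_ino; };
struct dentry { struct inode *d_inode; };
struct path { struct dentry *dentry; };
struct nameidata { unsigned int seq; };

/* Mock stand-in for the kernel's d_backing_inode(). */
static struct inode *d_backing_inode(struct dentry *dentry)
{
	return dentry->d_inode;
}

/* Mock stand-in for follow_managed(); pretend the traversal succeeded. */
static int follow_managed(struct path *path, struct nameidata *nd)
{
	(void)path;
	(void)nd;
	return 1;
}

/*
 * The shape of the new wrapper: on a non-negative return from
 * follow_managed(), fetch the backing inode and clear the seq count -
 * exactly the combination every caller used to open-code.
 */
static inline int handle_mounts(struct path *path, struct nameidata *nd,
				struct inode **inode, unsigned int *seqp)
{
	int ret = follow_managed(path, nd);

	if (ret >= 0) {
		*inode = d_backing_inode(path->dentry);
		*seqp = 0;	/* out of RCU mode, so the value doesn't matter */
	}
	return ret;
}

int main(void)
{
	struct inode ino = { 1 };
	struct dentry dent = { &ino };
	struct path path = { &dent };
	struct nameidata nd = { 42 };
	struct inode *inode = NULL;
	unsigned int seq = ~0U;

	if (handle_mounts(&path, &nd, &inode, &seq) >= 0)
		printf("inode %p, seq %u\n", (void *)inode, seq);
	return 0;
}

Built with a plain C compiler, this prints the fetched inode pointer and a zeroed seq - the same result the call sites in the diff below now obtain from the wrapper instead of open-coding it.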
Diffstat (limited to 'fs/namei.c')
-rw-r--r--   fs/namei.c   32
1 file changed, 16 insertions, 16 deletions
diff --git a/fs/namei.c b/fs/namei.c
index 6938d20aa73a..c104ec75faef 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1385,6 +1385,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
 }
 
+static inline int handle_mounts(struct path *path, struct nameidata *nd,
+			struct inode **inode, unsigned int *seqp)
+{
+	int ret = follow_managed(path, nd);
+
+	if (likely(ret >= 0)) {
+		*inode = d_backing_inode(path->dentry);
+		*seqp = 0; /* out of RCU mode, so the value doesn't matter */
+	}
+	return ret;
+}
+
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
 	struct inode *inode = nd->inode;
@@ -1607,7 +1619,6 @@ static int lookup_fast(struct nameidata *nd,
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
 	int status = 1;
-	int err;
 
 	/*
 	 * Rename seqlock is not required here because in the off chance
@@ -1677,10 +1688,7 @@ static int lookup_fast(struct nameidata *nd,
 
 	path->mnt = mnt;
 	path->dentry = dentry;
-	err = follow_managed(path, nd);
-	if (likely(err > 0))
-		*inode = d_backing_inode(path->dentry);
-	return err;
+	return handle_mounts(path, nd, inode, seqp);
 }
 
 /* Fast lookup failed, do it the slow way */
@@ -1875,12 +1883,9 @@ static int walk_component(struct nameidata *nd, int flags)
 			return PTR_ERR(path.dentry);
 
 		path.mnt = nd->path.mnt;
-		err = follow_managed(&path, nd);
+		err = handle_mounts(&path, nd, &inode, &seq);
 		if (unlikely(err < 0))
 			return err;
-
-		seq = 0;	/* we are already out of RCU mode */
-		inode = d_backing_inode(path.dentry);
 	}
 
 	return step_into(nd, &path, flags, inode, seq);
@@ -2365,11 +2370,9 @@ static int handle_lookup_down(struct nameidata *nd)
 			return -ECHILD;
 	} else {
 		dget(path.dentry);
-		err = follow_managed(&path, nd);
+		err = handle_mounts(&path, nd, &inode, &seq);
 		if (unlikely(err < 0))
 			return err;
-		inode = d_backing_inode(path.dentry);
-		seq = 0;
 	}
 	path_to_nameidata(&path, nd);
 	nd->inode = inode;
@@ -3392,12 +3395,9 @@ static int do_last(struct nameidata *nd,
 		got_write = false;
 	}
 
-	error = follow_managed(&path, nd);
+	error = handle_mounts(&path, nd, &inode, &seq);
 	if (unlikely(error < 0))
 		return error;
-
-	seq = 0;	/* out of RCU mode, so the value doesn't matter */
-	inode = d_backing_inode(path.dentry);
 finish_lookup:
 	error = step_into(nd, &path, 0, inode, seq);
 	if (unlikely(error))