author		Jeff Layton <jlayton@primarydata.com>	2014-07-16 10:31:57 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2014-07-16 21:06:12 -0400
commit		02e1215f9f72ad8c087e21a5701bea0ac18fafd4 (patch)
tree		26b317c316a0d37e04ac1d81116d61cfe7138f3e /fs/nfsd/nfs4state.c
parent		e8051c837bd96ad1eabdd46504363431dc5fddc5 (diff)
nfsd: Avoid taking state_lock while holding inode lock in nfsd_break_one_deleg
state_lock is a heavily contended global lock. We don't want to grab
that while simultaneously holding the inode->i_lock.
Add a new per-nfs4_file lock that we can use to protect the
per-nfs4_file delegation list. Hold it while walking the list in the
break_deleg callback, and queue the recall workqueue job for each
delegation found.
The workqueue job can then take the state_lock and do the list
manipulation with no i_lock held, before starting the RPC call (a
userspace sketch of this scheme follows the sign-offs below).
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
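The locking scheme the message describes is not NFS-specific, so it can be illustrated outside the kernel. Below is a minimal userspace sketch, assuming only POSIX threads: pthread mutexes stand in for the kernel spinlocks, pthread_create() stands in for queue_work(), and every name in it (sketch_file, sketch_deleg, recall_work, break_deleg_cb) is hypothetical, mirroring only the shape of the nfs4_file/nfs4_delegation relationship.

/*
 * deleg_lock_sketch.c -- a userspace sketch of the locking scheme
 * described above. NOT the kernel code: pthread mutexes stand in for
 * spinlocks, pthread_create() stands in for queue_work(), and all
 * names here are hypothetical.
 *
 * Build: cc -pthread deleg_lock_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* The heavily contended global lock (state_lock analogue). */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

struct sketch_deleg {
	struct sketch_deleg *next;	/* per-file delegation list linkage */
	int dl_time;			/* nonzero once queued for recall */
	int id;
};

struct sketch_file {
	pthread_mutex_t fi_lock;	/* the new per-file lock */
	struct sketch_deleg *delegations;
};

/*
 * Workqueue-job analogue. It runs with no other locks held, so it is
 * free to take the global state_lock without nesting it inside the
 * caller's "inode lock".
 */
static void *recall_work(void *arg)
{
	struct sketch_deleg *dp = arg;

	pthread_mutex_lock(&state_lock);
	if (dp->dl_time == 0) {		/* don't queue the same deleg twice */
		dp->dl_time = 1;
		printf("deleg %d moved to the global recall list\n", dp->id);
	}
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

/*
 * break_deleg callback analogue. The real callback runs with
 * inode->i_lock already held, so it may only take the per-file
 * fi_lock while walking the list; all state_lock work is deferred
 * to the spawned jobs.
 */
static void break_deleg_cb(struct sketch_file *fp, pthread_t *tids, int n)
{
	struct sketch_deleg *dp;
	int i = 0;

	pthread_mutex_lock(&fp->fi_lock);
	for (dp = fp->delegations; dp && i < n; dp = dp->next)
		pthread_create(&tids[i++], NULL, recall_work, dp);
	pthread_mutex_unlock(&fp->fi_lock);
}

int main(void)
{
	struct sketch_deleg d1 = { .id = 1 };
	struct sketch_deleg d2 = { .next = &d1, .id = 2 };
	struct sketch_file fp = {
		.fi_lock = PTHREAD_MUTEX_INITIALIZER,
		.delegations = &d2,
	};
	pthread_t tids[2];

	break_deleg_cb(&fp, tids, 2);
	for (int i = 0; i < 2; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

The point of the sketch is the division of labor: the callback path, which in the kernel runs under inode->i_lock, touches only the cheap per-file lock, while every job that needs the contended global lock runs later with nothing else held.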
Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--	fs/nfsd/nfs4state.c	58
1 file changed, 38 insertions(+), 20 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 56ea4f12803e..bdf8ac3393bd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -254,6 +254,8 @@ static void nfsd4_free_file(struct nfs4_file *f)
 static inline void
 put_nfs4_file(struct nfs4_file *fi)
 {
+	might_lock(&state_lock);
+
 	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
 		hlist_del(&fi->fi_hash);
 		spin_unlock(&state_lock);
@@ -554,6 +556,8 @@ static void block_delegations(struct knfsd_fh *fh)
 	u32 hash;
 	struct bloom_pair *bd = &blocked_delegations;
 
+	lockdep_assert_held(&state_lock);
+
 	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
 
 	__set_bit(hash&255, bd->set[bd->new]);
@@ -592,7 +596,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
 	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 	dp->dl_time = 0;
 	atomic_set(&dp->dl_count, 1);
-	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
+	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
 	return dp;
 }
 
@@ -640,7 +644,9 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 	lockdep_assert_held(&state_lock);
 
 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
+	spin_lock(&fp->fi_lock);
 	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	spin_unlock(&fp->fi_lock);
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
@@ -648,14 +654,18 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
+	struct nfs4_file *fp = dp->dl_file;
+
 	spin_lock(&state_lock);
 	list_del_init(&dp->dl_perclnt);
-	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_recall_lru);
+	spin_lock(&fp->fi_lock);
+	list_del_init(&dp->dl_perfile);
+	spin_unlock(&fp->fi_lock);
 	spin_unlock(&state_lock);
-	if (dp->dl_file) {
-		nfs4_put_deleg_lease(dp->dl_file);
-		put_nfs4_file(dp->dl_file);
+	if (fp) {
+		nfs4_put_deleg_lease(fp);
+		put_nfs4_file(fp);
 		dp->dl_file = NULL;
 	}
 }
@@ -1677,7 +1687,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
 		spin_unlock(&nn->client_lock);
 		return NULL;
 	}
-	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
+	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
 	clp->cl_time = get_seconds();
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	copy_verf(clp, verf);
@@ -3079,30 +3089,38 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
 	return ret;
 }
 
-static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfs4_client *clp = dp->dl_stid.sc_client;
 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 
-	lockdep_assert_held(&state_lock);
-	/* We're assuming the state code never drops its reference
-	 * without first removing the lease. Since we're in this lease
-	 * callback (and since the lease code is serialized by the kernel
-	 * lock) we know the server hasn't removed the lease yet, we know
-	 * it's safe to take a reference: */
-	atomic_inc(&dp->dl_count);
-
+	/*
+	 * We can't do this in nfsd_break_deleg_cb because it is
+	 * already holding inode->i_lock
+	 */
+	spin_lock(&state_lock);
+	block_delegations(&dp->dl_fh);
 	/*
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
 	if (dp->dl_time == 0) {
-		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 		dp->dl_time = get_seconds();
+		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 	}
+	spin_unlock(&state_lock);
+}
 
-	block_delegations(&dp->dl_fh);
-
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
+{
+	/*
+	 * We're assuming the state code never drops its reference
+	 * without first removing the lease. Since we're in this lease
+	 * callback (and since the lease code is serialized by the kernel
+	 * lock) we know the server hasn't removed the lease yet, we know
+	 * it's safe to take a reference.
+	 */
+	atomic_inc(&dp->dl_count);
 	nfsd4_cb_recall(dp);
 }
 
@@ -3127,11 +3145,11 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
 	 */
 	fl->fl_break_time = 0;
 
-	spin_lock(&state_lock);
 	fp->fi_had_conflict = true;
+	spin_lock(&fp->fi_lock);
 	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
 		nfsd_break_one_deleg(dp);
-	spin_unlock(&state_lock);
+	spin_unlock(&fp->fi_lock);
 }
 
 static
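Taken together, the hunks above leave the locks in a fixed order. The summary below is editorial, written in the style of a kernel lock-ordering comment rather than taken from the patch itself:

/*
 * Lock ordering after this patch (outer -> inner):
 *
 *	inode->i_lock -> fp->fi_lock	(nfsd_break_deleg_cb path)
 *	state_lock    -> fp->fi_lock	(hash/unhash_delegation)
 *
 * state_lock is never taken inside inode->i_lock: the recall
 * workqueue job (nfsd4_prepare_cb_recall) takes it only after the
 * break callback, and therefore the i_lock, has been released.
 */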