author		David Howells <dhowells@redhat.com>	2020-04-30 01:03:49 +0100
committer	David Howells <dhowells@redhat.com>	2020-06-04 15:37:57 +0100
commit		20325960f8750165964a6891a733e4cc15d19076 (patch)
tree		d372bb21037626d2fac40b9a1e6e6bea21786b8e /fs/afs/security.c
parent		cca37d45d547434144409ae648a19b7eb6db5eb4 (diff)
afs: Reorganise volume and server trees to be rooted on the cell
Reorganise afs_volume objects such that they're in a tree keyed on volume
ID, rooted on an afs_cell object rather than being in multiple trees, each
of which is rooted on an afs_server object.

afs_server structs become per-cell and acquire a pointer to the cell.

The process of breaking a callback then starts with finding the server by
its network address, following that to the cell and then looking up each
volume ID in the volume tree.

This is simpler than the afs_vol_interest/afs_cb_interest N:M mapping web
and allows those structs and the code for maintaining them to be simplified
or removed.

It does make a couple of things a bit more tricky, though:

 (1) Operations now start with a volume, not a server, so there can be more
     than one answer as to whether or not the server we'll end up using
     supports the FS.InlineBulkStatus RPC.

 (2) CB RPC operations that specify the server UUID.  There's still a tree
     of servers by UUID on the afs_net struct, but the UUIDs in it aren't
     guaranteed unique.

Signed-off-by: David Howells <dhowells@redhat.com>
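The cell-rooted lookup flow described above can be pictured with a small
userspace sketch.  The struct layouts and helper names below
(example_cell, example_server, example_lookup_volume and so on) are
illustrative assumptions, not the kernel's definitions; in the real code
the cell keeps its volumes in an rb_tree keyed by volume ID rather than
the simple list used here.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel structures (names are
	 * assumptions); each per-cell server carries a pointer back to
	 * its cell, and the cell owns the collection of volumes.
	 */
	struct example_volume {
		uint64_t vid;			/* volume ID */
		struct example_volume *next;	/* stand-in for the cell's volume tree */
	};

	struct example_cell {
		struct example_volume *volumes;	/* volumes keyed by volume ID */
	};

	struct example_server {
		struct example_cell *cell;	/* servers are now per-cell */
	};

	/* Find a volume by ID in the cell's (simplified) volume collection. */
	static struct example_volume *example_lookup_volume(struct example_cell *cell,
							     uint64_t vid)
	{
		struct example_volume *v;

		for (v = cell->volumes; v; v = v->next)
			if (v->vid == vid)
				return v;
		return NULL;
	}

	/* Breaking callbacks: the server has already been found by its
	 * network address; follow its cell pointer and resolve each
	 * notified volume ID in the cell's volume tree.
	 */
	static void example_break_callbacks(struct example_server *server,
					    const uint64_t *vids, size_t nr)
	{
		size_t i;

		for (i = 0; i < nr; i++) {
			struct example_volume *volume =
				example_lookup_volume(server->cell, vids[i]);

			if (volume)
				printf("break callbacks on volume %llu\n",
				       (unsigned long long)volume->vid);
		}
	}

	int main(void)
	{
		struct example_volume v2 = { .vid = 20001, .next = NULL };
		struct example_volume v1 = { .vid = 20000, .next = &v2 };
		struct example_cell cell = { .volumes = &v1 };
		struct example_server server = { .cell = &cell };
		uint64_t notified[] = { 20001, 99999 };	/* second ID is unknown */

		example_break_callbacks(&server, notified, 2);
		return 0;
	}

The point of the sketch is only the direction of traversal: server ->
cell -> volume-by-ID, replacing the per-server volume trees and the
afs_vol_interest/afs_cb_interest mapping web.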
Diffstat (limited to 'fs/afs/security.c')
-rw-r--r--	fs/afs/security.c	8
1 files changed, 3 insertions, 5 deletions
diff --git a/fs/afs/security.c b/fs/afs/security.c
index ce9de1e6742b..90d852704328 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -170,8 +170,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
break;
}
- if (afs_cb_is_broken(cb_break, vnode,
- rcu_dereference(vnode->cb_interest))) {
+ if (afs_cb_is_broken(cb_break, vnode)) {
changed = true;
break;
}
@@ -201,7 +200,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)))
+ if (afs_cb_is_broken(cb_break, vnode))
goto someone_else_changed_it;
/* We need a ref on any permits list we want to copy as we'll have to
@@ -281,8 +280,7 @@ found:
rcu_read_lock();
spin_lock(&vnode->lock);
zap = rcu_access_pointer(vnode->permit_cache);
- if (!afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)) &&
- zap == permits)
+ if (!afs_cb_is_broken(cb_break, vnode) && zap == permits)
rcu_assign_pointer(vnode->permit_cache, replacement);
else
zap = replacement;