author     Jeff Layton <jlayton@redhat.com>     2017-02-01 13:49:09 -0500
committer  Ilya Dryomov <idryomov@gmail.com>    2017-02-20 12:16:08 +0100
commit     bc2de10dc4da5036ada3381775bd966f0c21c603
tree       5da0f735f7d5c87b23f7fef5e8fd135a39be12f7  /fs/ceph/inode.c
parent     f5a03b080450784e671998921feb62fd3846c953
ceph: convert bools in ceph_mds_request to a new r_req_flags field
Currently, we have a bunch of bool flags in struct ceph_mds_request. We need more flags, but each bool takes (at least) a byte, and those add up over time.

Merge all of the existing bools in this struct into a single unsigned long, and use the set/test/clear_bit macros to manipulate them. These are atomic operations, but that is required here to prevent load/modify/store races: the existing flags are protected by different locks, so we can't rely on those locks for that purpose.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Yan, Zheng <zyan@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
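The pattern behind the conversion: each former bool becomes a numbered bit in a single unsigned long, and callers use the kernel's atomic set_bit/test_bit/clear_bit helpers instead of plain assignments. A minimal sketch of the idea follows; only the two flag names used in this file come from the patch itself, while the bit numbers, the _old/_new struct names, and the surrounding fields are illustrative rather than the real definitions in fs/ceph/mds_client.h.

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* before: one byte (or more) per flag, each guarded by a different lock */
    struct ceph_mds_request_old {
    	bool r_aborted;
    	bool r_did_prepopulate;
    	/* ... */
    };

    /* after: all flags share one word (bit numbers here are illustrative) */
    #define CEPH_MDS_R_ABORTED		1	/* call was aborted */
    #define CEPH_MDS_R_DID_PREPOPULATE	2	/* prepopulated readdir cache */

    struct ceph_mds_request_new {
    	unsigned long r_req_flags;
    	/* ... */
    };

    static void example(struct ceph_mds_request_new *req)
    {
    	/*
    	 * Because the flags that now share one word are protected by
    	 * different locks, a plain |=/&= read-modify-write on the word
    	 * could lose updates.  set_bit/test_bit/clear_bit are atomic
    	 * bit operations, so concurrent updaters cannot clobber each
    	 * other's bits.
    	 */
    	set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);

    	if (!test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags))
    		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
    }

The rest of the patch is then mechanical: every read of req->r_aborted becomes a test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags), and every assignment becomes a set_bit, as the hunks below show for fs/ceph/inode.c.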
Diffstat (limited to 'fs/ceph/inode.c')
-rw-r--r--  fs/ceph/inode.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index b18462c64cdd..ebfb156aba89 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1199,8 +1199,8 @@ retry_lookup:
err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
session, req->r_request_started,
- (!req->r_aborted && rinfo->head->result == 0) ?
- req->r_fmode : -1,
+ (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
+ rinfo->head->result == 0) ? req->r_fmode : -1,
&req->r_caps_reservation);
if (err < 0) {
pr_err("fill_inode badness %p %llx.%llx\n",
@@ -1213,8 +1213,8 @@ retry_lookup:
* ignore null lease/binding on snapdir ENOENT, or else we
* will have trouble splicing in the virtual snapdir later
*/
- if (rinfo->head->is_dentry && !req->r_aborted &&
- req->r_locked_dir &&
+ if (rinfo->head->is_dentry && req->r_locked_dir &&
+ !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
(rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
fsc->mount_options->snapdir_name,
req->r_dentry->d_name.len))) {
@@ -1317,9 +1317,9 @@ retry_lookup:
update_dentry_lease(dn, rinfo->dlease, session,
req->r_request_started);
dout(" final dn %p\n", dn);
- } else if (!req->r_aborted &&
- (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
- req->r_op == CEPH_MDS_OP_MKSNAP)) {
+ } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+ req->r_op == CEPH_MDS_OP_MKSNAP) &&
+ !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
struct dentry *dn = req->r_dentry;
struct inode *dir = req->r_locked_dir;
@@ -1444,7 +1444,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
u32 fpos_offset;
struct ceph_readdir_cache_control cache_ctl = {};
- if (req->r_aborted)
+ if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
return readdir_prepopulate_inodes_only(req, session);
if (rinfo->hash_order && req->r_path2) {
@@ -1598,7 +1598,7 @@ next_item:
}
out:
if (err == 0 && skipped == 0) {
- req->r_did_prepopulate = true;
+ set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
req->r_readdir_cache_idx = cache_ctl.index;
}
ceph_readdir_cache_release(&cache_ctl);