author     David Howells <dhowells@redhat.com>   2021-08-26 09:24:42 -0400
committer  David Howells <dhowells@redhat.com>   2022-03-18 09:24:00 +0000
commit     663dfb65c3b3ea4b8e1944680352992d58f3aa22 (patch)
tree       8a1b195eabea22f85dc59a42e8b700bd6c4efc65 /fs/netfs
parent     5c88705e2aeaee5521b8586e68bef47aab359914 (diff)
netfs: Refactor arguments for netfs_alloc_read_request
Pass start and len to the rreq allocator.  This should ensure that the
fields are set so that ->init_request() can use them.

Also add a parameter to indicate the origin of the request.  Ceph can use
this to tell whether to get caps.

Changes
=======
ver #3)
 - Change the author to me as Jeff feels that most of the patch is my
   changes now.

ver #2)
 - Show the request origin in the netfs_rreq tracepoint.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Co-developed-by: David Howells <dhowells@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/164622989020.3564931.17517006047854958747.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678208569.1200972.12153682697842916557.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692904155.2099075.14717645623034355995.stgit@warthog.procyon.org.uk/ # v3
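With this change, rreq->start, rreq->len and rreq->origin are already populated by the
time ->init_request() runs.  The sketch below is a hypothetical ->init_request() hook
illustrating that; it assumes the int (*init_request)(struct netfs_io_request *, struct file *)
prototype used by this series, and example_init_request() / example_get_read_caps() are
made-up names, not part of this patch:

#include <linux/netfs.h>

/* Stand-in for a filesystem's cap/lease acquisition; always succeeds here. */
static int example_get_read_caps(struct inode *inode, loff_t start, size_t len)
{
	return 0;
}

/*
 * Hypothetical ->init_request() hook.  netfs_alloc_request() has already
 * filled in rreq->start, rreq->len and rreq->origin, so the hook can key
 * its behaviour off the origin, e.g. only taking caps for a read that is
 * needed to complete a write rather than for speculative readahead.
 */
static int example_init_request(struct netfs_io_request *rreq, struct file *file)
{
	if (rreq->origin == NETFS_READ_FOR_WRITE)
		return example_get_read_caps(rreq->inode, rreq->start, rreq->len);

	/* NETFS_READAHEAD and NETFS_READPAGE need no extra setup here. */
	return 0;
}

Passing the origin explicitly means the filesystem no longer has to infer the caller's
intent from which helper invoked it.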
Diffstat (limited to 'fs/netfs')
-rw-r--r--   fs/netfs/internal.h      7
-rw-r--r--   fs/netfs/objects.c      13
-rw-r--r--   fs/netfs/read_helper.c  23
3 files changed, 26 insertions, 17 deletions
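The three origin values passed in the hunks below (NETFS_READAHEAD, NETFS_READPAGE and
NETFS_READ_FOR_WRITE) come from enum netfs_io_origin, which lives in include/linux/netfs.h
and is therefore outside this fs/netfs-limited diffstat.  A minimal sketch consistent with
the values used in this patch (the comments are illustrative; the header holds the
authoritative definition):

/* Sketch of the request-origin enum as used by this patch. */
enum netfs_io_origin {
	NETFS_READAHEAD,	/* read triggered by readahead */
	NETFS_READPAGE,		/* synchronous read of a single folio */
	NETFS_READ_FOR_WRITE,	/* read needed to prepare a write */
};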
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index a0b7d1bf9f3d..89837e904fa7 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -17,9 +17,12 @@
/*
 * objects.c
 */
-struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops,
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+					     struct file *file,
+					     const struct netfs_request_ops *ops,
					     void *netfs_priv,
-					     struct file *file);
+					     loff_t start, size_t len,
+					     enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 39097893e847..986d7a9d25dd 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -11,17 +11,24 @@
/*
 * Allocate an I/O request and initialise it.
 */
-struct netfs_io_request *netfs_alloc_request(
-	const struct netfs_request_ops *ops, void *netfs_priv,
-	struct file *file)
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+					     struct file *file,
+					     const struct netfs_request_ops *ops,
+					     void *netfs_priv,
+					     loff_t start, size_t len,
+					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (rreq) {
+		rreq->start = start;
+		rreq->len = len;
+		rreq->origin = origin;
		rreq->netfs_ops = ops;
		rreq->netfs_priv = netfs_priv;
+		rreq->mapping = mapping;
		rreq->inode = file_inode(file);
		rreq->i_size = i_size_read(rreq->inode);
		rreq->debug_id = atomic_inc_return(&debug_ids);
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 8f277da487b6..dea085715286 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -763,12 +763,13 @@ void netfs_readahead(struct readahead_control *ractl,
	if (readahead_count(ractl) == 0)
		goto cleanup;

-	rreq = netfs_alloc_request(ops, netfs_priv, ractl->file);
+	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+				   ops, netfs_priv,
+				   readahead_pos(ractl),
+				   readahead_length(ractl),
+				   NETFS_READAHEAD);
	if (!rreq)
		goto cleanup;
-	rreq->mapping = ractl->mapping;
-	rreq->start = readahead_pos(ractl);
-	rreq->len = readahead_length(ractl);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
@@ -838,16 +839,15 @@ int netfs_readpage(struct file *file,
	_enter("%lx", folio_index(folio));

-	rreq = netfs_alloc_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
+				   folio_file_pos(folio), folio_size(folio),
+				   NETFS_READPAGE);
	if (!rreq) {
		if (netfs_priv)
			ops->cleanup(folio_file_mapping(folio), netfs_priv);
		folio_unlock(folio);
		return -ENOMEM;
	}
-	rreq->mapping = folio_file_mapping(folio);
-	rreq->start = folio_file_pos(folio);
-	rreq->len = folio_size(folio);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
@@ -1008,12 +1008,11 @@ retry:
	}

	ret = -ENOMEM;
-	rreq = netfs_alloc_request(ops, netfs_priv, file);
+	rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
+				   folio_file_pos(folio), folio_size(folio),
+				   NETFS_READ_FOR_WRITE);
	if (!rreq)
		goto error;
-	rreq->mapping = folio_file_mapping(folio);
-	rreq->start = folio_file_pos(folio);
-	rreq->len = folio_size(folio);
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
	netfs_priv = NULL;