author     David Howells <dhowells@redhat.com>      2024-10-04 15:33:58 +0100
committer  Christian Brauner <brauner@kernel.org>   2024-10-07 13:48:22 +0200
commit     796a4049640b54cb1daf9e7fe543292c5ca02c74
tree       23297c8976b96683ebb392a1bfaffe7f4f4ab374
parent     8cf0b93919e13d1e8d4466eb4080a4c4d9d66d7b
netfs: In readahead, put the folio refs as soon as extracted
netfslib currently defers dropping the refs on the folios it obtains during
readahead until after it has started I/O, on the basis that this can be done
whilst waiting for the I/O to complete. However, this runs the risk of racing
with the I/O collection in future.
Furthermore, Matthew Wilcox strongly suggests that the refs should be
dropped immediately, as readahead_folio() does (netfslib is using
__readahead_batch(), which doesn't drop the refs).
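For context, a minimal sketch of the batching idiom the fix adopts (the helper
name and the array-based walk are illustrative only, not part of the patch):
the refs inherited from the VM readahead code are accumulated in a folio_batch
and put as soon as the folios have been extracted, rather than after I/O has
been started. Dropping the refs this early is safe because the folios are
still locked, which pins them until the I/O completes.

#include <linux/mm.h>
#include <linux/pagevec.h>	/* struct folio_batch */

/*
 * Hypothetical helper: drop the readahead refs on a set of folios as soon as
 * they have been extracted, batching the puts so the refs are released a
 * batch at a time rather than one by one.
 */
static void put_extracted_folio_refs(struct folio **folios, unsigned int nr)
{
	struct folio_batch put_batch;
	unsigned int i;

	folio_batch_init(&put_batch);
	for (i = 0; i < nr; i++) {
		/* folio_batch_add() returns the space left; 0 means the batch is full */
		if (!folio_batch_add(&put_batch, folios[i]))
			folio_batch_release(&put_batch);
	}
	folio_batch_release(&put_batch);	/* put whatever is still batched */
}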
Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/3771538.1728052438@warthog.procyon.org.uk
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
-rw-r--r--  fs/netfs/buffered_read.c      | 47
-rw-r--r--  fs/netfs/read_collect.c       |  2
-rw-r--r--  include/trace/events/netfs.h  |  1
3 files changed, 16 insertions, 34 deletions
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index c40e226053cc..af46a598f4d7 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -67,7 +67,8 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
  * Decant the list of folios to read into a rolling buffer.
  */
 static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
-					struct folio_queue *folioq)
+					struct folio_queue *folioq,
+					struct folio_batch *put_batch)
 {
 	unsigned int order, nr;
 	size_t size = 0;
@@ -82,6 +83,9 @@ static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
 		order = folio_order(folio);
 		folioq->orders[i] = order;
 		size += PAGE_SIZE << order;
+
+		if (!folio_batch_add(put_batch, folio))
+			folio_batch_release(put_batch);
 	}
 
 	for (int i = nr; i < folioq_nr_slots(folioq); i++)
@@ -120,6 +124,9 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 		 * that we will need to release later - but we don't want to do
 		 * that until after we've started the I/O.
 		 */
+		struct folio_batch put_batch;
+
+		folio_batch_init(&put_batch);
 		while (rreq->submitted < subreq->start + rsize) {
 			struct folio_queue *tail = rreq->buffer_tail, *new;
 			size_t added;
@@ -132,10 +139,11 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 			new->prev = tail;
 			tail->next = new;
 			rreq->buffer_tail = new;
-			added = netfs_load_buffer_from_ra(rreq, new);
+			added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
 			rreq->iter.count += added;
 			rreq->submitted += added;
 		}
+		folio_batch_release(&put_batch);
 	}
 
 	subreq->len = rsize;
@@ -348,6 +356,7 @@ static int netfs_wait_for_read(struct netfs_io_request *rreq)
 static int netfs_prime_buffer(struct netfs_io_request *rreq)
 {
 	struct folio_queue *folioq;
+	struct folio_batch put_batch;
 	size_t added;
 
 	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
@@ -360,39 +369,14 @@ static int netfs_prime_buffer(struct netfs_io_request *rreq)
 	rreq->submitted = rreq->start;
 	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
 
-	added = netfs_load_buffer_from_ra(rreq, folioq);
+	folio_batch_init(&put_batch);
+	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
+	folio_batch_release(&put_batch);
 	rreq->iter.count += added;
 	rreq->submitted += added;
 	return 0;
 }
 
-/*
- * Drop the ref on each folio that we inherited from the VM readahead code. We
- * still have the folio locks to pin the page until we complete the I/O.
- *
- * Note that we can't just release the batch in each queue struct as we use the
- * occupancy count in other places.
- */
-static void netfs_put_ra_refs(struct folio_queue *folioq)
-{
-	struct folio_batch fbatch;
-
-	folio_batch_init(&fbatch);
-	while (folioq) {
-		for (unsigned int slot = 0; slot < folioq_count(folioq); slot++) {
-			struct folio *folio = folioq_folio(folioq, slot);
-			if (!folio)
-				continue;
-			trace_netfs_folio(folio, netfs_folio_trace_read_put);
-			if (!folio_batch_add(&fbatch, folio))
-				folio_batch_release(&fbatch);
-		}
-		folioq = folioq->next;
-	}
-
-	folio_batch_release(&fbatch);
-}
-
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -436,9 +420,6 @@ void netfs_readahead(struct readahead_control *ractl)
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq);
 
-	/* Release the folio refs whilst we're waiting for the I/O. */
-	netfs_put_ra_refs(rreq->buffer);
-
 	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
 	return;
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index b18c65ba5580..3cbb289535a8 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -77,6 +77,8 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
 			folio_unlock(folio);
 		}
 	}
+
+	folioq_clear(folioq, slot);
 }
 
 /*
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 1d7c52821e55..69975c9c6823 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -172,7 +172,6 @@
 	EM(netfs_folio_trace_read,		"read")		\
 	EM(netfs_folio_trace_read_done,		"read-done")	\
 	EM(netfs_folio_trace_read_gaps,		"read-gaps")	\
-	EM(netfs_folio_trace_read_put,		"read-put")	\
 	EM(netfs_folio_trace_read_unlock,	"read-unlock")	\
 	EM(netfs_folio_trace_redirtied,		"redirtied")	\
 	EM(netfs_folio_trace_store,		"store")	\