author | Al Viro <viro@zeniv.linux.org.uk> | 2021-05-30 22:53:43 -0400
committer | Al Viro <viro@zeniv.linux.org.uk> | 2021-06-02 17:50:38 -0400
commit | 9067931236651c8bde847d17a2f862d052e672b7 (patch)
tree | 0cc421fee7d9cd6ad32a4d46ebf3b8d5395104f1 /fs/ntfs
parent | 6efb943b8616ec53a5e444193dccf1af9ad627b5 (diff)
ntfs_copy_from_user_iter(): don't bother with copying iov_iter
Advance the original iov_iter and let the caller revert it if it needs to.
Don't mess with iov_iter_single_seg_count() in the caller: if we got a
(non-zero) short copy, use the amount actually copied for the next pass;
if nothing got copied at all, limit the next pass to "up to the end of
the page".
Originally fault-in only read the first iovec; back then it made sense
to limit the pass after a short copy to just that one iovec. These days
that is no longer true.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/ntfs')
-rw-r--r-- | fs/ntfs/file.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index e5aab265dff1..0666d4578137 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1684,20 +1684,19 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
 {
        struct page **last_page = pages + nr_pages;
        size_t total = 0;
-       struct iov_iter data = *i;
        unsigned len, copied;
 
        do {
                len = PAGE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+               copied = iov_iter_copy_from_user_atomic(*pages, i, ofs,
                                len);
+               iov_iter_advance(i, copied);
                total += copied;
                bytes -= copied;
                if (!bytes)
                        break;
-               iov_iter_advance(&data, copied);
                if (copied < len)
                        goto err;
                ofs = 0;
@@ -1866,34 +1865,24 @@ again:
                if (likely(copied == bytes)) {
                        status = ntfs_commit_pages_after_write(pages, do_pages,
                                        pos, bytes);
-                       if (!status)
-                               status = bytes;
                }
                do {
                        unlock_page(pages[--do_pages]);
                        put_page(pages[do_pages]);
                } while (do_pages);
-               if (unlikely(status < 0))
+               if (unlikely(status < 0)) {
+                       iov_iter_revert(i, copied);
                        break;
-               copied = status;
+               }
                cond_resched();
-               if (unlikely(!copied)) {
-                       size_t sc;
-
-                       /*
-                        * We failed to copy anything. Fall back to single
-                        * segment length write.
-                        *
-                        * This is needed to avoid possible livelock in the
-                        * case that all segments in the iov cannot be copied
-                        * at once without a pagefault.
-                        */
-                       sc = iov_iter_single_seg_count(i);
-                       if (bytes > sc)
-                               bytes = sc;
+               if (unlikely(copied < bytes)) {
+                       iov_iter_revert(i, copied);
+                       if (copied)
+                               bytes = copied;
+                       else if (bytes > PAGE_SIZE - ofs)
+                               bytes = PAGE_SIZE - ofs;
                        goto again;
                }
-               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
                balance_dirty_pages_ratelimited(mapping);