author    | Christoph Hellwig <hch@lst.de>            | 2019-10-19 09:09:47 -0700
committer | Darrick J. Wong <darrick.wong@oracle.com> | 2019-10-21 09:04:58 -0700
commit    | 5c5b6f7585d272a2fccf4ccf9b85251f6fbeb124 (patch)
tree      | d4eefce86eaaac71ecf36ec3e13481688edbea99 /fs/xfs/xfs_iomap.c
parent    | 12dfb58af61da826839f50cf8cd089150c478880 (diff)
xfs: cleanup xfs_direct_write_iomap_begin
Move more checks into the helpers that determine whether we need a COW
operation or an allocation, and split the return path for the case where
an existing data extent was found from the case where a new allocation
is needed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs/xfs/xfs_iomap.c')
-rw-r--r-- | fs/xfs/xfs_iomap.c | 88
1 file changed, 46 insertions(+), 42 deletions(-)
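Before the diff itself, here is a small, compilable userspace sketch of the two predicates this patch introduces, to make the refactored control flow easier to follow. It is an illustration only: the flag values, the simplified struct irec, and the is_cow_inode/inode_is_dax variables are stand-ins invented for this sketch, not the real XFS definitions; only the decision logic mirrors the kernel code in the diff below.

/*
 * Illustrative model of the refactored checks, not kernel code: the
 * zeroing/DAX logic moves into imap_needs_alloc() and the reflink/zeroing
 * logic into imap_needs_cow(), so the caller reduces to two predicates
 * feeding two separate exit paths (found mapping vs. allocate_blocks).
 */
#include <stdbool.h>
#include <stdio.h>

#define IOMAP_ZERO      0x1     /* stand-in flag value, not the kernel's */
#define HOLESTARTBLOCK  (-2LL)  /* stand-in sentinel values */
#define DELAYSTARTBLOCK (-1LL)

enum ext_state { EXT_NORM, EXT_UNWRITTEN };

struct irec {                           /* simplified xfs_bmbt_irec */
        long long       br_startblock;
        enum ext_state  br_state;
};

static bool is_cow_inode = true;        /* stand-in for xfs_is_cow_inode(ip) */
static bool inode_is_dax;               /* stand-in for IS_DAX(inode) */

static bool imap_needs_alloc(unsigned flags, struct irec *imap, int nimaps)
{
        /* don't allocate blocks when just zeroing */
        if (flags & IOMAP_ZERO)
                return false;
        if (!nimaps ||
            imap->br_startblock == HOLESTARTBLOCK ||
            imap->br_startblock == DELAYSTARTBLOCK)
                return true;
        /* unwritten extents are converted before copying the data for DAX */
        if (inode_is_dax && imap->br_state == EXT_UNWRITTEN)
                return true;
        return false;
}

static bool imap_needs_cow(unsigned flags, struct irec *imap, int nimaps)
{
        if (!is_cow_inode)
                return false;
        /* when zeroing we don't have to COW holes or unwritten extents */
        if ((flags & IOMAP_ZERO) &&
            (!nimaps ||
             imap->br_startblock == HOLESTARTBLOCK ||
             imap->br_state == EXT_UNWRITTEN))
                return false;
        return true;
}

int main(void)
{
        struct irec hole = { .br_startblock = HOLESTARTBLOCK };

        /* zeroing a hole: neither COW nor allocation is needed */
        printf("zeroing a hole:      cow=%d alloc=%d\n",
               imap_needs_cow(IOMAP_ZERO, &hole, 1),
               imap_needs_alloc(IOMAP_ZERO, &hole, 1));
        /* an ordinary write over a hole still needs an allocation */
        printf("writing over a hole: cow=%d alloc=%d\n",
               imap_needs_cow(0, &hole, 1),
               imap_needs_alloc(0, &hole, 1));
        return 0;
}

With both helpers handling the IOMAP_ZERO case themselves, xfs_direct_write_iomap_begin() no longer needs the out_found/out_finish labels: the found-mapping case returns directly, and only the allocation case jumps to the new allocate_blocks label, as the diff below shows.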
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index b6e17594d10a..6b429bfd5bb8 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -642,23 +642,42 @@ error_on_bmapi_transaction:
 static inline bool
 imap_needs_alloc(
        struct inode            *inode,
+       unsigned                flags,
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
 {
-       return !nimaps ||
-               imap->br_startblock == HOLESTARTBLOCK ||
-               imap->br_startblock == DELAYSTARTBLOCK ||
-               (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
+       /* don't allocate blocks when just zeroing */
+       if (flags & IOMAP_ZERO)
+               return false;
+       if (!nimaps ||
+           imap->br_startblock == HOLESTARTBLOCK ||
+           imap->br_startblock == DELAYSTARTBLOCK)
+               return true;
+       /* we convert unwritten extents before copying the data for DAX */
+       if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+               return true;
+       return false;
 }
 
 static inline bool
-needs_cow_for_zeroing(
+imap_needs_cow(
+       struct xfs_inode        *ip,
+       unsigned int            flags,
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
 {
-       return nimaps &&
-               imap->br_startblock != HOLESTARTBLOCK &&
-               imap->br_state != XFS_EXT_UNWRITTEN;
+       if (!xfs_is_cow_inode(ip))
+               return false;
+
+       /* when zeroing we don't have to COW holes or unwritten extents */
+       if (flags & IOMAP_ZERO) {
+               if (!nimaps ||
+                   imap->br_startblock == HOLESTARTBLOCK ||
+                   imap->br_state == XFS_EXT_UNWRITTEN)
+                       return false;
+       }
+
+       return true;
 }
 
 static int
@@ -743,6 +762,14 @@ xfs_direct_write_iomap_begin(
                return -EIO;
 
        /*
+        * Writes that span EOF might trigger an IO size update on completion,
+        * so consider them to be dirty for the purposes of O_DSYNC even if
+        * there is no other metadata changes pending or have been made here.
+        */
+       if (offset + length > i_size_read(inode))
+               iomap_flags |= IOMAP_F_DIRTY;
+
+       /*
         * Lock the inode in the manner required for the specified operation and
         * check for as many conditions that would result in blocking as
         * possible. This removes most of the non-blocking checks from the
@@ -761,12 +788,7 @@ xfs_direct_write_iomap_begin(
         * Break shared extents if necessary. Checks for non-blocking IO have
         * been done up front, so we don't need to do them here.
         */
-       if (xfs_is_cow_inode(ip)) {
-               /* if zeroing doesn't need COW allocation, then we are done. */
-               if ((flags & IOMAP_ZERO) &&
-                   !needs_cow_for_zeroing(&imap, nimaps))
-                       goto out_found;
-
+       if (imap_needs_cow(ip, flags, &imap, nimaps)) {
                /* may drop and re-acquire the ilock */
                error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
                                &lockmode, flags & IOMAP_DIRECT);
@@ -778,18 +800,17 @@ xfs_direct_write_iomap_begin(
                length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }
 
-       /* Don't need to allocate over holes when doing zeroing operations. */
-       if (flags & IOMAP_ZERO)
-               goto out_found;
+       if (imap_needs_alloc(inode, flags, &imap, nimaps))
+               goto allocate_blocks;
 
-       if (!imap_needs_alloc(inode, &imap, nimaps))
-               goto out_found;
+       xfs_iunlock(ip, lockmode);
+       trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
 
-       /* If nowait is set bail since we are going to make allocations. */
-       if (flags & IOMAP_NOWAIT) {
-               error = -EAGAIN;
+allocate_blocks:
+       error = -EAGAIN;
+       if (flags & IOMAP_NOWAIT)
                goto out_unlock;
-       }
 
        /*
         * We cap the maximum length we map to a sane size to keep the chunks
@@ -808,29 +829,12 @@ xfs_direct_write_iomap_begin(
         */
        if (lockmode == XFS_ILOCK_EXCL)
                xfs_ilock_demote(ip, lockmode);
-       error = xfs_iomap_write_direct(ip, offset, length, &imap,
-                       nimaps);
+       error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps);
        if (error)
                return error;
 
-       iomap_flags |= IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
-
-out_finish:
-       /*
-        * Writes that span EOF might trigger an IO size update on completion,
-        * so consider them to be dirty for the purposes of O_DSYNC even if
-        * there is no other metadata changes pending or have been made here.
-        */
-       if (offset + length > i_size_read(inode))
-               iomap_flags |= IOMAP_F_DIRTY;
-       return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
-
-out_found:
-       ASSERT(nimaps);
-       xfs_iunlock(ip, lockmode);
-       trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-       goto out_finish;
+       return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
 
 out_found_cow:
        xfs_iunlock(ip, lockmode);