summaryrefslogtreecommitdiff
path: root/fs/xfs/xfs_inode_item.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2020-06-29 14:49:18 -0700
committerDarrick J. Wong <darrick.wong@oracle.com>2020-07-07 07:15:08 -0700
commit48d55e2ae3ce837598c073995bbbac5d24a35fe1 (patch)
treeb5d1923900858dd3e77b725bae8d4a0f666fcc5f /fs/xfs/xfs_inode_item.c
parent71e3e35646861f2f9b8d36e00720904ed3ca31cb (diff)
downloadlwn-48d55e2ae3ce837598c073995bbbac5d24a35fe1.tar.gz
lwn-48d55e2ae3ce837598c073995bbbac5d24a35fe1.zip
xfs: attach inodes to the cluster buffer when dirtied
Rather than attach inodes to the cluster buffer just when we are doing IO, attach the inodes to the cluster buffer when they are dirtied. This means the buffer always carries a list of dirty inodes that reference it, and we can use that list to make more fundamental changes to inode writeback that aren't otherwise possible. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs/xfs/xfs_inode_item.c')
-rw-r--r--fs/xfs/xfs_inode_item.c16
1 files changed, 14 insertions, 2 deletions
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 64bdda72f7b2..697248b7eb2b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -660,6 +660,10 @@ xfs_inode_item_destroy(
* list for other inodes that will run this function. We remove them from the
* buffer list so we can process all the inode IO completions in one AIL lock
* traversal.
+ *
+ * Note: Now that we attach the log item to the buffer when we first log the
+ * inode in memory, we can have unflushed inodes on the buffer list here. These
+ * inodes will have a zero ili_last_fields, so skip over them here.
*/
void
xfs_iflush_done(
@@ -677,12 +681,15 @@ xfs_iflush_done(
*/
list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
iip = INODE_ITEM(lip);
+
if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
- list_del_init(&lip->li_bio_list);
xfs_iflush_abort(iip->ili_inode);
continue;
}
+ if (!iip->ili_last_fields)
+ continue;
+
list_move_tail(&lip->li_bio_list, &tmp);
/* Do an unlocked check for needing the AIL lock. */
@@ -728,12 +735,16 @@ xfs_iflush_done(
/*
* Remove the reference to the cluster buffer if the inode is
* clean in memory. Drop the buffer reference once we've dropped
- * the locks we hold.
+ * the locks we hold. If the inode is dirty in memory, we need
+ * to put the inode item back on the buffer list for another
+ * pass through the flush machinery.
*/
ASSERT(iip->ili_item.li_buf == bp);
if (!iip->ili_fields) {
iip->ili_item.li_buf = NULL;
drop_buffer = true;
+ } else {
+ list_add(&lip->li_bio_list, &bp->b_li_list);
}
iip->ili_last_fields = 0;
iip->ili_flush_lsn = 0;
@@ -777,6 +788,7 @@ xfs_iflush_abort(
iip->ili_flush_lsn = 0;
bp = iip->ili_item.li_buf;
iip->ili_item.li_buf = NULL;
+ list_del_init(&iip->ili_item.li_bio_list);
spin_unlock(&iip->ili_lock);
}
xfs_ifunlock(ip);