path: root/fs/xfs/xfs_log.c
author     Christoph Hellwig <hch@infradead.org>    2012-04-23 15:58:33 +1000
committer  Ben Myers <bpm@sgi.com>                  2012-05-14 16:20:26 -0500
commit     1c30462542bac8abffb4823638b6b1659c1cfcf5 (patch)
tree       b86f1fc14eb0f41e224833128ecc81163227d044 /fs/xfs/xfs_log.c
parent     32ce90a4b79155a155de2b284d8b69023e5e8fea (diff)
xfs: allow assigning the tail lsn with the AIL lock held
Provide a variant of xlog_assign_tail_lsn that has the AIL lock already
held. By doing so we do an additional atomic_read + atomic_set under the
lock, which comes down to two instructions.

Switch xfs_trans_ail_update_bulk and xfs_trans_ail_delete_bulk to the new
version to reduce the number of lock roundtrips, and prepare for a new
addition that would require a third lock roundtrip in
xfs_trans_ail_delete_bulk. This addition is also the reason for slightly
rearranging the conditionals and relying on xfs_log_space_wake for
checking that the filesystem has been shut down internally.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
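The point of the split is that callers which already hold the AIL lock (such
as the bulk update and delete paths mentioned above) can call
xlog_assign_tail_lsn_locked() directly, while xlog_assign_tail_lsn() becomes
a thin lock/unlock wrapper around it. A rough user-space sketch of that
locked/unlocked split, using pthreads and invented names rather than the
actual XFS code, might look like this:

/*
 * Illustrative analogue only, not XFS code: a "_locked" variant is
 * split out so callers that already hold the protecting lock avoid an
 * extra unlock/lock round trip.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static long tail_lsn;			/* protected by ail_lock */
static long last_sync_lsn = 100;	/* fallback when the list is empty */
static long min_item_lsn;		/* 0 means the "AIL" is empty */

/* Caller must already hold ail_lock. */
static long assign_tail_lsn_locked(void)
{
	tail_lsn = min_item_lsn ? min_item_lsn : last_sync_lsn;
	return tail_lsn;
}

/* Wrapper for callers that do not hold the lock. */
static long assign_tail_lsn(void)
{
	long lsn;

	pthread_mutex_lock(&ail_lock);
	lsn = assign_tail_lsn_locked();
	pthread_mutex_unlock(&ail_lock);
	return lsn;
}

/*
 * A bulk-update style caller: it already holds the lock to modify the
 * list, so it updates the tail via the _locked variant instead of
 * dropping and retaking the lock.
 */
static void bulk_update(long new_min_lsn)
{
	pthread_mutex_lock(&ail_lock);
	min_item_lsn = new_min_lsn;
	assign_tail_lsn_locked();
	pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	bulk_update(42);
	printf("tail lsn after bulk update: %ld\n", assign_tail_lsn());
	return 0;
}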
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c | 31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6db1fef38bff..418d5d7bc52b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -916,27 +916,42 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(
+xlog_assign_tail_lsn_locked(
 	struct xfs_mount	*mp)
 {
-	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
+	struct xfs_log_item	*lip;
+	xfs_lsn_t		tail_lsn;
+
+	assert_spin_locked(&mp->m_ail->xa_lock);
 
 	/*
 	 * To make sure we always have a valid LSN for the log tail we keep
 	 * track of the last LSN which was committed in log->l_last_sync_lsn,
-	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
-	 *
-	 * If the AIL has been emptied we also need to wake any process
-	 * waiting for this condition.
+	 * and use that when the AIL was empty.
 	 */
-	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
-	if (!tail_lsn)
+	lip = xfs_ail_min(mp->m_ail);
+	if (lip)
+		tail_lsn = lip->li_lsn;
+	else
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
 
+xfs_lsn_t
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
+{
+	xfs_lsn_t		tail_lsn;
+
+	spin_lock(&mp->m_ail->xa_lock);
+	tail_lsn = xlog_assign_tail_lsn_locked(mp);
+	spin_unlock(&mp->m_ail->xa_lock);
+
+	return tail_lsn;
+}
+
 /*
  * Return the space in the log between the tail and the head. The head
  * is passed in the cycle/bytes formal parms. In the special case where