xfs: allow assigning the tail lsn with the AIL lock held

Provide a variant of xlog_assign_tail_lsn that expects the AIL lock to be
held already.  The cost of doing so is an additional atomic_read +
atomic_set under the lock, which comes down to two instructions.
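
To make the pattern concrete, here is a minimal userspace sketch (pthreads
stand-ins, not the XFS code; all names below are hypothetical): the _locked
variant does the work and assumes the caller holds the lock, while the
unlocked wrapper only takes and drops the lock around it.

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-ins for the AIL lock and the log's LSN fields. */
static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic long tail_lsn;
static _Atomic long last_sync_lsn = 100;

/*
 * Caller must already hold ail_lock; the extra work done under the lock
 * is just one atomic load plus one atomic store.
 */
static long assign_tail_lsn_locked(void)
{
	long lsn = atomic_load(&last_sync_lsn);	/* stand-in for the AIL minimum */

	atomic_store(&tail_lsn, lsn);
	return lsn;
}

/* Convenience wrapper for callers that do not hold the lock. */
static long assign_tail_lsn(void)
{
	long lsn;

	pthread_mutex_lock(&ail_lock);
	lsn = assign_tail_lsn_locked();
	pthread_mutex_unlock(&ail_lock);
	return lsn;
}

int main(void)
{
	return assign_tail_lsn() == 100 ? 0 : 1;
}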

Switch xfs_trans_ail_update_bulk and xfs_trans_ail_delete_bulk to the new
variant to reduce the number of lock roundtrips, and to prepare for a new
addition that would require a third lock roundtrip in
xfs_trans_ail_delete_bulk.  That addition is also the reason for slightly
rearranging the conditionals and for relying on xfs_log_space_wake to check
internally whether the filesystem has been shut down.
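
Rearranged this way, the tail of the bulk update/delete paths takes the
shape sketched below (again userspace stand-ins, not the kernel
implementation): the tail LSN is assigned while the lock is still held, the
lock is dropped on both branches, and the wake-up helper is called without
an explicit shutdown check because it performs that check itself.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-ins; the lock mirrors the one in the sketch above. */
static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static bool forced_shutdown(void) { return false; }	/* placeholder */
static void assign_tail_lsn_locked(void) { }		/* as sketched above */

static void log_space_wake(void)
{
	if (forced_shutdown())		/* the shutdown check lives here */
		return;
	/* ...wake anyone waiting for log space... */
}

/* Tail of a bulk update/delete path; entered with ail_lock held. */
static void finish_ail_change(bool min_item_changed)
{
	if (min_item_changed) {
		if (!forced_shutdown())
			assign_tail_lsn_locked();
		pthread_mutex_unlock(&ail_lock);

		log_space_wake();
	} else {
		pthread_mutex_unlock(&ail_lock);
	}
}

int main(void)
{
	pthread_mutex_lock(&ail_lock);	/* callers arrive with the lock held */
	finish_ail_change(true);
	return 0;
}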

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Christoph Hellwig 2012-04-23 15:58:33 +10:00, committed by Ben Myers
Parent 32ce90a4b7
Commit 1c30462542
4 changed files: 40 additions and 15 deletions


@@ -916,27 +916,42 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(
+xlog_assign_tail_lsn_locked(
 	struct xfs_mount	*mp)
 {
-	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
+	struct xfs_log_item	*lip;
+	xfs_lsn_t		tail_lsn;
+
+	assert_spin_locked(&mp->m_ail->xa_lock);
 
 	/*
 	 * To make sure we always have a valid LSN for the log tail we keep
 	 * track of the last LSN which was committed in log->l_last_sync_lsn,
-	 * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
-	 *
-	 * If the AIL has been emptied we also need to wake any process
-	 * waiting for this condition.
+	 * and use that when the AIL was empty.
 	 */
-	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
-	if (!tail_lsn)
+	lip = xfs_ail_min(mp->m_ail);
+	if (lip)
+		tail_lsn = lip->li_lsn;
+	else
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
 }
 
+xfs_lsn_t
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
+{
+	xfs_lsn_t		tail_lsn;
+
+	spin_lock(&mp->m_ail->xa_lock);
+	tail_lsn = xlog_assign_tail_lsn_locked(mp);
+	spin_unlock(&mp->m_ail->xa_lock);
+
+	return tail_lsn;
+}
+
 /*
  * Return the space in the log between the tail and the head. The head
  * is passed in the cycle/bytes formal parms. In the special case where


@@ -152,6 +152,7 @@ int	  xfs_log_mount(struct xfs_mount	*mp,
 			int			num_bblocks);
 int	  xfs_log_mount_finish(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void	  xfs_log_space_wake(struct xfs_mount *mp);
 int	  xfs_log_notify(struct xfs_mount	*mp,
 			struct xlog_in_core	*iclog,


@@ -79,7 +79,7 @@ xfs_ail_check(
  * Return a pointer to the first item in the AIL. If the AIL is empty, then
  * return NULL.
  */
-static xfs_log_item_t *
+xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail		*ailp)
 {
@@ -667,11 +667,15 @@ xfs_trans_ail_update_bulk(
 
 	if (!list_empty(&tmp))
 		xfs_ail_splice(ailp, cur, &tmp, lsn);
-	spin_unlock(&ailp->xa_lock);
 
-	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		xlog_assign_tail_lsn(ailp->xa_mount);
+	if (mlip_changed) {
+		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+			xlog_assign_tail_lsn_locked(ailp->xa_mount);
+		spin_unlock(&ailp->xa_lock);
+
 		xfs_log_space_wake(ailp->xa_mount);
+	} else {
+		spin_unlock(&ailp->xa_lock);
 	}
 }
 
@@ -729,11 +733,15 @@ xfs_trans_ail_delete_bulk(
 		if (mlip == lip)
 			mlip_changed = 1;
 	}
-	spin_unlock(&ailp->xa_lock);
 
-	if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-		xlog_assign_tail_lsn(ailp->xa_mount);
+	if (mlip_changed) {
+		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+			xlog_assign_tail_lsn_locked(ailp->xa_mount);
+		spin_unlock(&ailp->xa_lock);
+
 		xfs_log_space_wake(ailp->xa_mount);
+	} else {
+		spin_unlock(&ailp->xa_lock);
 	}
 }
 


@@ -102,6 +102,7 @@ xfs_trans_ail_delete(
 void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_ail_push_all(struct xfs_ail *);
+struct xfs_log_item	*xfs_ail_min(struct xfs_ail *ailp);
 xfs_lsn_t		xfs_ail_min_lsn(struct xfs_ail *ailp);
 struct xfs_log_item *	xfs_trans_ail_cursor_first(struct xfs_ail *ailp,