xfs: implement batched inode lookups for AG walking
With the reclaim code separated from the generic walking code, it is simple to implement batched lookups for the generic walk code. Separate out the inode validation from the execute operations and modify the tree lookups to get a batch of inodes at a time. Reclaim operations will be optimised separately.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
This commit is contained in:
Parent
e13de955ca
Commit
78ae525676
|
@ -39,6 +39,14 @@
|
||||||
#include <linux/kthread.h>
|
#include <linux/kthread.h>
|
||||||
#include <linux/freezer.h>
|
#include <linux/freezer.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The inode lookup is done in batches to keep the amount of lock traffic and
|
||||||
|
* radix tree lookups to a minimum. The batch size is a trade off between
|
||||||
|
* lookup reduction and stack usage. This is in the reclaim path, so we can't
|
||||||
|
* be too greedy.
|
||||||
|
*/
|
||||||
|
#define XFS_LOOKUP_BATCH 32
|
||||||
|
|
||||||
STATIC int
|
STATIC int
|
||||||
xfs_inode_ag_walk_grab(
|
xfs_inode_ag_walk_grab(
|
||||||
struct xfs_inode *ip)
|
struct xfs_inode *ip)
|
||||||
|
@ -66,7 +74,6 @@ xfs_inode_ag_walk_grab(
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
STATIC int
|
STATIC int
|
||||||
xfs_inode_ag_walk(
|
xfs_inode_ag_walk(
|
||||||
struct xfs_mount *mp,
|
struct xfs_mount *mp,
|
||||||
|
@ -79,54 +86,69 @@ xfs_inode_ag_walk(
|
||||||
int last_error = 0;
|
int last_error = 0;
|
||||||
int skipped;
|
int skipped;
|
||||||
int done;
|
int done;
|
||||||
|
int nr_found;
|
||||||
|
|
||||||
restart:
|
restart:
|
||||||
done = 0;
|
done = 0;
|
||||||
skipped = 0;
|
skipped = 0;
|
||||||
first_index = 0;
|
first_index = 0;
|
||||||
|
nr_found = 0;
|
||||||
do {
|
do {
|
||||||
|
struct xfs_inode *batch[XFS_LOOKUP_BATCH];
|
||||||
int error = 0;
|
int error = 0;
|
||||||
int nr_found;
|
int i;
|
||||||
xfs_inode_t *ip;
|
|
||||||
|
|
||||||
read_lock(&pag->pag_ici_lock);
|
read_lock(&pag->pag_ici_lock);
|
||||||
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
|
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
|
||||||
(void **)&ip, first_index, 1);
|
(void **)batch, first_index,
|
||||||
|
XFS_LOOKUP_BATCH);
|
||||||
if (!nr_found) {
|
if (!nr_found) {
|
||||||
read_unlock(&pag->pag_ici_lock);
|
read_unlock(&pag->pag_ici_lock);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Update the index for the next lookup. Catch overflows
|
* Grab the inodes before we drop the lock. if we found
|
||||||
* into the next AG range which can occur if we have inodes
|
* nothing, nr == 0 and the loop will be skipped.
|
||||||
* in the last block of the AG and we are currently
|
|
||||||
* pointing to the last inode.
|
|
||||||
*/
|
*/
|
||||||
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
for (i = 0; i < nr_found; i++) {
|
||||||
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
|
struct xfs_inode *ip = batch[i];
|
||||||
done = 1;
|
|
||||||
|
|
||||||
if (xfs_inode_ag_walk_grab(ip)) {
|
if (done || xfs_inode_ag_walk_grab(ip))
|
||||||
read_unlock(&pag->pag_ici_lock);
|
batch[i] = NULL;
|
||||||
continue;
|
|
||||||
|
/*
|
||||||
|
* Update the index for the next lookup. Catch overflows
|
||||||
|
* into the next AG range which can occur if we have inodes
|
||||||
|
* in the last block of the AG and we are currently
|
||||||
|
* pointing to the last inode.
|
||||||
|
*/
|
||||||
|
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||||
|
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
|
||||||
|
done = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* unlock now we've grabbed the inodes. */
|
||||||
read_unlock(&pag->pag_ici_lock);
|
read_unlock(&pag->pag_ici_lock);
|
||||||
|
|
||||||
error = execute(ip, pag, flags);
|
for (i = 0; i < nr_found; i++) {
|
||||||
IRELE(ip);
|
if (!batch[i])
|
||||||
if (error == EAGAIN) {
|
continue;
|
||||||
skipped++;
|
error = execute(batch[i], pag, flags);
|
||||||
continue;
|
IRELE(batch[i]);
|
||||||
|
if (error == EAGAIN) {
|
||||||
|
skipped++;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (error && last_error != EFSCORRUPTED)
|
||||||
|
last_error = error;
|
||||||
}
|
}
|
||||||
if (error)
|
|
||||||
last_error = error;
|
|
||||||
|
|
||||||
/* bail out if the filesystem is corrupted. */
|
/* bail out if the filesystem is corrupted. */
|
||||||
if (error == EFSCORRUPTED)
|
if (error == EFSCORRUPTED)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
} while (!done);
|
} while (nr_found && !done);
|
||||||
|
|
||||||
if (skipped) {
|
if (skipped) {
|
||||||
delay(1);
|
delay(1);
|
||||||
|
|
|
@ -47,7 +47,7 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
|
||||||
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
|
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
|
||||||
struct xfs_inode *ip);
|
struct xfs_inode *ip);
|
||||||
|
|
||||||
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
|
int xfs_sync_inode_grab(struct xfs_inode *ip);
|
||||||
int xfs_inode_ag_iterator(struct xfs_mount *mp,
|
int xfs_inode_ag_iterator(struct xfs_mount *mp,
|
||||||
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
|
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
|
||||||
int flags);
|
int flags);
|
||||||
|
|
Loading…
Reference in new issue