/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"

/*
 * Directory file type support functions
 */
static unsigned char xfs_dir3_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK,
	DT_FIFO, DT_SOCK, DT_LNK, DT_WHT,
};

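/*
 * Convert an on-disk directory entry file type into the DT_* value that
 * readdir reports to userspace.
 */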
static unsigned char
xfs_dir3_get_dtype(
	struct xfs_mount	*mp,
	__uint8_t		filetype)
{
	if (!xfs_sb_version_hasftype(&mp->m_sb))
		return DT_UNKNOWN;

	if (filetype >= XFS_DIR3_FT_MAX)
		return DT_UNKNOWN;

	return xfs_dir3_filetype_table[filetype];
}

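/*
 * Readdir for short form (inline) directories.
 */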
STATIC int
xfs_dir2_sf_getdents(
	struct xfs_da_args	*args,
	struct dir_context	*ctx)
{
	int			i;		/* shortform entry number */
	struct xfs_inode	*dp = args->dp;	/* incore directory inode */
	xfs_dir2_dataptr_t	off;		/* current entry's offset */
	xfs_dir2_sf_entry_t	*sfep;		/* shortform directory entry */
	xfs_dir2_sf_hdr_t	*sfp;		/* shortform structure */
	xfs_dir2_dataptr_t	dot_offset;
	xfs_dir2_dataptr_t	dotdot_offset;
	xfs_ino_t		ino;
	struct xfs_da_geometry	*geo = args->geo;

	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
	/*
	 * Give up if the directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
		return -EIO;
	}

	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
	ASSERT(dp->i_df.if_u1.if_data != NULL);

	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;

	if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
		return -EFSCORRUPTED;

	/*
	 * If the block number in the offset is out of range, we're done.
	 */
	if (xfs_dir2_dataptr_to_db(geo, ctx->pos) > geo->datablk)
		return 0;

	/*
	 * Precalculate offsets for . and .. as we will always need them.
	 *
	 * XXX(hch): the second argument is sometimes 0 and sometimes
	 * geo->datablk
	 */
	dot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
						dp->d_ops->data_dot_offset);
	dotdot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
						dp->d_ops->data_dotdot_offset);

	/*
	 * Put . entry unless we're starting past it.
	 */
	if (ctx->pos <= dot_offset) {
		ctx->pos = dot_offset & 0x7fffffff;
		if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR))
			return 0;
	}

	/*
	 * Put .. entry unless we're starting past it.
	 */
	if (ctx->pos <= dotdot_offset) {
		ino = dp->d_ops->sf_get_parent_ino(sfp);
		ctx->pos = dotdot_offset & 0x7fffffff;
		if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
			return 0;
	}

	/*
	 * Loop while there are more entries and put'ing works.
	 */
	sfep = xfs_dir2_sf_firstentry(sfp);
	for (i = 0; i < sfp->count; i++) {
		__uint8_t filetype;

		off = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
				xfs_dir2_sf_get_offset(sfep));

		if (ctx->pos > off) {
			sfep = dp->d_ops->sf_nextentry(sfp, sfep);
			continue;
		}

		ino = dp->d_ops->sf_get_ino(sfp, sfep);
		filetype = dp->d_ops->sf_get_ftype(sfep);
		ctx->pos = off & 0x7fffffff;
		if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
			    xfs_dir3_get_dtype(dp->i_mount, filetype)))
			return 0;
		sfep = dp->d_ops->sf_nextentry(sfp, sfep);
	}

	ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
			0x7fffffff;
	return 0;
}

/*
 * Readdir for block directories.
 */
STATIC int
xfs_dir2_block_getdents(
	struct xfs_da_args	*args,
	struct dir_context	*ctx)
{
	struct xfs_inode	*dp = args->dp;	/* incore directory inode */
	xfs_dir2_data_hdr_t	*hdr;		/* block header */
	struct xfs_buf		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	char			*endptr;	/* end of the data entries */
	int			error;		/* error return value */
	char			*ptr;		/* current data entry */
	int			wantoff;	/* starting block offset */
	xfs_off_t		cook;
	struct xfs_da_geometry	*geo = args->geo;
	int			lock_mode;

	/*
	 * If the block number in the offset is out of range, we're done.
	 */
	if (xfs_dir2_dataptr_to_db(geo, ctx->pos) > geo->datablk)
		return 0;

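	/*
	 * Hold the ilock only while the directory block is read; it is
	 * dropped before the dir_emit() loop below because the filldir
	 * callback may take page faults.
	 */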
	lock_mode = xfs_ilock_data_map_shared(dp);
	error = xfs_dir3_block_read(NULL, dp, &bp);
	xfs_iunlock(dp, lock_mode);
	if (error)
		return error;

	/*
	 * Extract the byte offset we start at from the seek pointer.
	 * We'll skip entries before this.
	 */
	wantoff = xfs_dir2_dataptr_to_off(geo, ctx->pos);
	hdr = bp->b_addr;
	xfs_dir3_data_check(dp, bp);
	/*
	 * Set up values for the loop.
	 */
	btp = xfs_dir2_block_tail_p(geo, hdr);
	ptr = (char *)dp->d_ops->data_entry_p(hdr);
	endptr = (char *)xfs_dir2_block_leaf_p(btp);

	/*
	 * Loop over the data portion of the block.
	 * Each object is a real entry (dep) or an unused one (dup).
	 */
	while (ptr < endptr) {
		__uint8_t filetype;

		dup = (xfs_dir2_data_unused_t *)ptr;
		/*
		 * Unused, skip it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			ptr += be16_to_cpu(dup->length);
			continue;
		}

		dep = (xfs_dir2_data_entry_t *)ptr;

		/*
		 * Bump pointer for the next iteration.
		 */
		ptr += dp->d_ops->data_entsize(dep->namelen);
		/*
		 * The entry is before the desired starting point, skip it.
		 */
		if ((char *)dep - (char *)hdr < wantoff)
			continue;

		cook = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
					    (char *)dep - (char *)hdr);

		ctx->pos = cook & 0x7fffffff;
		filetype = dp->d_ops->data_get_ftype(dep);
		/*
		 * If it didn't fit, set the final offset to here & return.
		 */
		if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
			    be64_to_cpu(dep->inumber),
			    xfs_dir3_get_dtype(dp->i_mount, filetype))) {
			xfs_trans_brelse(NULL, bp);
			return 0;
		}
	}

	/*
	 * Reached the end of the block.
	 * Set the offset to a non-existent block 1 and return.
	 */
	ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
			0x7fffffff;
	xfs_trans_brelse(NULL, bp);
	return 0;
}

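/*
 * Bmap and readahead state carried across reads of the data blocks of a
 * leaf/node format directory during readdir.
 */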
struct xfs_dir2_leaf_map_info {
	xfs_extlen_t	map_blocks;	/* number of fsbs in map */
	xfs_dablk_t	map_off;	/* last mapped file offset */
	int		map_size;	/* total entries in *map */
	int		map_valid;	/* valid entries in *map */
	int		nmap;		/* mappings to ask xfs_bmapi */
	xfs_dir2_db_t	curdb;		/* db for current block */
	int		ra_current;	/* number of read-ahead blks */
	int		ra_index;	/* *map index for read-ahead */
	int		ra_offset;	/* map entry offset for ra */
	int		ra_want;	/* readahead count wanted */
	struct xfs_bmbt_irec map[];	/* map vector for blocks */
};

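/*
 * Read the next directory data block for a leaf/node directory, issuing
 * readahead for the data blocks we expect to need soon.
 */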
STATIC int
xfs_dir2_leaf_readbuf(
	struct xfs_da_args	*args,
	size_t			bufsize,
	struct xfs_dir2_leaf_map_info *mip,
	xfs_dir2_off_t		*curoff,
	struct xfs_buf		**bpp,
	bool			trim_map)
{
	struct xfs_inode	*dp = args->dp;
	struct xfs_buf		*bp = NULL;
	struct xfs_bmbt_irec	*map = mip->map;
	struct blk_plug		plug;
	int			error = 0;
	int			length;
	int			i;
	int			j;
	struct xfs_da_geometry	*geo = args->geo;

	/*
	 * If the caller just finished processing a buffer, it will tell us
	 * we need to trim that block out of the mapping now it is done.
	 */
	if (trim_map) {
		mip->map_blocks -= geo->fsbcount;
		/*
		 * Loop to get rid of the extents for the
		 * directory block.
		 */
		for (i = geo->fsbcount; i > 0; ) {
			j = min_t(int, map->br_blockcount, i);
			map->br_blockcount -= j;
			map->br_startblock += j;
			map->br_startoff += j;
			/*
			 * If mapping is done, pitch it from
			 * the table.
			 */
			if (!map->br_blockcount && --mip->map_valid)
				memmove(&map[0], &map[1],
					sizeof(map[0]) * mip->map_valid);
			i -= j;
		}
	}

	/*
	 * Recalculate the readahead blocks wanted.
	 */
	mip->ra_want = howmany(bufsize + geo->blksize, (1 << geo->fsblog)) - 1;
	ASSERT(mip->ra_want >= 0);

	/*
	 * If we don't have as many as we want, and we haven't
	 * run out of data blocks, get some more mappings.
	 */
	if (1 + mip->ra_want > mip->map_blocks &&
	    mip->map_off < xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET)) {
		/*
		 * Get more bmaps, fill in after the ones
		 * we already have in the table.
		 */
		mip->nmap = mip->map_size - mip->map_valid;
		error = xfs_bmapi_read(dp, mip->map_off,
				xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET) -
							mip->map_off,
				&map[mip->map_valid], &mip->nmap, 0);

		/*
		 * Don't know if we should ignore this or try to return an
		 * error.  The trouble with returning errors is that readdir
		 * will just stop without actually passing the error through.
		 */
		if (error)
			goto out;	/* XXX */

		/*
		 * If we got all the mappings we asked for, set the final map
		 * offset based on the last bmap value received.  Otherwise,
		 * we've reached the end.
		 */
		if (mip->nmap == mip->map_size - mip->map_valid) {
			i = mip->map_valid + mip->nmap - 1;
			mip->map_off = map[i].br_startoff + map[i].br_blockcount;
		} else
			mip->map_off = xfs_dir2_byte_to_da(geo,
							XFS_DIR2_LEAF_OFFSET);

		/*
		 * Look for holes in the mapping, and eliminate them.  Count up
		 * the valid blocks.
		 */
		for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
			if (map[i].br_startblock == HOLESTARTBLOCK) {
				mip->nmap--;
				length = mip->map_valid + mip->nmap - i;
				if (length)
					memmove(&map[i], &map[i + 1],
						sizeof(map[i]) * length);
			} else {
				mip->map_blocks += map[i].br_blockcount;
				i++;
			}
		}
		mip->map_valid += mip->nmap;
	}

	/*
	 * No valid mappings, so no more data blocks.
	 */
	if (!mip->map_valid) {
		*curoff = xfs_dir2_da_to_byte(geo, mip->map_off);
		goto out;
	}

	/*
	 * Read the directory block starting at the first mapping.
	 */
	mip->curdb = xfs_dir2_da_to_db(geo, map->br_startoff);
	error = xfs_dir3_data_read(NULL, dp, map->br_startoff,
			map->br_blockcount >= geo->fsbcount ?
			    XFS_FSB_TO_DADDR(dp->i_mount, map->br_startblock) :
			    -1, &bp);
	/*
	 * Should just skip over the data block instead of giving up.
	 */
	if (error)
		goto out;	/* XXX */

	/*
	 * Adjust the current amount of read-ahead: we just read a block that
	 * was previously ra.
	 */
	if (mip->ra_current)
		mip->ra_current -= geo->fsbcount;

	/*
	 * Do we need more readahead?
	 */
	blk_start_plug(&plug);
	for (mip->ra_index = mip->ra_offset = i = 0;
	     mip->ra_want > mip->ra_current && i < mip->map_blocks;
	     i += geo->fsbcount) {
		ASSERT(mip->ra_index < mip->map_valid);
		/*
		 * Read-ahead a contiguous directory block.
		 */
		if (i > mip->ra_current &&
		    map[mip->ra_index].br_blockcount >= geo->fsbcount) {
			xfs_dir3_data_readahead(dp,
				map[mip->ra_index].br_startoff + mip->ra_offset,
				XFS_FSB_TO_DADDR(dp->i_mount,
					map[mip->ra_index].br_startblock +
							mip->ra_offset));
			mip->ra_current = i;
		}

		/*
		 * Read-ahead a non-contiguous directory block.  This doesn't
		 * use our mapping, but this is a very rare case.
		 */
		else if (i > mip->ra_current) {
			xfs_dir3_data_readahead(dp,
					map[mip->ra_index].br_startoff +
							mip->ra_offset, -1);
			mip->ra_current = i;
		}

		/*
		 * Advance offset through the mapping table.
		 */
		for (j = 0; j < geo->fsbcount; j += length ) {
			/*
			 * The rest of this extent but not more than a dir
			 * block.
			 */
			length = min_t(int, geo->fsbcount,
					map[mip->ra_index].br_blockcount -
							mip->ra_offset);
			mip->ra_offset += length;

			/*
			 * Advance to the next mapping if this one is used up.
			 */
			if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
				mip->ra_offset = 0;
				mip->ra_index++;
			}
		}
	}
	blk_finish_plug(&plug);

out:
	*bpp = bp;
	return error;
}

/*
 * Getdents (readdir) for leaf and node directories.
 * This reads the data blocks only, so is the same for both forms.
 */
STATIC int
xfs_dir2_leaf_getdents(
	struct xfs_da_args	*args,
	struct dir_context	*ctx,
	size_t			bufsize)
{
	struct xfs_inode	*dp = args->dp;
	struct xfs_buf		*bp = NULL;	/* data block buffer */
	xfs_dir2_data_hdr_t	*hdr;		/* data block header */
	xfs_dir2_data_entry_t	*dep;		/* data entry */
	xfs_dir2_data_unused_t	*dup;		/* unused entry */
	int			error = 0;	/* error return value */
	int			length;		/* temporary length value */
	int			byteoff;	/* offset in current block */
	xfs_dir2_off_t		curoff;		/* current overall offset */
	xfs_dir2_off_t		newoff;		/* new curoff after new blk */
	char			*ptr = NULL;	/* pointer to current data */
	struct xfs_dir2_leaf_map_info *map_info;
	struct xfs_da_geometry	*geo = args->geo;

	/*
	 * If the offset is at or past the largest allowed value,
	 * give up right away.
	 */
	if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
		return 0;

	/*
	 * Set up to bmap a number of blocks based on the caller's
	 * buffer size, the directory block size, and the filesystem
	 * block size.
	 */
	length = howmany(bufsize + geo->blksize, (1 << geo->fsblog));
	map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
				(length * sizeof(struct xfs_bmbt_irec)),
			       KM_SLEEP | KM_NOFS);
	map_info->map_size = length;

	/*
	 * Inside the loop we keep the main offset value as a byte offset
	 * in the directory file.
	 */
	curoff = xfs_dir2_dataptr_to_byte(ctx->pos);

	/*
	 * Force this conversion through db so we truncate the offset
	 * down to get the start of the data block.
	 */
	map_info->map_off = xfs_dir2_db_to_da(geo,
					      xfs_dir2_byte_to_db(geo, curoff));

	/*
	 * Loop over directory entries until we reach the end offset.
	 * Get more blocks and readahead as necessary.
	 */
	while (curoff < XFS_DIR2_LEAF_OFFSET) {
		__uint8_t filetype;

		/*
		 * If we have no buffer, or we're off the end of the
		 * current buffer, need to get another one.
		 */
		if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
			int	lock_mode;
			bool	trim_map = false;

			if (bp) {
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
				trim_map = true;
			}

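			/*
			 * As in the block case, the ilock is held only
			 * across the buffer read and released before any
			 * entries are emitted.
			 */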
			lock_mode = xfs_ilock_data_map_shared(dp);
			error = xfs_dir2_leaf_readbuf(args, bufsize, map_info,
						      &curoff, &bp, trim_map);
			xfs_iunlock(dp, lock_mode);
			if (error || !map_info->map_valid)
				break;

			/*
			 * Having done a read, we need to set a new offset.
			 */
			newoff = xfs_dir2_db_off_to_byte(geo,
							 map_info->curdb, 0);
			/*
			 * Start of the current block.
			 */
			if (curoff < newoff)
				curoff = newoff;
			/*
			 * Make sure we're in the right block.
			 */
			else if (curoff > newoff)
				ASSERT(xfs_dir2_byte_to_db(geo, curoff) ==
				       map_info->curdb);
			hdr = bp->b_addr;
			xfs_dir3_data_check(dp, bp);
			/*
			 * Find our position in the block.
			 */
			ptr = (char *)dp->d_ops->data_entry_p(hdr);
			byteoff = xfs_dir2_byte_to_off(geo, curoff);
			/*
			 * Skip past the header.
			 */
			if (byteoff == 0)
				curoff += dp->d_ops->data_entry_offset;
			/*
			 * Skip past entries until we reach our offset.
			 */
			else {
				while ((char *)ptr - (char *)hdr < byteoff) {
					dup = (xfs_dir2_data_unused_t *)ptr;

					if (be16_to_cpu(dup->freetag)
						  == XFS_DIR2_DATA_FREE_TAG) {

						length = be16_to_cpu(dup->length);
						ptr += length;
						continue;
					}
					dep = (xfs_dir2_data_entry_t *)ptr;
					length =
					   dp->d_ops->data_entsize(dep->namelen);
					ptr += length;
				}
				/*
				 * Now set our real offset.
				 */
				curoff =
					xfs_dir2_db_off_to_byte(geo,
					    xfs_dir2_byte_to_db(geo, curoff),
					    (char *)ptr - (char *)hdr);
				if (ptr >= (char *)hdr + geo->blksize) {
					continue;
				}
			}
		}
		/*
		 * We have a pointer to an entry.
		 * Is it a live one?
		 */
		dup = (xfs_dir2_data_unused_t *)ptr;
		/*
		 * No, it's unused, skip over it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			length = be16_to_cpu(dup->length);
			ptr += length;
			curoff += length;
			continue;
		}

		dep = (xfs_dir2_data_entry_t *)ptr;
		length = dp->d_ops->data_entsize(dep->namelen);
		filetype = dp->d_ops->data_get_ftype(dep);

		ctx->pos = xfs_dir2_byte_to_dataptr(curoff) & 0x7fffffff;
		if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
			    be64_to_cpu(dep->inumber),
			    xfs_dir3_get_dtype(dp->i_mount, filetype)))
			break;

		/*
		 * Advance to next entry in the block.
		 */
		ptr += length;
		curoff += length;
		/* bufsize may have just been a guess; don't go negative */
		bufsize = bufsize > length ? bufsize - length : 0;
	}

	/*
	 * All done.  Set output offset value to current offset.
	 */
	if (curoff > xfs_dir2_dataptr_to_byte(XFS_DIR2_MAX_DATAPTR))
		ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
	else
		ctx->pos = xfs_dir2_byte_to_dataptr(curoff) & 0x7fffffff;
	kmem_free(map_info);
	if (bp)
		xfs_trans_brelse(NULL, bp);
	return error;
}

/*
 * Read a directory.
 */
int
xfs_readdir(
	struct xfs_inode	*dp,
	struct dir_context	*ctx,
	size_t			bufsize)
{
	struct xfs_da_args	args = { NULL };
	int			rval;
	int			v;

	trace_xfs_readdir(dp);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
	XFS_STATS_INC(dp->i_mount, xs_dir_getdents);

	args.dp = dp;
	args.geo = dp->i_mount->m_dir_geo;

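	/*
	 * The shared IOLOCK serialises readdir against directory
	 * modifications; the ILOCK is only taken further down when the
	 * extent map needs to be read.
	 */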
	xfs_ilock(dp, XFS_IOLOCK_SHARED);
	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_getdents(&args, ctx);
	else if ((rval = xfs_dir2_isblock(&args, &v)))
		;
	else if (v)
		rval = xfs_dir2_block_getdents(&args, ctx);
	else
		rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
	xfs_iunlock(dp, XFS_IOLOCK_SHARED);

	return rval;
}