xfs: fix s_maxbytes computation on 32-bit kernels
I observed a hang in generic/308 while running fstests on an i686 kernel.
The hang occurred when trying to purge the pagecache on a large sparse
file that had a page created past MAX_LFS_FILESIZE, which caused an
integer overflow in the pagecache xarray and resulted in an infinite
loop.
I then noticed that Linus changed the definition of MAX_LFS_FILESIZE in
commit 0cc3b0ec23 ("Clarify (and fix) MAX_LFS_FILESIZE macros") so
that it is now one page short of the maximum page index on 32-bit
kernels. Because the XFS function to compute max offset open-codes the
2005-era MAX_LFS_FILESIZE computation and neither the vfs nor mm perform
any sanity checking of s_maxbytes, the code in generic/308 can create a
page above the pagecache's limit and kaboom.
Fix all this by setting s_maxbytes to MAX_LFS_FILESIZE directly and
aborting the mount with a warning if our assumptions ever break. I have
no answer for why this seems to have been broken for years and nobody
noticed.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
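
To make the mismatch concrete, here is a minimal userspace sketch (not part of the patch) of the arithmetic described above. It assumes an i686-like configuration with 4 KiB pages and a 32-bit unsigned long; the macro names below are local to the sketch, and the two MAX_LFS_FILESIZE formulas are taken from the removed XFS comment and from commit 0cc3b0ec23 respectively.

/*
 * Illustrative sketch only: compare the pre- and post-0cc3b0ec23
 * MAX_LFS_FILESIZE values for a 32-bit kernel and show that the new
 * definition stops one page short of the maximum 32-bit page index.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define BITS_PER_LONG	32			/* assumed 32-bit kernel */
#define ULONG_MAX_32	0xffffffffULL		/* maximum 32-bit page index */

int main(void)
{
	/* Pre-0cc3b0ec23 formula, as quoted in the removed XFS comment. */
	uint64_t old_lfs = (PAGE_SIZE << (BITS_PER_LONG - 1)) - 1;

	/* Post-0cc3b0ec23: ((loff_t)ULONG_MAX << PAGE_SHIFT). */
	uint64_t new_lfs = ULONG_MAX_32 << PAGE_SHIFT;

	printf("old MAX_LFS_FILESIZE      = %#llx (~8 TiB)\n",
	       (unsigned long long)old_lfs);
	printf("new MAX_LFS_FILESIZE      = %#llx (~16 TiB)\n",
	       (unsigned long long)new_lfs);
	printf("max page index under new  = %#llx\n",
	       (unsigned long long)((new_lfs - 1) >> PAGE_SHIFT));
	printf("maximum 32-bit page index = %#llx\n",
	       (unsigned long long)ULONG_MAX_32);
	return 0;
}

The last two lines of output differ by exactly one, which is the "one page short of the maximum page index" behaviour the message refers to.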
Parent: 4bbb04abb4
Commit: 932befe39d
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -193,32 +193,6 @@ xfs_fs_show_options(
 	return 0;
 }
 
-static uint64_t
-xfs_max_file_offset(
-	unsigned int		blockshift)
-{
-	unsigned int		pagefactor = 1;
-	unsigned int		bitshift = BITS_PER_LONG - 1;
-
-	/* Figure out maximum filesize, on Linux this can depend on
-	 * the filesystem blocksize (on 32 bit platforms).
-	 * __block_write_begin does this in an [unsigned] long long...
-	 *       page->index << (PAGE_SHIFT - bbits)
-	 * So, for page sized blocks (4K on 32 bit platforms),
-	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
-	 * but for smaller blocksizes it is less (bbits = log2 bsize).
-	 */
-
-#if BITS_PER_LONG == 32
-	ASSERT(sizeof(sector_t) == 8);
-	pagefactor = PAGE_SIZE;
-	bitshift = BITS_PER_LONG;
-#endif
-
-	return (((uint64_t)pagefactor) << bitshift) - 1;
-}
-
 /*
  * Set parameters for inode allocation heuristics, taking into account
  * filesystem size and inode32/inode64 mount options; i.e. specifically
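
For reference, the removed helper transcribes directly into a standalone userspace program; this is an illustration of the old behaviour under an assumed 4 KiB page size, not kernel code, with the BITS_PER_LONG #if folded into a parameter so both branches can be evaluated side by side.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL		/* assumed 4 KiB pages */

/* Userspace transcription of the removed xfs_max_file_offset(). */
static uint64_t old_xfs_max_file_offset(bool is_32bit)
{
	uint64_t pagefactor = 1;
	unsigned int bitshift = 64 - 1;	/* 64-bit branch: BITS_PER_LONG - 1 */

	if (is_32bit) {
		pagefactor = PAGE_SIZE;	/* 32-bit branch */
		bitshift = 32;		/* BITS_PER_LONG */
	}

	return (pagefactor << bitshift) - 1;
}

int main(void)
{
	uint64_t max64 = old_xfs_max_file_offset(false);
	uint64_t max32 = old_xfs_max_file_offset(true);
	uint64_t lfs32 = 0xffffffffULL << 12;	/* current 32-bit MAX_LFS_FILESIZE */

	printf("64-bit s_maxbytes: %#llx (2^63 - 1)\n",
	       (unsigned long long)max64);
	printf("32-bit s_maxbytes: %#llx\n",
	       (unsigned long long)max32);
	printf("32-bit excess over MAX_LFS_FILESIZE: %llu bytes\n",
	       (unsigned long long)(max32 - lfs32));
	return 0;
}

On the 32-bit branch the helper's result exceeds the current MAX_LFS_FILESIZE by PAGE_SIZE - 1 bytes, which is the window the commit message describes generic/308 walking into.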
@@ -1424,6 +1398,26 @@ xfs_fc_fill_super(
 	if (error)
 		goto out_free_sb;
 
+	/*
+	 * XFS block mappings use 54 bits to store the logical block offset.
+	 * This should suffice to handle the maximum file size that the VFS
+	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
+	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
+	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
+	 * to check this assertion.
+	 *
+	 * Avoid integer overflow by comparing the maximum bmbt offset to the
+	 * maximum pagecache offset in units of fs blocks.
+	 */
+	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
+		xfs_warn(mp,
+"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
+			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
+			 XFS_MAX_FILEOFF);
+		error = -EINVAL;
+		goto out_free_sb;
+	}
+
 	error = xfs_filestream_mount(mp);
 	if (error)
 		goto out_free_sb;
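
The new guard can also be exercised outside the kernel. The sketch below assumes the 64-bit case, where MAX_LFS_FILESIZE is LLONG_MAX (the "2^63 bytes" mentioned in the comment above), treats XFS_MAX_FILEOFF as the 54-bit logical block offset limit the same comment describes, and substitutes a local b_to_fsbt() for XFS_B_TO_FSBT(); the names and block size range are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define MAX_LFS_FILESIZE_64	0x7fffffffffffffffULL	/* LLONG_MAX */
#define XFS_MAX_FILEOFF_54	((1ULL << 54) - 1)	/* 54-bit startoff */

/* Stand-in for XFS_B_TO_FSBT(mp, b): bytes -> whole fs blocks. */
static uint64_t b_to_fsbt(uint64_t bytes, unsigned int blocklog)
{
	return bytes >> blocklog;
}

int main(void)
{
	/* Assumed block size range: 512 bytes (blocklog 9) to 64 KiB (16). */
	for (unsigned int blocklog = 9; blocklog <= 16; blocklog++) {
		uint64_t maxfsb = b_to_fsbt(MAX_LFS_FILESIZE_64, blocklog);

		printf("blocklog %2u: pagecache limit in fsb %#llx -> %s\n",
		       blocklog, (unsigned long long)maxfsb,
		       maxfsb > XFS_MAX_FILEOFF_54 ?
				"would abort the mount" : "ok");
	}
	return 0;
}

At the smallest block size the two limits coincide exactly (both come out to 2^54 - 1 blocks), so the mount-time check stays quiet unless one of the constants changes underneath it.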
@@ -1435,7 +1429,7 @@ xfs_fc_fill_super(
 	sb->s_magic = XFS_SUPER_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
-	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_max_links = XFS_MAXLINK;
 	sb->s_time_gran = 1;
 	sb->s_time_min = S32_MIN;