ext4: undo ext4_calc_metadata_amount if we fail to claim space

The function ext4_calc_metadata_amount() has side effects, although
that is not obvious from its name.  So if we fail to claim space, we
need to undo those side effects, regardless of whether we then retry
the claim or return an error.

Otherwise we can end up incorrectly calculating the number of metadata
blocks needed for the operation, which was responsible for an xfstests
failure for test #271 when using an ext2 file system with delalloc
enabled.

Reported-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
Author: Theodore Ts'o <tytso@mit.edu>
Date:   2012-07-23 00:00:20 -04:00
Parent: 97795d2a5b
Commit: 03179fe923
1 changed file: fs/ext4/inode.c (24 additions, 14 deletions)

@@ -1182,18 +1182,8 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-repeat:
-	spin_lock(&ei->i_block_reservation_lock);
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-	spin_unlock(&ei->i_block_reservation_lock);
+	ext4_lblk_t save_last_lblock;
+	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1203,19 +1193,39 @@ repeat:
 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
 	if (ret)
 		return ret;
+
+	/*
+	 * recalculate the amount of metadata blocks to reserve
+	 * in order to allocate nrblocks
+	 * worse case is one extent per block
+	 */
+repeat:
+	spin_lock(&ei->i_block_reservation_lock);
+	/*
+	 * ext4_calc_metadata_amount() has side effects, which we have
+	 * to be prepared undo if we fail to claim space.
+	 */
+	save_len = ei->i_da_metadata_calc_len;
+	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+	md_needed = EXT4_NUM_B2C(sbi,
+				 ext4_calc_metadata_amount(inode, lblock));
+	trace_ext4_da_reserve_space(inode, md_needed);
+
 	/*
 	 * We do still charge estimated metadata to the sb though;
 	 * we cannot afford to run out of free blocks.
 	 */
 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+		ei->i_da_metadata_calc_len = save_len;
+		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+		spin_unlock(&ei->i_block_reservation_lock);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
 			goto repeat;
 		}
+		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
-	spin_lock(&ei->i_block_reservation_lock);
 	ei->i_reserved_data_blocks++;
 	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
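
Below is a minimal, self-contained userspace sketch of the save-and-restore
pattern this patch introduces, for readers who want to see the idea outside
of kernel context.  The struct, field names, helper functions, and the
simulate_enospc flag are all invented for illustration, and the retry loop
and quota reservation are omitted; only the shape of the logic mirrors the
ext4 change: snapshot the calculator state under the lock before calling
the function with side effects, and restore that snapshot before bailing
out if the claim fails.

/* Hypothetical userspace model of the pattern; not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_inode {
	pthread_mutex_t lock;		/* stands in for i_block_reservation_lock */
	unsigned int calc_len;		/* stands in for i_da_metadata_calc_len */
	unsigned int calc_last_lblock;	/* stands in for i_da_metadata_calc_last_lblock */
	unsigned int reserved_data;
	unsigned int reserved_meta;
};

/* Like ext4_calc_metadata_amount(): returns an estimate *and* mutates state. */
static unsigned int calc_metadata_amount(struct demo_inode *di, unsigned int lblock)
{
	di->calc_last_lblock = lblock;
	di->calc_len++;			/* the side effect the caller may need to undo */
	return 1;
}

/* Invented stand-in for ext4_claim_free_clusters(); fails on request. */
static bool claim_free_clusters(unsigned int needed, bool simulate_enospc)
{
	(void)needed;
	return !simulate_enospc;
}

static int reserve_space(struct demo_inode *di, unsigned int lblock, bool simulate_enospc)
{
	unsigned int save_len, save_last, md_needed;

	pthread_mutex_lock(&di->lock);
	/* Snapshot the calculator state before the call that mutates it. */
	save_len = di->calc_len;
	save_last = di->calc_last_lblock;
	md_needed = calc_metadata_amount(di, lblock);

	if (!claim_free_clusters(md_needed + 1, simulate_enospc)) {
		/* Claim failed: roll the side effects back before unlocking. */
		di->calc_len = save_len;
		di->calc_last_lblock = save_last;
		pthread_mutex_unlock(&di->lock);
		return -1;		/* would be -ENOSPC in the kernel */
	}
	di->reserved_data++;
	di->reserved_meta += md_needed;
	pthread_mutex_unlock(&di->lock);
	return 0;
}

int main(void)
{
	struct demo_inode di = {0};

	pthread_mutex_init(&di.lock, NULL);
	reserve_space(&di, 10, false);	/* succeeds: calculator state advances */
	reserve_space(&di, 11, true);	/* fails: side effects rolled back */
	printf("calc_len=%u reserved_data=%u reserved_meta=%u\n",
	       di.calc_len, di.reserved_data, di.reserved_meta);
	pthread_mutex_destroy(&di.lock);
	return 0;
}

Expected output is "calc_len=1 reserved_data=1 reserved_meta=1": the failed
second call leaves no trace.  In the real patch the same rollback happens
under i_block_reservation_lock, which the new code now holds from the
repeat: label all the way through the i_reserved_* updates; the quota
reservation taken earlier is only released once ext4_should_retry_alloc()
has given up.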