More miscellaneous ext4 bug fixes (all stable fodder)

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAl5R8vEACgkQ8vlZVpUN
gaMkPQf/QpimFVWvW+y2u9wOCl4pS38fog3SEbaCMcmCjndUfgLd9zf43GetFUfD
DYbxmzotu+WEqHH83H6c+Cr/9tmhxrH5njhydxlzucocqyxdWmdWKe5cNz3ECJ6Z
c4B1HFux+w/AfSGs73AU1K9APHlc/yXnZhgHpjLON6mP0Ata9lRZkmxwe9RnSWEn
186U1/kWe6sHNyOe1iQJC1QOPSauqY8SQDTZr5QSHLEyO7M/eJje+bplocor6JnJ
HTsKHdP1dNQaQzZxup4QgvZ33vAfgsgwIFtJKhF4ps+2NsILJzH5FfYW+dHTpnqe
INuJM5kPkkUuNnQqCfFDOvmaDGwjqQ==
=i1ka
-----END PGP SIGNATURE-----

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "More miscellaneous ext4 bug fixes (all stable fodder)"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix mount failure with quota configured as module
  jbd2: fix ocfs2 corrupt when clearing block group bits
  ext4: fix race between writepages and enabling EXT4_EXTENTS_FL
  ext4: rename s_journal_flag_rwsem to s_writepages_rwsem
  ext4: fix potential race between s_flex_groups online resizing and access
  ext4: fix potential race between s_group_info online resizing and access
  ext4: fix potential race between online resizing and write operations
  ext4: add cond_resched() to __ext4_find_entry()
  ext4: fix a data race in EXT4_I(inode)->i_disksize

This commit is contained in:
Commit a3163ca03f
fs/ext4/balloc.c

@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	struct ext4_group_desc *desc;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct buffer_head *bh_p;
 
 	if (block_group >= ngroups) {
 		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 
 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
-	if (!sbi->s_group_desc[group_desc]) {
+	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
+	/*
+	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
+	 * the pointer being dereferenced won't be dereferenced again. By
+	 * looking at the usage in add_new_gdb() the value isn't modified,
+	 * just the pointer, and so it remains valid.
+	 */
+	if (!bh_p) {
 		ext4_error(sb, "Group descriptor not loaded - "
 			   "block_group = %u, group_desc = %u, desc = %u",
 			   block_group, group_desc, offset);
@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	}
 
 	desc = (struct ext4_group_desc *)(
-		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
+		(__u8 *)bh_p->b_data +
 		offset * EXT4_DESC_SIZE(sb));
 	if (bh)
-		*bh = sbi->s_group_desc[group_desc];
+		*bh = bh_p;
 	return desc;
 }
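
The comment added above is the heart of the first fix: ext4_get_group_desc() can run concurrently with online resize, which replaces and frees the s_group_desc array. A rough sketch of the read-side pattern the code now follows (the names here are illustrative, not the ext4 ones):

/* Illustrative sketch of the RCU read-side pattern adopted above. */
#include <linux/rcupdate.h>

struct item;
static struct item * __rcu *table;	/* swapped wholesale by a resizer */

static struct item *lookup(unsigned int idx)
{
	struct item *it;

	rcu_read_lock();
	/* pairs with the writer's rcu_assign_pointer(table, new) */
	it = rcu_dereference(table)[idx];
	rcu_read_unlock();
	/*
	 * Using 'it' after unlock is fine here for the same reason the
	 * new ext4 comment gives: resize replaces only the outer array;
	 * the element the slot pointed to is never freed by the swap.
	 */
	return it;
}
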
fs/ext4/ext4.h

@@ -1400,7 +1400,7 @@ struct ext4_sb_info {
 	loff_t s_bitmap_maxbytes;	/* max bytes for bitmap files */
 	struct buffer_head * s_sbh;	/* Buffer containing the super block */
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head **s_group_desc;
+	struct buffer_head * __rcu *s_group_desc;
 	unsigned int s_mount_opt;
 	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
@@ -1462,7 +1462,7 @@ struct ext4_sb_info {
 #endif
 
 	/* for buddy allocator */
-	struct ext4_group_info ***s_group_info;
+	struct ext4_group_info ** __rcu *s_group_info;
 	struct inode *s_buddy_cache;
 	spinlock_t s_md_lock;
 	unsigned short *s_mb_offsets;
@@ -1512,7 +1512,7 @@ struct ext4_sb_info {
 	unsigned int s_extent_max_zeroout_kb;
 
 	unsigned int s_log_groups_per_flex;
-	struct flex_groups *s_flex_groups;
+	struct flex_groups * __rcu *s_flex_groups;
 	ext4_group_t s_flex_groups_allocated;
 
 	/* workqueue for reserved extent conversions (buffered io) */
@@ -1552,8 +1552,11 @@ struct ext4_sb_info {
 	struct ratelimit_state s_warning_ratelimit_state;
 	struct ratelimit_state s_msg_ratelimit_state;
 
-	/* Barrier between changing inodes' journal flags and writepages ops. */
-	struct percpu_rw_semaphore s_journal_flag_rwsem;
+	/*
+	 * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+	 * or EXTENTS flag.
+	 */
+	struct percpu_rw_semaphore s_writepages_rwsem;
 	struct dax_device *s_daxdev;
 #ifdef CONFIG_EXT4_DEBUG
 	unsigned long s_simulate_fail;
@@ -1576,6 +1579,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
 
+/*
+ * Returns: sbi->field[index]
+ * Used to access an array element from the following sbi fields which require
+ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
+ * - s_group_desc
+ * - s_group_info
+ * - s_flex_group
+ */
+#define sbi_array_rcu_deref(sbi, field, index)				   \
+({									   \
+	typeof(*((sbi)->field)) _v;					   \
+	rcu_read_lock();						   \
+	_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index];	   \
+	rcu_read_unlock();						   \
+	_v;								   \
+})
+
 /*
  * Simulate_fail codes
  */
@@ -2730,6 +2750,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
 extern bool ext4_empty_dir(struct inode *inode);
 
 /* resize.c */
+extern void ext4_kvfree_array_rcu(void *to_free);
 extern int ext4_group_add(struct super_block *sb,
 				struct ext4_new_group_data *input);
 extern int ext4_group_extend(struct super_block *sb,
@@ -2976,13 +2997,13 @@ static inline
 struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
 					    ext4_group_t group)
 {
-	struct ext4_group_info ***grp_info;
+	struct ext4_group_info **grp_info;
 	long indexv, indexh;
 	BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
-	grp_info = EXT4_SB(sb)->s_group_info;
 	indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
 	indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
-	return grp_info[indexv][indexh];
+	grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+	return grp_info[indexh];
 }
 
 /*
@@ -3032,7 +3053,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
 		     !inode_is_locked(inode));
 	down_write(&EXT4_I(inode)->i_data_sem);
 	if (newsize > EXT4_I(inode)->i_disksize)
-		EXT4_I(inode)->i_disksize = newsize;
+		WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
 	up_write(&EXT4_I(inode)->i_data_sem);
 }
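
A note on the new sbi_array_rcu_deref() helper: it confines the RCU critical section to the single element load, then hands the element back with the read lock already dropped. For a field declared `struct buffer_head * __rcu *s_group_desc`, a call such as sbi_array_rcu_deref(sbi, s_group_desc, i) expands to roughly this (hand-expanded sketch, not actual preprocessor output):

struct buffer_head *_v;	/* typeof(*(sbi->s_group_desc)) */

rcu_read_lock();
_v = ((struct buffer_head **)rcu_dereference(sbi->s_group_desc))[i];
rcu_read_unlock();
/* _v is the value of the whole ({ ... }) expression */

Returning the element after rcu_read_unlock() is safe only because, as the balloc.c comment notes, resize replaces the outer array and never frees the elements themselves.
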
fs/ext4/ialloc.c

@@ -328,11 +328,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 
 	percpu_counter_inc(&sbi->s_freeinodes_counter);
 	if (sbi->s_log_groups_per_flex) {
-		ext4_group_t f = ext4_flex_group(sbi, block_group);
+		struct flex_groups *fg;
 
-		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
+					 ext4_flex_group(sbi, block_group));
+		atomic_inc(&fg->free_inodes);
 		if (is_directory)
-			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+			atomic_dec(&fg->used_dirs);
 	}
 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -368,12 +370,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 			    int flex_size, struct orlov_stats *stats)
 {
 	struct ext4_group_desc *desc;
-	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
 
 	if (flex_size > 1) {
-		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
-		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
-		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
+							     s_flex_groups, g);
+		stats->free_inodes = atomic_read(&fg->free_inodes);
+		stats->free_clusters = atomic64_read(&fg->free_clusters);
+		stats->used_dirs = atomic_read(&fg->used_dirs);
 		return;
 	}
 
@@ -1054,7 +1057,8 @@ got:
 		if (sbi->s_log_groups_per_flex) {
 			ext4_group_t f = ext4_flex_group(sbi, group);
 
-			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
+							f)->used_dirs);
 		}
 	}
 	if (ext4_has_group_desc_csum(sb)) {
@@ -1077,7 +1081,8 @@ got:
 
 	if (sbi->s_log_groups_per_flex) {
 		flex_group = ext4_flex_group(sbi, group);
-		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
+		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
+						flex_group)->free_inodes);
 	}
 
 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
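
Besides switching to the RCU helper, the ext4_free_inode() hunk deliberately fetches the flex_groups element once and reuses the pointer. A contrast of the two access patterns (the "before" side reflects the pre-series layout, where s_flex_groups was one flat kvmalloc'd array of structs; this framing is an editorial sketch, not code from the patch):

/* Before: every statement re-read sbi->s_flex_groups, and online resize
 * could kvfree() the old flat array between the two dereferences. */
atomic_inc(&sbi->s_flex_groups[f].free_inodes);
atomic_dec(&sbi->s_flex_groups[f].used_dirs);	/* may touch freed memory */

/* After: one protected load; the element stays valid because resize now
 * reallocates only the array of pointers, never the elements. */
struct flex_groups *fg = sbi_array_rcu_deref(sbi, s_flex_groups, f);
atomic_inc(&fg->free_inodes);
atomic_dec(&fg->used_dirs);
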
fs/ext4/inode.c

@@ -2465,7 +2465,7 @@ update_disksize:
 	 * truncate are avoided by checking i_size under i_data_sem.
 	 */
 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
-	if (disksize > EXT4_I(inode)->i_disksize) {
+	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
 		int err2;
 		loff_t i_size;
 
@@ -2628,7 +2628,7 @@ static int ext4_writepages(struct address_space *mapping,
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
-	percpu_down_read(&sbi->s_journal_flag_rwsem);
+	percpu_down_read(&sbi->s_writepages_rwsem);
 	trace_ext4_writepages(inode, wbc);
 
 	/*
@@ -2849,7 +2849,7 @@ unplug:
 out_writepages:
 	trace_ext4_writepages_result(inode, wbc, ret,
 				     nr_to_write - wbc->nr_to_write);
-	percpu_up_read(&sbi->s_journal_flag_rwsem);
+	percpu_up_read(&sbi->s_writepages_rwsem);
 	return ret;
 }
 
@@ -2864,13 +2864,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
-	percpu_down_read(&sbi->s_journal_flag_rwsem);
+	percpu_down_read(&sbi->s_writepages_rwsem);
 	trace_ext4_writepages(inode, wbc);
 
 	ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
 	trace_ext4_writepages_result(inode, wbc, ret,
 				     nr_to_write - wbc->nr_to_write);
-	percpu_up_read(&sbi->s_journal_flag_rwsem);
+	percpu_up_read(&sbi->s_writepages_rwsem);
 	return ret;
 }
 
@@ -5861,7 +5861,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		}
 	}
 
-	percpu_down_write(&sbi->s_journal_flag_rwsem);
+	percpu_down_write(&sbi->s_writepages_rwsem);
 	jbd2_journal_lock_updates(journal);
 
 	/*
@@ -5878,7 +5878,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		err = jbd2_journal_flush(journal);
 		if (err < 0) {
 			jbd2_journal_unlock_updates(journal);
-			percpu_up_write(&sbi->s_journal_flag_rwsem);
+			percpu_up_write(&sbi->s_writepages_rwsem);
 			return err;
 		}
 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
@@ -5886,7 +5886,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 	ext4_set_aops(inode);
 
 	jbd2_journal_unlock_updates(journal);
-	percpu_up_write(&sbi->s_journal_flag_rwsem);
+	percpu_up_write(&sbi->s_writepages_rwsem);
 
 	if (val)
 		up_write(&EXT4_I(inode)->i_mmap_sem);
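
The two i_disksize hunks are annotations rather than logic changes: the writeback path compares i_disksize without holding i_data_sem, while updaters store to it under the lock. The READ_ONCE()/WRITE_ONCE() pair tells the compiler the location is accessed concurrently, so it must emit a single plain load or store and may not tear, refetch, or cache it. A minimal sketch of the idiom ('ei' here is shorthand for EXT4_I(inode)):

#include <linux/compiler.h>

/* writer, under i_data_sem */
WRITE_ONCE(ei->i_disksize, newsize);

/* lockless fast-path reader */
if (disksize > READ_ONCE(ei->i_disksize)) {
	/* take i_data_sem and re-check before acting */
}

The locked re-check behind the optimistic read is what keeps the fast path merely advisory.
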
fs/ext4/mballoc.c

@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	unsigned size;
-	struct ext4_group_info ***new_groupinfo;
+	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
 
 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
 		EXT4_DESC_PER_BLOCK_BITS(sb);
@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
 		return -ENOMEM;
 	}
-	if (sbi->s_group_info) {
-		memcpy(new_groupinfo, sbi->s_group_info,
+	rcu_read_lock();
+	old_groupinfo = rcu_dereference(sbi->s_group_info);
+	if (old_groupinfo)
+		memcpy(new_groupinfo, old_groupinfo,
 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
-		kvfree(sbi->s_group_info);
-	}
-	sbi->s_group_info = new_groupinfo;
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
+	if (old_groupinfo)
+		ext4_kvfree_array_rcu(old_groupinfo);
 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
 		   sbi->s_group_info_size);
 	return 0;
@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 {
 	int i;
 	int metalen = 0;
+	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_info **meta_group_info;
 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 			 "for a buddy group");
 			goto exit_meta_group_info;
 		}
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
-			meta_group_info;
+		rcu_read_lock();
+		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
+		rcu_read_unlock();
 	}
 
-	meta_group_info =
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
+	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 
 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 exit_group_info:
 	/* If a meta_group_info table has been allocated, release it now */
 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
-		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+		struct ext4_group_info ***group_info;
+
+		rcu_read_lock();
+		group_info = rcu_dereference(sbi->s_group_info);
+		kfree(group_info[idx]);
+		group_info[idx] = NULL;
+		rcu_read_unlock();
 	}
 exit_meta_group_info:
 	return -ENOMEM;
@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int err;
 	struct ext4_group_desc *desc;
+	struct ext4_group_info ***group_info;
 	struct kmem_cache *cachep;
 
 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
@@ -2507,11 +2517,16 @@ err_freebuddy:
 	while (i-- > 0)
 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
 	i = sbi->s_group_info_size;
+	rcu_read_lock();
+	group_info = rcu_dereference(sbi->s_group_info);
 	while (i-- > 0)
-		kfree(sbi->s_group_info[i]);
+		kfree(group_info[i]);
+	rcu_read_unlock();
 	iput(sbi->s_buddy_cache);
 err_freesgi:
-	kvfree(sbi->s_group_info);
+	rcu_read_lock();
+	kvfree(rcu_dereference(sbi->s_group_info));
+	rcu_read_unlock();
 	return -ENOMEM;
 }
 
@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	ext4_group_t i;
 	int num_meta_group_infos;
-	struct ext4_group_info *grinfo;
+	struct ext4_group_info *grinfo, ***group_info;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
 		num_meta_group_infos = (ngroups +
 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
 			EXT4_DESC_PER_BLOCK_BITS(sb);
+		rcu_read_lock();
+		group_info = rcu_dereference(sbi->s_group_info);
 		for (i = 0; i < num_meta_group_infos; i++)
-			kfree(sbi->s_group_info[i]);
-		kvfree(sbi->s_group_info);
+			kfree(group_info[i]);
+		kvfree(group_info);
+		rcu_read_unlock();
 	}
 	kfree(sbi->s_mb_offsets);
 	kfree(sbi->s_mb_maxs);
@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		ext4_group_t flex_group = ext4_flex_group(sbi,
 							  ac->ac_b_ex.fe_group);
 		atomic64_sub(ac->ac_b_ex.fe_len,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
+						  flex_group)->free_clusters);
 	}
 
 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4918,7 +4937,8 @@ do_more:
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(count_clusters,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
 	}
 
 	/*
@@ -5075,7 +5095,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(clusters_freed,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
 	}
 
 	ext4_mb_unload_buddy(&e4b);
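
ext4_mb_alloc_groupinfo() above is the writer-side template the whole series uses: build the bigger array, copy the old contents, publish with rcu_assign_pointer(), and defer the free. A condensed sketch of that sequence (generic byte-count names; concurrent writers are assumed to be excluded by higher-level locking, as they are for ext4 resize):

new = kvzalloc(new_bytes, GFP_KERNEL);
if (!new)
	return -ENOMEM;

rcu_read_lock();			/* keep 'old' alive during the copy */
old = rcu_dereference(sbi->s_group_info);
if (old)
	memcpy(new, old, old_bytes);
rcu_read_unlock();

rcu_assign_pointer(sbi->s_group_info, new);	/* publish the new array */
if (old)
	ext4_kvfree_array_rcu(old);	/* reclaim after a grace period */
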
fs/ext4/migrate.c

@@ -407,6 +407,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
 
 int ext4_ext_migrate(struct inode *inode)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	handle_t *handle;
 	int retval = 0, i;
 	__le32 *i_data;
@@ -431,6 +432,8 @@ int ext4_ext_migrate(struct inode *inode)
 	 */
 		return retval;
 
+	percpu_down_write(&sbi->s_writepages_rwsem);
+
 	/*
 	 * Worst case we can touch the allocation bitmaps, a bgd
 	 * block, and a block to link in the orphan list.  We do need
@@ -441,7 +444,7 @@ int ext4_ext_migrate(struct inode *inode)
 
 	if (IS_ERR(handle)) {
 		retval = PTR_ERR(handle);
-		return retval;
+		goto out_unlock;
 	}
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
@@ -452,7 +455,7 @@ int ext4_ext_migrate(struct inode *inode)
 	if (IS_ERR(tmp_inode)) {
 		retval = PTR_ERR(tmp_inode);
 		ext4_journal_stop(handle);
-		return retval;
+		goto out_unlock;
 	}
 	i_size_write(tmp_inode, i_size_read(inode));
 	/*
@@ -494,7 +497,7 @@ int ext4_ext_migrate(struct inode *inode)
 		 */
 		ext4_orphan_del(NULL, tmp_inode);
 		retval = PTR_ERR(handle);
-		goto out;
+		goto out_tmp_inode;
 	}
 
 	ei = EXT4_I(inode);
@@ -576,10 +579,11 @@ err_out:
 		ext4_ext_tree_init(handle, tmp_inode);
 out_stop:
 	ext4_journal_stop(handle);
-out:
+out_tmp_inode:
 	unlock_new_inode(tmp_inode);
 	iput(tmp_inode);
-
+out_unlock:
+	percpu_up_write(&sbi->s_writepages_rwsem);
 	return retval;
 }
 
@@ -589,7 +593,8 @@ out:
 int ext4_ind_migrate(struct inode *inode)
 {
 	struct ext4_extent_header *eh;
-	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_super_block *es = sbi->s_es;
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_extent *ex;
 	unsigned int i, len;
@@ -613,9 +618,13 @@ int ext4_ind_migrate(struct inode *inode)
 	if (test_opt(inode->i_sb, DELALLOC))
 		ext4_alloc_da_blocks(inode);
 
+	percpu_down_write(&sbi->s_writepages_rwsem);
+
 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out_unlock;
+	}
 
 	down_write(&EXT4_I(inode)->i_data_sem);
 	ret = ext4_ext_check_inode(inode);
@@ -650,5 +659,7 @@ int ext4_ind_migrate(struct inode *inode)
 errout:
 	ext4_journal_stop(handle);
 	up_write(&EXT4_I(inode)->i_data_sem);
+out_unlock:
+	percpu_up_write(&sbi->s_writepages_rwsem);
 	return ret;
 }
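
These migrate.c hunks are the consumer of the rwsem renamed earlier: converting an inode between indirect and extent format must not race with writeback that has already sampled the EXTENTS flag, so both migration paths now hold s_writepages_rwsem for write across the conversion. A sketch of the two sides of a percpu_rw_semaphore, whose point is an almost-free read side on the hot writeback path:

/* hot path (ext4_writepages): many concurrent, cheap readers */
percpu_down_read(&sbi->s_writepages_rwsem);
/* ... writeback sees a stable EXTENTS/JOURNAL_DATA flag ... */
percpu_up_read(&sbi->s_writepages_rwsem);

/* cold path (migration, journal-flag change): exclusive */
percpu_down_write(&sbi->s_writepages_rwsem);
/* ... flip the inode format and swap the address_space ops ... */
percpu_up_write(&sbi->s_writepages_rwsem);
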
fs/ext4/namei.c

@@ -1511,6 +1511,7 @@ restart:
 		/*
 		 * We deal with the read-ahead logic here.
 		 */
+		cond_resched();
 		if (ra_ptr >= ra_max) {
 			/* Refill the readahead buffer */
 			ra_ptr = 0;
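
This one-liner addresses soft lockups: __ext4_find_entry() can scan a very large (possibly maliciously constructed) directory block by block without ever sleeping. Dropping a cond_resched() into the per-block loop is the standard remedy for long kernel loops on non-preemptible kernels; the shape of the loop, much simplified:

while (block < nblocks) {	/* simplified; the real loop wraps around */
	cond_resched();		/* yield the CPU if the scheduler needs it */
	/* ... issue read-ahead, search one directory block ... */
}
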
fs/ext4/resize.c

@@ -17,6 +17,33 @@
 
 #include "ext4_jbd2.h"
 
+struct ext4_rcu_ptr {
+	struct rcu_head rcu;
+	void *ptr;
+};
+
+static void ext4_rcu_ptr_callback(struct rcu_head *head)
+{
+	struct ext4_rcu_ptr *ptr;
+
+	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
+	kvfree(ptr->ptr);
+	kfree(ptr);
+}
+
+void ext4_kvfree_array_rcu(void *to_free)
+{
+	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+
+	if (ptr) {
+		ptr->ptr = to_free;
+		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
+		return;
+	}
+	synchronize_rcu();
+	kvfree(to_free);
+}
+
 int ext4_resize_begin(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -542,8 +569,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
 				brelse(gdb);
 				goto out;
 			}
-			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
-			       gdb->b_size);
+			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
+				s_group_desc, j)->b_data, gdb->b_size);
 			set_buffer_uptodate(gdb);
 
 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
@@ -860,13 +887,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	}
 	brelse(dind);
 
-	o_group_desc = EXT4_SB(sb)->s_group_desc;
+	rcu_read_lock();
+	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+	rcu_read_unlock();
 	n_group_desc[gdb_num] = gdb_bh;
-	EXT4_SB(sb)->s_group_desc = n_group_desc;
+	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 	EXT4_SB(sb)->s_gdb_count++;
-	kvfree(o_group_desc);
+	ext4_kvfree_array_rcu(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
 	err = ext4_handle_dirty_super(handle, sb);
@@ -909,9 +938,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
 		return err;
 	}
 
-	o_group_desc = EXT4_SB(sb)->s_group_desc;
+	rcu_read_lock();
+	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+	rcu_read_unlock();
 	n_group_desc[gdb_num] = gdb_bh;
 
 	BUFFER_TRACE(gdb_bh, "get_write_access");
@@ -922,9 +953,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
 		return err;
 	}
 
-	EXT4_SB(sb)->s_group_desc = n_group_desc;
+	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 	EXT4_SB(sb)->s_gdb_count++;
-	kvfree(o_group_desc);
+	ext4_kvfree_array_rcu(o_group_desc);
 	return err;
 }
 
@@ -1188,7 +1219,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
 		 * use non-sparse filesystems anymore.  This is already checked above.
 		 */
 		if (gdb_off) {
-			gdb_bh = sbi->s_group_desc[gdb_num];
+			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
 			BUFFER_TRACE(gdb_bh, "get_write_access");
 			err = ext4_journal_get_write_access(handle, gdb_bh);
 
@@ -1270,7 +1302,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
 		/*
 		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
 		 */
-		gdb_bh = sbi->s_group_desc[gdb_num];
+		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
 		/* Update group descriptor block for new group */
 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));
@@ -1398,11 +1430,14 @@ static void ext4_update_super(struct super_block *sb,
 		   percpu_counter_read(&sbi->s_freeclusters_counter));
 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group;
+		struct flex_groups *fg;
+
 		flex_group = ext4_flex_group(sbi, group_data[0].group);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &fg->free_clusters);
 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+			   &fg->free_inodes);
 	}
 
 	/*
@@ -1497,7 +1532,8 @@ exit_journal:
 		for (; gdb_num <= gdb_num_end; gdb_num++) {
 			struct buffer_head *gdb_bh;
 
-			gdb_bh = sbi->s_group_desc[gdb_num];
+			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
 			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
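
ext4_kvfree_array_rcu(), added at the top of this file, is the deferred-free half of the publish/retire protocol. At the time of this series there was no kvfree_rcu() helper, so the patch parks the pointer in a small heap-allocated rcu_head wrapper and frees it from the RCU callback; if even that allocation fails, it degrades to a blocking synchronize_rcu(). The caller-side contract, as used in add_new_gdb() above:

rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); /* publish */
EXT4_SB(sb)->s_gdb_count++;
/*
 * Readers inside rcu_read_lock() sections may still hold o_group_desc,
 * so it must not be kvfree()d synchronously:
 */
ext4_kvfree_array_rcu(o_group_desc);	/* reclaimed after a grace period */
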
fs/ext4/super.c

@@ -1014,6 +1014,8 @@ static void ext4_put_super(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
+	struct buffer_head **group_desc;
+	struct flex_groups **flex_groups;
 	int aborted = 0;
 	int i, err;
 
@@ -1046,15 +1048,23 @@ static void ext4_put_super(struct super_block *sb)
 	if (!sb_rdonly(sb))
 		ext4_commit_super(sb, 1);
 
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
-	kvfree(sbi->s_flex_groups);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(get_qf_name(sb, sbi, i));
@@ -2380,8 +2390,8 @@ done:
 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct flex_groups *new_groups;
-	int size;
+	struct flex_groups **old_groups, **new_groups;
+	int size, i;
 
 	if (!sbi->s_log_groups_per_flex)
 		return 0;
@@ -2390,22 +2400,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 	if (size <= sbi->s_flex_groups_allocated)
 		return 0;
 
-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
-	new_groups = kvzalloc(size, GFP_KERNEL);
+	new_groups = kvzalloc(roundup_pow_of_two(size *
+			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
 	if (!new_groups) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
-			 size / (int) sizeof(struct flex_groups));
+		ext4_msg(sb, KERN_ERR,
+			 "not enough memory for %d flex group pointers", size);
 		return -ENOMEM;
 	}
-
-	if (sbi->s_flex_groups) {
-		memcpy(new_groups, sbi->s_flex_groups,
-		       (sbi->s_flex_groups_allocated *
-			sizeof(struct flex_groups)));
-		kvfree(sbi->s_flex_groups);
+	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+		new_groups[i] = kvzalloc(roundup_pow_of_two(
+					 sizeof(struct flex_groups)),
+					 GFP_KERNEL);
+		if (!new_groups[i]) {
+			for (i--; i >= sbi->s_flex_groups_allocated; i--)
+				kvfree(new_groups[i]);
+			kvfree(new_groups);
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %d flex groups", size);
+			return -ENOMEM;
+		}
 	}
-	sbi->s_flex_groups = new_groups;
-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+	rcu_read_lock();
+	old_groups = rcu_dereference(sbi->s_flex_groups);
+	if (old_groups)
+		memcpy(new_groups, old_groups,
+		       (sbi->s_flex_groups_allocated *
+			sizeof(struct flex_groups *)));
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+	sbi->s_flex_groups_allocated = size;
+	if (old_groups)
+		ext4_kvfree_array_rcu(old_groups);
 	return 0;
 }
 
@@ -2413,6 +2438,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
+	struct flex_groups *fg;
 	ext4_group_t flex_group;
 	int i, err;
 
@@ -2430,12 +2456,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		gdp = ext4_get_group_desc(sb, i, NULL);
 
 		flex_group = ext4_flex_group(sbi, i);
-		atomic_add(ext4_free_inodes_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
 		atomic64_add(ext4_free_group_clusters(sb, gdp),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
-		atomic_add(ext4_used_dirs_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].used_dirs);
+			     &fg->free_clusters);
+		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
 	}
 
 	return 1;
@@ -3009,7 +3034,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 		return 0;
 	}
 
-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
+#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
 	if (!readonly && (ext4_has_feature_quota(sb) ||
 			  ext4_has_feature_project(sb))) {
 		ext4_msg(sb, KERN_ERR,
@@ -3634,9 +3659,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 	char *orig_data = kstrdup(data, GFP_KERNEL);
-	struct buffer_head *bh;
+	struct buffer_head *bh, **group_desc;
 	struct ext4_super_block *es = NULL;
 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+	struct flex_groups **flex_groups;
 	ext4_fsblk_t block;
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
@@ -4290,9 +4316,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount;
 		}
 	}
-	sbi->s_group_desc = kvmalloc_array(db_count,
-					   sizeof(struct buffer_head *),
-					   GFP_KERNEL);
+	rcu_assign_pointer(sbi->s_group_desc,
+			   kvmalloc_array(db_count,
+					  sizeof(struct buffer_head *),
+					  GFP_KERNEL));
 	if (sbi->s_group_desc == NULL) {
 		ext4_msg(sb, KERN_ERR, "not enough memory");
 		ret = -ENOMEM;
@@ -4308,14 +4335,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 
 	for (i = 0; i < db_count; i++) {
+		struct buffer_head *bh;
+
 		block = descriptor_loc(sb, logical_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
-		if (!sbi->s_group_desc[i]) {
+		bh = sb_bread_unmovable(sb, block);
+		if (!bh) {
 			ext4_msg(sb, KERN_ERR,
 			       "can't read group descriptor %d", i);
 			db_count = i;
 			goto failed_mount2;
 		}
+		rcu_read_lock();
+		rcu_dereference(sbi->s_group_desc)[i] = bh;
+		rcu_read_unlock();
 	}
 	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
@@ -4594,7 +4626,7 @@ no_journal:
 	err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
 				  GFP_KERNEL);
 	if (!err)
-		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
 
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
@@ -4682,13 +4714,19 @@ failed_mount7:
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
-	if (sbi->s_flex_groups)
-		kvfree(sbi->s_flex_groups);
+	rcu_read_lock();
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4717,9 +4755,12 @@ failed_mount3:
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	rcu_read_unlock();
 failed_mount:
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
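
The ext4_feature_set_ok() hunk is the "quota configured as module" fix: with CONFIG_QFMT_V2=m the preprocessor defines CONFIG_QFMT_V2_MODULE rather than CONFIG_QFMT_V2, so the old `#if !defined(...)` branch fired and read-write mounts of quota-feature filesystems failed. IS_ENABLED() accepts both built-in and modular configurations:

#if defined(CONFIG_QFMT_V2)	/* true only for =y; =m was wrongly rejected */
#endif

#if IS_ENABLED(CONFIG_QFMT_V2)	/* true for both =y and =m */
#endif
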
fs/jbd2/transaction.c

@@ -936,8 +936,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	char *frozen_buffer = NULL;
 	unsigned long start_lock, time_lock;
 
-	if (is_handle_aborted(handle))
-		return -EROFS;
 	journal = transaction->t_journal;
 
 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -1189,6 +1187,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	int rc;
 
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, false))
 		return 0;
 
@@ -1326,6 +1327,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	char *committed_data = NULL;
 
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, true))
 		return 0;
 
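
Finally, the jbd2 change behind "fix ocfs2 corrupt when clearing block group bits": jbd2_write_access_granted() is a fast path that can return success without ever reaching do_get_write_access(), where the is_handle_aborted() check used to live. An aborted handle could therefore be granted write access and go on to modify an already-journaled buffer. Hoisting the check into both exported entry points closes that window; a condensed sketch of the resulting control flow (comments are editorial):

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (is_handle_aborted(handle))	/* fail before the fast path */
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;	/* fast path: no further abort check needed */

	jh = jbd2_journal_add_journal_head(bh);
	rc = do_get_write_access(handle, jh, 0);	/* slow path */
	jbd2_journal_put_journal_head(jh);
	return rc;
}
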