enforce ->sync_fs is only called for rw superblock
Make sure a superblock really is writeable by checking MS_RDONLY under s_umount. sync_filesystems needed some re-arrangement for that, but all but one sync_filesystem caller had the correct locking already, so that we could add the check there. cachefiles grew s_umount locking.

I've also added a WARN_ON to sync_filesystem to assert this for future callers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: e500475338
Commit: 5af7926ff3
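Before the per-file hunks, a minimal sketch of the calling convention this patch enforces (not part of the patch; the helper name is made up for illustration): a caller of sync_filesystem() holds s_umount for read, so the MS_RDONLY test inside sync_filesystem() and the actual writeout cannot race with a remount, which takes s_umount for write.

#include <linux/fs.h>

/*
 * Illustrative only -- how a sync_filesystem() caller is expected to
 * look after this patch.  cachefiles (hunk below) is the one in-tree
 * caller that gains exactly this locking.
 */
static int example_sync_one_sb(struct super_block *sb)
{
	int ret;

	down_read(&sb->s_umount);	/* excludes remount r/o <-> r/w */
	ret = sync_filesystem(sb);	/* returns 0 early on MS_RDONLY */
	up_read(&sb->s_umount);
	return ret;
}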
fs/btrfs/super.c
@@ -394,9 +394,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 	struct btrfs_root *root = btrfs_sb(sb);
 	int ret;
 
-	if (sb->s_flags & MS_RDONLY)
-		return 0;
-
 	if (!wait) {
 		filemap_flush(root->fs_info->btree_inode->i_mapping);
 		return 0;
fs/cachefiles/interface.c
@@ -354,7 +354,9 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
 	/* make sure all pages pinned by operations on behalf of the netfs are
 	 * written to disc */
 	cachefiles_begin_secure(cache, &saved_cred);
+	down_read(&cache->mnt->mnt_sb->s_umount);
 	ret = sync_filesystem(cache->mnt->mnt_sb);
+	up_read(&cache->mnt->mnt_sb->s_umount);
 	cachefiles_end_secure(cache, saved_cred);
 
 	if (ret == -EIO)
fs/reiserfs/super.c
@@ -64,18 +64,15 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
 
 static int reiserfs_sync_fs(struct super_block *s, int wait)
 {
-	if (!(s->s_flags & MS_RDONLY)) {
-		struct reiserfs_transaction_handle th;
-		reiserfs_write_lock(s);
-		if (!journal_begin(&th, s, 1))
-			if (!journal_end_sync(&th, s, 1))
-				reiserfs_flush_old_commits(s);
-		s->s_dirt = 0; /* Even if it's not true.
-				* We'll loop forever in sync_supers otherwise */
-		reiserfs_write_unlock(s);
-	} else {
-		s->s_dirt = 0;
-	}
+	struct reiserfs_transaction_handle th;
+
+	reiserfs_write_lock(s);
+	if (!journal_begin(&th, s, 1))
+		if (!journal_end_sync(&th, s, 1))
+			reiserfs_flush_old_commits(s);
+	s->s_dirt = 0; /* Even if it's not true.
+			* We'll loop forever in sync_supers otherwise */
+	reiserfs_write_unlock(s);
 	return 0;
 }
 
fs/sync.c
@@ -51,6 +51,18 @@ int sync_filesystem(struct super_block *sb)
 {
 	int ret;
 
+	/*
+	 * We need to be protected against the filesystem going from
+	 * r/o to r/w or vice versa.
+	 */
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	/*
+	 * No point in syncing out anything if the filesystem is read-only.
+	 */
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
 	ret = __sync_filesystem(sb, 0);
 	if (ret < 0)
 		return ret;
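A note on the WARN_ON (commentary, not from the patch): rwsem_is_locked() returns true for either a read or a write holder, so the assertion only flags callers that hold s_umount not at all; ordinary callers holding it for read and the remount path holding it for write both satisfy it.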
@@ -79,25 +91,22 @@ static void sync_filesystems(int wait)
 
 	mutex_lock(&mutex);		/* Could be down_interruptible */
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
-		if (sb->s_flags & MS_RDONLY)
-			continue;
+	list_for_each_entry(sb, &super_blocks, s_list)
 		sb->s_need_sync = 1;
-	}
 
 restart:
 	list_for_each_entry(sb, &super_blocks, s_list) {
 		if (!sb->s_need_sync)
 			continue;
 		sb->s_need_sync = 0;
-		if (sb->s_flags & MS_RDONLY)
-			continue;	/* hm. Was remounted r/o meanwhile */
 		sb->s_count++;
 		spin_unlock(&sb_lock);
+
 		down_read(&sb->s_umount);
-		if (sb->s_root)
+		if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
 			__sync_filesystem(sb, wait);
 		up_read(&sb->s_umount);
+
 		/* restart only when sb is no longer on the list */
 		spin_lock(&sb_lock);
 		if (__put_super_and_need_restart(sb))
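This is the re-arrangement the commit message refers to (commentary, not from the patch): the old code tested MS_RDONLY while only holding sb_lock, so a remount could flip the flag between that test and the writeout; the new code marks every superblock and performs the read-only check only after down_read(&sb->s_umount), where the flag can no longer change.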
fs/ubifs/super.c
@@ -447,9 +447,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
 	if (!wait)
 		return 0;
 
-	if (sb->s_flags & MS_RDONLY)
-		return 0;
-
 	/*
 	 * VFS calls '->sync_fs()' before synchronizing all dirty inodes and
 	 * pages, so synchronize them first, then commit the journal. Strictly