fs: move bdev code out of buffer.c

Move some block device related code out from buffer.c and put it in
block_dev.c. I'm trying to move non-buffer_head code out of buffer.c.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Commit: 585d3bc06f
Parent: 3ba13d179e

 fs/block_dev.c | 146 +

@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/buffer_head.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                                 iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }
 
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping. Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+        int ret = 0;
+
+        if (bdev)
+                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
+        return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+        struct super_block *sb = get_super(bdev);
+        if (sb) {
+                int res = fsync_super(sb);
+                drop_super(sb);
+                return res;
+        }
+        return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev -- lock a filesystem and force it into a consistent state
+ * @bdev: blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can unfreeze the frozen filesystem actually when multiple
+ * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
+ * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
+ * actually.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+        struct super_block *sb;
+        int error = 0;
+
+        mutex_lock(&bdev->bd_fsfreeze_mutex);
+        if (bdev->bd_fsfreeze_count > 0) {
+                bdev->bd_fsfreeze_count++;
+                sb = get_super(bdev);
+                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+                return sb;
+        }
+        bdev->bd_fsfreeze_count++;
+
+        down(&bdev->bd_mount_sem);
+        sb = get_super(bdev);
+        if (sb && !(sb->s_flags & MS_RDONLY)) {
+                sb->s_frozen = SB_FREEZE_WRITE;
+                smp_wmb();
+
+                __fsync_super(sb);
+
+                sb->s_frozen = SB_FREEZE_TRANS;
+                smp_wmb();
+
+                sync_blockdev(sb->s_bdev);
+
+                if (sb->s_op->freeze_fs) {
+                        error = sb->s_op->freeze_fs(sb);
+                        if (error) {
+                                printk(KERN_ERR
+                                        "VFS:Filesystem freeze failed\n");
+                                sb->s_frozen = SB_UNFROZEN;
+                                drop_super(sb);
+                                up(&bdev->bd_mount_sem);
+                                bdev->bd_fsfreeze_count--;
+                                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+                                return ERR_PTR(error);
+                        }
+                }
+        }
+
+        sync_blockdev(bdev);
+        mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+        return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev -- unlock filesystem
+ * @bdev: blockdevice to unlock
+ * @sb: associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+        int error = 0;
+
+        mutex_lock(&bdev->bd_fsfreeze_mutex);
+        if (!bdev->bd_fsfreeze_count) {
+                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+                return -EINVAL;
+        }
+
+        bdev->bd_fsfreeze_count--;
+        if (bdev->bd_fsfreeze_count > 0) {
+                if (sb)
+                        drop_super(sb);
+                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+                return 0;
+        }
+
+        if (sb) {
+                BUG_ON(sb->s_bdev != bdev);
+                if (!(sb->s_flags & MS_RDONLY)) {
+                        if (sb->s_op->unfreeze_fs) {
+                                error = sb->s_op->unfreeze_fs(sb);
+                                if (error) {
+                                        printk(KERN_ERR
+                                                "VFS:Filesystem thaw failed\n");
+                                        sb->s_frozen = SB_FREEZE_TRANS;
+                                        bdev->bd_fsfreeze_count++;
+                                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
+                                        return error;
+                                }
+                        }
+                        sb->s_frozen = SB_UNFROZEN;
+                        smp_wmb();
+                        wake_up(&sb->s_wait_unfrozen);
+                }
+                drop_super(sb);
+        }
+
+        up(&bdev->bd_mount_sem);
+        mutex_unlock(&bdev->bd_fsfreeze_mutex);
+        return 0;
+}
+EXPORT_SYMBOL(thaw_bdev);
+
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
         return block_write_full_page(page, blkdev_get_block, wbc);

 fs/buffer.c | 145 -

@@ -165,151 +165,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
         put_bh(bh);
 }
 
-/*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-        int ret = 0;
-
-        if (bdev)
-                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-        return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-        struct super_block *sb = get_super(bdev);
-        if (sb) {
-                int res = fsync_super(sb);
-                drop_super(sb);
-                return res;
-        }
-        return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev: blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-        struct super_block *sb;
-        int error = 0;
-
-        mutex_lock(&bdev->bd_fsfreeze_mutex);
-        if (bdev->bd_fsfreeze_count > 0) {
-                bdev->bd_fsfreeze_count++;
-                sb = get_super(bdev);
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return sb;
-        }
-        bdev->bd_fsfreeze_count++;
-
-        down(&bdev->bd_mount_sem);
-        sb = get_super(bdev);
-        if (sb && !(sb->s_flags & MS_RDONLY)) {
-                sb->s_frozen = SB_FREEZE_WRITE;
-                smp_wmb();
-
-                __fsync_super(sb);
-
-                sb->s_frozen = SB_FREEZE_TRANS;
-                smp_wmb();
-
-                sync_blockdev(sb->s_bdev);
-
-                if (sb->s_op->freeze_fs) {
-                        error = sb->s_op->freeze_fs(sb);
-                        if (error) {
-                                printk(KERN_ERR
-                                        "VFS:Filesystem freeze failed\n");
-                                sb->s_frozen = SB_UNFROZEN;
-                                drop_super(sb);
-                                up(&bdev->bd_mount_sem);
-                                bdev->bd_fsfreeze_count--;
-                                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                return ERR_PTR(error);
-                        }
-                }
-        }
-
-        sync_blockdev(bdev);
-        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-        return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev -- unlock filesystem
- * @bdev: blockdevice to unlock
- * @sb: associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-        int error = 0;
-
-        mutex_lock(&bdev->bd_fsfreeze_mutex);
-        if (!bdev->bd_fsfreeze_count) {
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return -EINVAL;
-        }
-
-        bdev->bd_fsfreeze_count--;
-        if (bdev->bd_fsfreeze_count > 0) {
-                if (sb)
-                        drop_super(sb);
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return 0;
-        }
-
-        if (sb) {
-                BUG_ON(sb->s_bdev != bdev);
-                if (!(sb->s_flags & MS_RDONLY)) {
-                        if (sb->s_op->unfreeze_fs) {
-                                error = sb->s_op->unfreeze_fs(sb);
-                                if (error) {
-                                        printk(KERN_ERR
-                                                "VFS:Filesystem thaw failed\n");
-                                        sb->s_frozen = SB_FREEZE_TRANS;
-                                        bdev->bd_fsfreeze_count++;
-                                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                        return error;
-                                }
-                        }
-                        sb->s_frozen = SB_UNFROZEN;
-                        smp_wmb();
-                        wake_up(&sb->s_wait_unfrozen);
-                }
-                drop_super(sb);
-        }
-
-        up(&bdev->bd_mount_sem);
-        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-        return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,

 include/linux/buffer_head.h

@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
 
 void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *);
-int sync_blockdev(struct block_device *bdev);
 void __wait_on_buffer(struct buffer_head *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-int fsync_bdev(struct block_device *);
-struct super_block *freeze_bdev(struct block_device *);
-int thaw_bdev(struct block_device *, struct super_block *);
-int fsync_super(struct super_block *);
-int fsync_no_super(struct block_device *);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                         unsigned size);
 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,

 include/linux/fs.h

@@ -1874,6 +1874,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+extern int fsync_super(struct super_block *);
+extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
 #endif
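
The freeze/thaw pair moved here is reference-counted via bd_fsfreeze_count, so only the last thaw actually unfreezes the filesystem. A minimal sketch of how a snapshot-style caller would drive this API (the caller function is hypothetical and its error handling simplified; it is not part of this commit):

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical caller: quiesce a block device while a snapshot is taken. */
static int snapshot_around_freeze(struct block_device *bdev)
{
        struct super_block *sb;

        /*
         * Blocks new writes and syncs the filesystem; may return NULL
         * (no superblock mounted) or ERR_PTR() if ->freeze_fs failed.
         */
        sb = freeze_bdev(bdev);
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        /* ... take the snapshot while the filesystem is consistent ... */

        /* Drops the freeze refcount; the last caller really thaws. */
        return thaw_bdev(bdev, sb);
}

Device-mapper's suspend path is one in-tree example of this freeze/thaw usage pattern.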