fs: introduce f_op->mmap_capabilities for nommu mmap support

Since "BDI: Provide backing device capability information [try #3]" the
backing_dev_info structure also provides flags for the kind of mmap
operation available in a nommu environment, which is entirely unrelated
to its original purpose.

Introduce a new nommu-only file operation to provide this information to
the nommu mmap code instead.  Splitting this from the backing_dev_info
structure allows removing many backing_dev_info instances that aren't
otherwise needed, and entirely gets rid of the concept of providing a
backing_dev_info for a character device.  It also removes the need for
the mtd_inodefs filesystem.
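For illustration, the new hook is a nommu-only entry in struct file_operations
that returns a mask of the new NOMMU_MAP_* flags; a driver whose memory can be
mapped in place wires it up as in the /dev/mem conversion below (a sketch
assembled from this patch, not a complete file):

	static unsigned memory_mmap_capabilities(struct file *file)
	{
		/* permit direct mmap, for read, write or exec */
		return NOMMU_MAP_DIRECT |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
	}

	static const struct file_operations mem_fops = {
		/* ...other operations unchanged... */
		.mmap			= mmap_mem,
	#ifndef CONFIG_MMU
		.get_unmapped_area	= get_unmapped_area_mem,
		.mmap_capabilities	= memory_mmap_capabilities,
	#endif
	};

Devices and filesystems without a directly mappable backing store simply return
NOMMU_MAP_COPY; files that provide no hook at all keep the old default
behaviour in mm/nommu.c.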

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tejun Heo <tj@kernel.org>
Acked-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author: Christoph Hellwig, 2015-01-14 10:42:32 +01:00; committed by Jens Axboe
Parent: 97b713ba3e
Commit: b4caecd480
32 changed files: 169 additions and 346 deletions


@ -43,12 +43,12 @@ and it's also much more restricted in the latter case:
even if this was created by another process. even if this was created by another process.
- If possible, the file mapping will be directly on the backing device - If possible, the file mapping will be directly on the backing device
if the backing device has the BDI_CAP_MAP_DIRECT capability and if the backing device has the NOMMU_MAP_DIRECT capability and
appropriate mapping protection capabilities. Ramfs, romfs, cramfs appropriate mapping protection capabilities. Ramfs, romfs, cramfs
and mtd might all permit this. and mtd might all permit this.
- If the backing device device can't or won't permit direct sharing, - If the backing device device can't or won't permit direct sharing,
but does have the BDI_CAP_MAP_COPY capability, then a copy of the but does have the NOMMU_MAP_COPY capability, then a copy of the
appropriate bit of the file will be read into a contiguous bit of appropriate bit of the file will be read into a contiguous bit of
memory and any extraneous space beyond the EOF will be cleared memory and any extraneous space beyond the EOF will be cleared
@ -220,7 +220,7 @@ directly (can't be copied).
The file->f_op->mmap() operation will be called to actually inaugurate the The file->f_op->mmap() operation will be called to actually inaugurate the
mapping. It can be rejected at that point. Returning the ENOSYS error will mapping. It can be rejected at that point. Returning the ENOSYS error will
cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified. cause the mapping to be copied instead if NOMMU_MAP_COPY is specified.
The vm_ops->close() routine will be invoked when the last mapping on a chardev The vm_ops->close() routine will be invoked when the last mapping on a chardev
is removed. An existing mapping will be shared, partially or not, if possible is removed. An existing mapping will be shared, partially or not, if possible
@ -232,7 +232,7 @@ want to handle it, despite the fact it's got an operation. For instance, it
might try directing the call to a secondary driver which turns out not to might try directing the call to a secondary driver which turns out not to
implement it. Such is the case for the framebuffer driver which attempts to implement it. Such is the case for the framebuffer driver which attempts to
direct the call to the device-specific driver. Under such circumstances, the direct the call to the device-specific driver. Under such circumstances, the
mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a
copy mapped otherwise. copy mapped otherwise.
IMPORTANT NOTE: IMPORTANT NOTE:
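As the documentation text above says, a driver's mmap() may still refuse the
direct mapping with -ENOSYS and let the nommu code fall back to a copy when
NOMMU_MAP_COPY is advertised; romfs's handler (unchanged, visible in the romfs
hunk further down) is the minimal example:

	static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* map in place only for shared mappings; private mappings
		 * are rejected here and copied by the nommu mmap code */
		return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
	}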


@ -607,7 +607,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.ra_pages = q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0; q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; q->backing_dev_info.capabilities = 0;
q->backing_dev_info.name = "block"; q->backing_dev_info.name = "block";
q->node = node_id; q->node = node_id;


@ -287,13 +287,24 @@ static unsigned long get_unmapped_area_mem(struct file *file,
return pgoff << PAGE_SHIFT; return pgoff << PAGE_SHIFT;
} }
/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
return NOMMU_MAP_DIRECT |
NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}
static unsigned zero_mmap_capabilities(struct file *file)
{
return NOMMU_MAP_COPY;
}
/* can't do an in-place private mapping if there's no MMU */ /* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma) static inline int private_mapping_ok(struct vm_area_struct *vma)
{ {
return vma->vm_flags & VM_MAYSHARE; return vma->vm_flags & VM_MAYSHARE;
} }
#else #else
#define get_unmapped_area_mem NULL
static inline int private_mapping_ok(struct vm_area_struct *vma) static inline int private_mapping_ok(struct vm_area_struct *vma)
{ {
@ -721,7 +732,10 @@ static const struct file_operations mem_fops = {
.write = write_mem, .write = write_mem,
.mmap = mmap_mem, .mmap = mmap_mem,
.open = open_mem, .open = open_mem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem, .get_unmapped_area = get_unmapped_area_mem,
.mmap_capabilities = memory_mmap_capabilities,
#endif
}; };
#ifdef CONFIG_DEVKMEM #ifdef CONFIG_DEVKMEM
@ -731,7 +745,10 @@ static const struct file_operations kmem_fops = {
.write = write_kmem, .write = write_kmem,
.mmap = mmap_kmem, .mmap = mmap_kmem,
.open = open_kmem, .open = open_kmem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem, .get_unmapped_area = get_unmapped_area_mem,
.mmap_capabilities = memory_mmap_capabilities,
#endif
}; };
#endif #endif
@ -760,16 +777,9 @@ static const struct file_operations zero_fops = {
.read_iter = read_iter_zero, .read_iter = read_iter_zero,
.aio_write = aio_write_zero, .aio_write = aio_write_zero,
.mmap = mmap_zero, .mmap = mmap_zero,
}; #ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
/* #endif
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
* - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
.capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
}; };
static const struct file_operations full_fops = { static const struct file_operations full_fops = {
@ -783,22 +793,22 @@ static const struct memdev {
const char *name; const char *name;
umode_t mode; umode_t mode;
const struct file_operations *fops; const struct file_operations *fops;
struct backing_dev_info *dev_info; fmode_t fmode;
} devlist[] = { } devlist[] = {
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#ifdef CONFIG_DEVKMEM #ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif #endif
[3] = { "null", 0666, &null_fops, NULL }, [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT #ifdef CONFIG_DEVPORT
[4] = { "port", 0, &port_fops, NULL }, [4] = { "port", 0, &port_fops, 0 },
#endif #endif
[5] = { "zero", 0666, &zero_fops, &zero_bdi }, [5] = { "zero", 0666, &zero_fops, 0 },
[7] = { "full", 0666, &full_fops, NULL }, [7] = { "full", 0666, &full_fops, 0 },
[8] = { "random", 0666, &random_fops, NULL }, [8] = { "random", 0666, &random_fops, 0 },
[9] = { "urandom", 0666, &urandom_fops, NULL }, [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, NULL }, [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif #endif
}; };
@ -816,12 +826,7 @@ static int memory_open(struct inode *inode, struct file *filp)
return -ENXIO; return -ENXIO;
filp->f_op = dev->fops; filp->f_op = dev->fops;
if (dev->dev_info) filp->f_mode |= dev->fmode;
filp->f_mapping->backing_dev_info = dev->dev_info;
/* Is /dev/mem or /dev/kmem ? */
if (dev->dev_info == &directly_mappable_cdev_bdi)
filp->f_mode |= FMODE_UNSIGNED_OFFSET;
if (dev->fops->open) if (dev->fops->open)
return dev->fops->open(inode, filp); return dev->fops->open(inode, filp);
@ -846,11 +851,6 @@ static struct class *mem_class;
static int __init chr_dev_init(void) static int __init chr_dev_init(void)
{ {
int minor; int minor;
int err;
err = bdi_init(&zero_bdi);
if (err)
return err;
if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR); printk("unable to get major %d for memory devs\n", MEM_MAJOR);


@ -49,7 +49,6 @@ static DEFINE_MUTEX(mtd_mutex);
*/ */
struct mtd_file_info { struct mtd_file_info {
struct mtd_info *mtd; struct mtd_info *mtd;
struct inode *ino;
enum mtd_file_modes mode; enum mtd_file_modes mode;
}; };
@ -59,10 +58,6 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
return fixed_size_llseek(file, offset, orig, mfi->mtd->size); return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
} }
static int count;
static struct vfsmount *mnt;
static struct file_system_type mtd_inodefs_type;
static int mtdchar_open(struct inode *inode, struct file *file) static int mtdchar_open(struct inode *inode, struct file *file)
{ {
int minor = iminor(inode); int minor = iminor(inode);
@ -70,7 +65,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
int ret = 0; int ret = 0;
struct mtd_info *mtd; struct mtd_info *mtd;
struct mtd_file_info *mfi; struct mtd_file_info *mfi;
struct inode *mtd_ino;
pr_debug("MTD_open\n"); pr_debug("MTD_open\n");
@ -78,10 +72,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && (minor & 1)) if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES; return -EACCES;
ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
if (ret)
return ret;
mutex_lock(&mtd_mutex); mutex_lock(&mtd_mutex);
mtd = get_mtd_device(NULL, devnum); mtd = get_mtd_device(NULL, devnum);
@ -95,43 +85,26 @@ static int mtdchar_open(struct inode *inode, struct file *file)
goto out1; goto out1;
} }
mtd_ino = iget_locked(mnt->mnt_sb, devnum);
if (!mtd_ino) {
ret = -ENOMEM;
goto out1;
}
if (mtd_ino->i_state & I_NEW) {
mtd_ino->i_private = mtd;
mtd_ino->i_mode = S_IFCHR;
mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
unlock_new_inode(mtd_ino);
}
file->f_mapping = mtd_ino->i_mapping;
/* You can't open it RW if it's not a writeable device */ /* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
ret = -EACCES; ret = -EACCES;
goto out2; goto out1;
} }
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) { if (!mfi) {
ret = -ENOMEM; ret = -ENOMEM;
goto out2; goto out1;
} }
mfi->ino = mtd_ino;
mfi->mtd = mtd; mfi->mtd = mtd;
file->private_data = mfi; file->private_data = mfi;
mutex_unlock(&mtd_mutex); mutex_unlock(&mtd_mutex);
return 0; return 0;
out2:
iput(mtd_ino);
out1: out1:
put_mtd_device(mtd); put_mtd_device(mtd);
out: out:
mutex_unlock(&mtd_mutex); mutex_unlock(&mtd_mutex);
simple_release_fs(&mnt, &count);
return ret; return ret;
} /* mtdchar_open */ } /* mtdchar_open */
@ -148,12 +121,9 @@ static int mtdchar_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE)) if ((file->f_mode & FMODE_WRITE))
mtd_sync(mtd); mtd_sync(mtd);
iput(mfi->ino);
put_mtd_device(mtd); put_mtd_device(mtd);
file->private_data = NULL; file->private_data = NULL;
kfree(mfi); kfree(mfi);
simple_release_fs(&mnt, &count);
return 0; return 0;
} /* mtdchar_close */ } /* mtdchar_close */
@ -1117,6 +1087,13 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
ret = mtd_get_unmapped_area(mtd, len, offset, flags); ret = mtd_get_unmapped_area(mtd, len, offset, flags);
return ret == -EOPNOTSUPP ? -ENODEV : ret; return ret == -EOPNOTSUPP ? -ENODEV : ret;
} }
static unsigned mtdchar_mmap_capabilities(struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
return mtd_mmap_capabilities(mfi->mtd);
}
#endif #endif
/* /*
@ -1160,27 +1137,10 @@ static const struct file_operations mtd_fops = {
.mmap = mtdchar_mmap, .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
.get_unmapped_area = mtdchar_get_unmapped_area, .get_unmapped_area = mtdchar_get_unmapped_area,
.mmap_capabilities = mtdchar_mmap_capabilities,
#endif #endif
}; };
static const struct super_operations mtd_ops = {
.drop_inode = generic_delete_inode,
.statfs = simple_statfs,
};
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
}
static struct file_system_type mtd_inodefs_type = {
.name = "mtd_inodefs",
.mount = mtd_inodefs_mount,
.kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("mtd_inodefs");
int __init init_mtdchar(void) int __init init_mtdchar(void)
{ {
int ret; int ret;
@ -1193,23 +1153,11 @@ int __init init_mtdchar(void)
return ret; return ret;
} }
ret = register_filesystem(&mtd_inodefs_type);
if (ret) {
pr_err("Can't register mtd_inodefs filesystem, error %d\n",
ret);
goto err_unregister_chdev;
}
return ret;
err_unregister_chdev:
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
return ret; return ret;
} }
void __exit cleanup_mtdchar(void) void __exit cleanup_mtdchar(void)
{ {
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
} }


@ -732,8 +732,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
concat->subdev[0] = subdev[0]; concat->subdev[0] = subdev[0];
for (i = 1; i < num_devs; i++) { for (i = 1; i < num_devs; i++) {
@ -761,14 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
subdev[i]->flags & MTD_WRITEABLE; subdev[i]->flags & MTD_WRITEABLE;
} }
/* only permit direct mapping if the BDIs are all the same
* - copy-mapping is still permitted
*/
if (concat->mtd.backing_dev_info !=
subdev[i]->backing_dev_info)
concat->mtd.backing_dev_info =
&default_backing_dev_info;
concat->mtd.size += subdev[i]->size; concat->mtd.size += subdev[i]->size;
concat->mtd.ecc_stats.badblocks += concat->mtd.ecc_stats.badblocks +=
subdev[i]->ecc_stats.badblocks; subdev[i]->ecc_stats.badblocks;


@ -43,33 +43,7 @@
#include "mtdcore.h" #include "mtdcore.h"
/* static struct backing_dev_info mtd_bdi = {
* backing device capabilities for non-mappable devices (such as NAND flash)
* - permits private mappings, copies are taken of the data
*/
static struct backing_dev_info mtd_bdi_unmappable = {
.capabilities = BDI_CAP_MAP_COPY,
};
/*
* backing device capabilities for R/O mappable devices (such as ROM)
* - permits private mappings, copies are taken of the data
* - permits non-writable shared mappings
*/
static struct backing_dev_info mtd_bdi_ro_mappable = {
.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};
/*
* backing device capabilities for writable mappable devices (such as RAM)
* - permits private mappings, copies are taken of the data
* - permits non-writable shared mappings
*/
static struct backing_dev_info mtd_bdi_rw_mappable = {
.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
BDI_CAP_WRITE_MAP),
}; };
static int mtd_cls_suspend(struct device *dev, pm_message_t state); static int mtd_cls_suspend(struct device *dev, pm_message_t state);
@ -365,6 +339,22 @@ static struct device_type mtd_devtype = {
.release = mtd_release, .release = mtd_release,
}; };
#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
switch (mtd->type) {
case MTD_RAM:
return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
NOMMU_MAP_READ | NOMMU_MAP_WRITE;
case MTD_ROM:
return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
NOMMU_MAP_READ;
default:
return NOMMU_MAP_COPY;
}
}
#endif
/** /**
* add_mtd_device - register an MTD device * add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure * @mtd: pointer to new MTD device info structure
@ -380,19 +370,7 @@ int add_mtd_device(struct mtd_info *mtd)
struct mtd_notifier *not; struct mtd_notifier *not;
int i, error; int i, error;
if (!mtd->backing_dev_info) { mtd->backing_dev_info = &mtd_bdi;
switch (mtd->type) {
case MTD_RAM:
mtd->backing_dev_info = &mtd_bdi_rw_mappable;
break;
case MTD_ROM:
mtd->backing_dev_info = &mtd_bdi_ro_mappable;
break;
default:
mtd->backing_dev_info = &mtd_bdi_unmappable;
break;
}
}
BUG_ON(mtd->writesize == 0); BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex); mutex_lock(&mtd_table_mutex);
@ -1237,17 +1215,9 @@ static int __init init_mtd(void)
if (ret) if (ret)
goto err_reg; goto err_reg;
ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); ret = mtd_bdi_init(&mtd_bdi, "mtd");
if (ret) if (ret)
goto err_bdi1; goto err_bdi;
ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
if (ret)
goto err_bdi2;
ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
if (ret)
goto err_bdi3;
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
@ -1260,11 +1230,7 @@ static int __init init_mtd(void)
out_procfs: out_procfs:
if (proc_mtd) if (proc_mtd)
remove_proc_entry("mtd", NULL); remove_proc_entry("mtd", NULL);
err_bdi3: err_bdi:
bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
class_unregister(&mtd_class); class_unregister(&mtd_class);
err_reg: err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret); pr_err("Error registering mtd class or bdi: %d\n", ret);
@ -1277,9 +1243,7 @@ static void __exit cleanup_mtd(void)
if (proc_mtd) if (proc_mtd)
remove_proc_entry("mtd", NULL); remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class); class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi_unmappable); bdi_destroy(&mtd_bdi);
bdi_destroy(&mtd_bdi_ro_mappable);
bdi_destroy(&mtd_bdi_rw_mappable);
} }
module_init(init_mtd); module_init(init_mtd);


@ -378,7 +378,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.name = name; slave->mtd.name = name;
slave->mtd.owner = master->owner; slave->mtd.owner = master->owner;
slave->mtd.backing_dev_info = master->backing_dev_info;
/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
* to have the same data be in two different partitions. * to have the same data be in two different partitions.


@ -987,7 +987,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
if (err) if (err)
goto out_free; goto out_free;
lsi->lsi_flags |= LSI_BDI_INITIALIZED; lsi->lsi_flags |= LSI_BDI_INITIALIZED;
lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY; lsi->lsi_bdi.capabilities = 0;
err = ll_bdi_register(&lsi->lsi_bdi); err = ll_bdi_register(&lsi->lsi_bdi);
if (err) if (err)
goto out_free; goto out_free;

Просмотреть файл

@ -335,7 +335,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
} }
init_rwsem(&v9ses->rename_sem); init_rwsem(&v9ses->rename_sem);
rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); rc = bdi_setup_and_register(&v9ses->bdi, "9p");
if (rc) { if (rc) {
kfree(v9ses->aname); kfree(v9ses->aname);
kfree(v9ses->uname); kfree(v9ses->uname);


@ -106,7 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
volume->cell = params->cell; volume->cell = params->cell;
volume->vid = vlocation->vldb.vid[params->type]; volume->vid = vlocation->vldb.vid[params->type];
ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY); ret = bdi_setup_and_register(&volume->bdi, "afs");
if (ret) if (ret)
goto error_bdi; goto error_bdi;


@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt;
static const struct file_operations aio_ring_fops; static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops; static const struct address_space_operations aio_ctx_aops;
/* Backing dev info for aio fs.
* -no dirty page accounting or writeback happens
*/
static struct backing_dev_info aio_fs_backing_dev_info = {
.name = "aiofs",
.state = 0,
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
};
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{ {
struct qstr this = QSTR_INIT("[aio]", 5); struct qstr this = QSTR_INIT("[aio]", 5);
@ -185,7 +176,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
inode->i_mapping->a_ops = &aio_ctx_aops; inode->i_mapping->a_ops = &aio_ctx_aops;
inode->i_mapping->private_data = ctx; inode->i_mapping->private_data = ctx;
inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; inode->i_mapping->backing_dev_info = &noop_backing_dev_info;
inode->i_size = PAGE_SIZE * nr_pages; inode->i_size = PAGE_SIZE * nr_pages;
path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@ -230,9 +221,6 @@ static int __init aio_setup(void)
if (IS_ERR(aio_mnt)) if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount."); panic("Failed to create aio fs mount.");
if (bdi_init(&aio_fs_backing_dev_info))
panic("Failed to init aio fs backing dev info.");
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);


@ -1715,8 +1715,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{ {
int err; int err;
bdi->capabilities = BDI_CAP_MAP_COPY; err = bdi_setup_and_register(bdi, "btrfs");
err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
if (err) if (err)
return err; return err;


@ -24,27 +24,6 @@
#include "internal.h" #include "internal.h"
/*
* capabilities for /dev/mem, /dev/kmem and similar directly mappable character
* devices
* - permits shared-mmap for read, write and/or exec
* - does not permit private mmap in NOMMU mode (can't do COW)
* - no readahead or I/O queue unplugging required
*/
struct backing_dev_info directly_mappable_cdev_bdi = {
.name = "char",
.capabilities = (
#ifdef CONFIG_MMU
/* permit private copies of the data to be taken */
BDI_CAP_MAP_COPY |
#endif
/* permit direct mmap, for read, write or exec */
BDI_CAP_MAP_DIRECT |
BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
/* no writeback happens */
BDI_CAP_NO_ACCT_AND_WRITEBACK),
};
static struct kobj_map *cdev_map; static struct kobj_map *cdev_map;
static DEFINE_MUTEX(chrdevs_lock); static DEFINE_MUTEX(chrdevs_lock);
@ -575,8 +554,6 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
void __init chrdev_init(void) void __init chrdev_init(void)
{ {
cdev_map = kobj_map_init(base_probe, &chrdevs_lock); cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
if (bdi_init(&directly_mappable_cdev_bdi))
panic("Failed to init directly mappable cdev bdi");
} }
@ -590,4 +567,3 @@ EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add); EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(__register_chrdev); EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev); EXPORT_SYMBOL(__unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);


@ -3446,7 +3446,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
int referral_walks_count = 0; int referral_walks_count = 0;
#endif #endif
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs");
if (rc) if (rc)
return rc; return rc;


@ -183,7 +183,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
goto unlock_out; goto unlock_out;
} }
error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY); error = bdi_setup_and_register(&vc->bdi, "coda");
if (error) if (error)
goto unlock_out; goto unlock_out;


@ -70,8 +70,6 @@ extern int configfs_is_root(struct config_item *item);
extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
extern int configfs_inode_init(void);
extern void configfs_inode_exit(void);
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *, extern int configfs_make_dirent(struct configfs_dirent *,


@ -50,12 +50,6 @@ static const struct address_space_operations configfs_aops = {
.write_end = simple_write_end, .write_end = simple_write_end,
}; };
static struct backing_dev_info configfs_backing_dev_info = {
.name = "configfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct inode_operations configfs_inode_operations ={ static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr, .setattr = configfs_setattr,
}; };
@ -137,7 +131,7 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
if (inode) { if (inode) {
inode->i_ino = get_next_ino(); inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &configfs_aops; inode->i_mapping->a_ops = &configfs_aops;
inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; inode->i_mapping->backing_dev_info = &noop_backing_dev_info;
inode->i_op = &configfs_inode_operations; inode->i_op = &configfs_inode_operations;
if (sd->s_iattr) { if (sd->s_iattr) {
@ -283,13 +277,3 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
} }
mutex_unlock(&dir->d_inode->i_mutex); mutex_unlock(&dir->d_inode->i_mutex);
} }
int __init configfs_inode_init(void)
{
return bdi_init(&configfs_backing_dev_info);
}
void configfs_inode_exit(void)
{
bdi_destroy(&configfs_backing_dev_info);
}


@ -145,19 +145,13 @@ static int __init configfs_init(void)
if (!config_kobj) if (!config_kobj)
goto out2; goto out2;
err = configfs_inode_init(); err = register_filesystem(&configfs_fs_type);
if (err) if (err)
goto out3; goto out3;
err = register_filesystem(&configfs_fs_type);
if (err)
goto out4;
return 0; return 0;
out4:
pr_err("Unable to register filesystem!\n");
configfs_inode_exit();
out3: out3:
pr_err("Unable to register filesystem!\n");
kobject_put(config_kobj); kobject_put(config_kobj);
out2: out2:
kmem_cache_destroy(configfs_dir_cachep); kmem_cache_destroy(configfs_dir_cachep);
@ -172,7 +166,6 @@ static void __exit configfs_exit(void)
kobject_put(config_kobj); kobject_put(config_kobj);
kmem_cache_destroy(configfs_dir_cachep); kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL; configfs_dir_cachep = NULL;
configfs_inode_exit();
} }
MODULE_AUTHOR("Oracle"); MODULE_AUTHOR("Oracle");


@ -520,7 +520,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out; goto out;
} }
rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs");
if (rc) if (rc)
goto out1; goto out1;


@ -836,7 +836,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi; goto free_sbi;
} }
ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY); ret = bdi_setup_and_register(&sbi->bdi, "exofs");
if (ret) { if (ret) {
EXOFS_DBGMSG("Failed to bdi_setup_and_register\n"); EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
dput(sb->s_root); dput(sb->s_root);


@ -560,7 +560,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
server = NCP_SBP(sb); server = NCP_SBP(sb);
memset(server, 0, sizeof(*server)); memset(server, 0, sizeof(*server));
error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); error = bdi_setup_and_register(&server->bdi, "ncpfs");
if (error) if (error)
goto out_fput; goto out_fput;


@ -34,7 +34,14 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long flags); unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
static unsigned ramfs_mmap_capabilities(struct file *file)
{
return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}
const struct file_operations ramfs_file_operations = { const struct file_operations ramfs_file_operations = {
.mmap_capabilities = ramfs_mmap_capabilities,
.mmap = ramfs_nommu_mmap, .mmap = ramfs_nommu_mmap,
.get_unmapped_area = ramfs_nommu_get_unmapped_area, .get_unmapped_area = ramfs_nommu_get_unmapped_area,
.read = new_sync_read, .read = new_sync_read,


@ -50,14 +50,6 @@ static const struct address_space_operations ramfs_aops = {
.set_page_dirty = __set_page_dirty_no_writeback, .set_page_dirty = __set_page_dirty_no_writeback,
}; };
static struct backing_dev_info ramfs_backing_dev_info = {
.name = "ramfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
};
struct inode *ramfs_get_inode(struct super_block *sb, struct inode *ramfs_get_inode(struct super_block *sb,
const struct inode *dir, umode_t mode, dev_t dev) const struct inode *dir, umode_t mode, dev_t dev)
{ {
@ -67,7 +59,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
inode->i_ino = get_next_ino(); inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode); inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &ramfs_aops; inode->i_mapping->a_ops = &ramfs_aops;
inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; inode->i_mapping->backing_dev_info = &noop_backing_dev_info;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping); mapping_set_unevictable(inode->i_mapping);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@ -267,19 +259,9 @@ static struct file_system_type ramfs_fs_type = {
int __init init_ramfs_fs(void) int __init init_ramfs_fs(void)
{ {
static unsigned long once; static unsigned long once;
int err;
if (test_and_set_bit(0, &once)) if (test_and_set_bit(0, &once))
return 0; return 0;
return register_filesystem(&ramfs_fs_type);
err = bdi_init(&ramfs_backing_dev_info);
if (err)
return err;
err = register_filesystem(&ramfs_fs_type);
if (err)
bdi_destroy(&ramfs_backing_dev_info);
return err;
} }
fs_initcall(init_ramfs_fs); fs_initcall(init_ramfs_fs);


@ -70,6 +70,15 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS; return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
} }
static unsigned romfs_mmap_capabilities(struct file *file)
{
struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd;
if (!mtd)
return NOMMU_MAP_COPY;
return mtd_mmap_capabilities(mtd);
}
const struct file_operations romfs_ro_fops = { const struct file_operations romfs_ro_fops = {
.llseek = generic_file_llseek, .llseek = generic_file_llseek,
.read = new_sync_read, .read = new_sync_read,
@ -77,4 +86,5 @@ const struct file_operations romfs_ro_fops = {
.splice_read = generic_file_splice_read, .splice_read = generic_file_splice_read,
.mmap = romfs_mmap, .mmap = romfs_mmap,
.get_unmapped_area = romfs_get_unmapped_area, .get_unmapped_area = romfs_get_unmapped_area,
.mmap_capabilities = romfs_mmap_capabilities,
}; };


@ -2017,7 +2017,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
* Read-ahead will be disabled because @c->bdi.ra_pages is 0. * Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/ */
c->bdi.name = "ubifs", c->bdi.name = "ubifs",
c->bdi.capabilities = BDI_CAP_MAP_COPY; c->bdi.capabilities = 0;
err = bdi_init(&c->bdi); err = bdi_init(&c->bdi);
if (err) if (err)
goto out_close; goto out_close;


@ -114,7 +114,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...); const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi); void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason); enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi); void bdi_start_background_writeback(struct backing_dev_info *bdi);
@ -228,42 +228,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
* BDI_CAP_NO_WRITEBACK: Don't write pages back * BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
*
* BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
* BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
* BDI_CAP_READ_MAP: Can be mapped for reading
* BDI_CAP_WRITE_MAP: Can be mapped for writing
* BDI_CAP_EXEC_MAP: Can be mapped for execution
*
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*/ */
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001 #define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002 #define BDI_CAP_NO_WRITEBACK 0x00000002
#define BDI_CAP_MAP_COPY 0x00000004 #define BDI_CAP_NO_ACCT_WB 0x00000004
#define BDI_CAP_MAP_DIRECT 0x00000008 #define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_READ_MAP 0x00000010 #define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_WRITE_MAP 0x00000020
#define BDI_CAP_EXEC_MAP 0x00000040
#define BDI_CAP_NO_ACCT_WB 0x00000080
#define BDI_CAP_STABLE_WRITES 0x00000200
#define BDI_CAP_STRICTLIMIT 0x00000400
#define BDI_CAP_VMFLAGS \
(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \ #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
#if defined(VM_MAYREAD) && \
(BDI_CAP_READ_MAP != VM_MAYREAD || \
BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
extern struct backing_dev_info default_backing_dev_info; extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info; extern struct backing_dev_info noop_backing_dev_info;


@ -30,6 +30,4 @@ void cdev_del(struct cdev *);
void cd_forget(struct inode *); void cd_forget(struct inode *);
extern struct backing_dev_info directly_mappable_cdev_bdi;
#endif #endif


@ -1502,6 +1502,26 @@ struct block_device_operations;
#define HAVE_COMPAT_IOCTL 1 #define HAVE_COMPAT_IOCTL 1
#define HAVE_UNLOCKED_IOCTL 1 #define HAVE_UNLOCKED_IOCTL 1
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
*
* NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
* NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
* NOMMU_MAP_READ: Can be mapped for reading
* NOMMU_MAP_WRITE: Can be mapped for writing
* NOMMU_MAP_EXEC: Can be mapped for execution
*/
#define NOMMU_MAP_COPY 0x00000001
#define NOMMU_MAP_DIRECT 0x00000008
#define NOMMU_MAP_READ VM_MAYREAD
#define NOMMU_MAP_WRITE VM_MAYWRITE
#define NOMMU_MAP_EXEC VM_MAYEXEC
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
struct iov_iter; struct iov_iter;
struct file_operations { struct file_operations {
@ -1536,6 +1556,9 @@ struct file_operations {
long (*fallocate)(struct file *file, int mode, loff_t offset, long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len); loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f); void (*show_fdinfo)(struct seq_file *m, struct file *f);
#ifndef CONFIG_MMU
unsigned (*mmap_capabilities)(struct file *);
#endif
}; };
struct inode_operations { struct inode_operations {


@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err); return mtd_is_bitflip(err) || mtd_is_eccerr(err);
} }
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
#endif /* __MTD_MTD_H__ */ #endif /* __MTD_MTD_H__ */


@ -17,8 +17,6 @@ static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
struct backing_dev_info default_backing_dev_info = { struct backing_dev_info default_backing_dev_info = {
.name = "default", .name = "default",
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
.state = 0,
.capabilities = BDI_CAP_MAP_COPY,
}; };
EXPORT_SYMBOL_GPL(default_backing_dev_info); EXPORT_SYMBOL_GPL(default_backing_dev_info);
@ -513,13 +511,12 @@ EXPORT_SYMBOL(bdi_destroy);
* For use from filesystems to quickly init and register a bdi associated * For use from filesystems to quickly init and register a bdi associated
* with dirty writeback * with dirty writeback
*/ */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
unsigned int cap)
{ {
int err; int err;
bdi->name = name; bdi->name = name;
bdi->capabilities = cap; bdi->capabilities = 0;
err = bdi_init(bdi); err = bdi_init(bdi);
if (err) if (err)
return err; return err;


@ -946,9 +946,6 @@ static int validate_mmap_request(struct file *file,
return -EOVERFLOW; return -EOVERFLOW;
if (file) { if (file) {
/* validate file mapping requests */
struct address_space *mapping;
/* files must support mmap */ /* files must support mmap */
if (!file->f_op->mmap) if (!file->f_op->mmap)
return -ENODEV; return -ENODEV;
@ -957,28 +954,22 @@ static int validate_mmap_request(struct file *file,
* - we support chardevs that provide their own "memory" * - we support chardevs that provide their own "memory"
* - we support files/blockdevs that are memory backed * - we support files/blockdevs that are memory backed
*/ */
mapping = file->f_mapping; if (file->f_op->mmap_capabilities) {
if (!mapping) capabilities = file->f_op->mmap_capabilities(file);
mapping = file_inode(file)->i_mapping; } else {
capabilities = 0;
if (mapping && mapping->backing_dev_info)
capabilities = mapping->backing_dev_info->capabilities;
if (!capabilities) {
/* no explicit capabilities set, so assume some /* no explicit capabilities set, so assume some
* defaults */ * defaults */
switch (file_inode(file)->i_mode & S_IFMT) { switch (file_inode(file)->i_mode & S_IFMT) {
case S_IFREG: case S_IFREG:
case S_IFBLK: case S_IFBLK:
capabilities = BDI_CAP_MAP_COPY; capabilities = NOMMU_MAP_COPY;
break; break;
case S_IFCHR: case S_IFCHR:
capabilities = capabilities =
BDI_CAP_MAP_DIRECT | NOMMU_MAP_DIRECT |
BDI_CAP_READ_MAP | NOMMU_MAP_READ |
BDI_CAP_WRITE_MAP; NOMMU_MAP_WRITE;
break; break;
default: default:
@ -989,9 +980,9 @@ static int validate_mmap_request(struct file *file,
/* eliminate any capabilities that we can't support on this /* eliminate any capabilities that we can't support on this
* device */ * device */
if (!file->f_op->get_unmapped_area) if (!file->f_op->get_unmapped_area)
capabilities &= ~BDI_CAP_MAP_DIRECT; capabilities &= ~NOMMU_MAP_DIRECT;
if (!file->f_op->read) if (!file->f_op->read)
capabilities &= ~BDI_CAP_MAP_COPY; capabilities &= ~NOMMU_MAP_COPY;
/* The file shall have been opened with read permission. */ /* The file shall have been opened with read permission. */
if (!(file->f_mode & FMODE_READ)) if (!(file->f_mode & FMODE_READ))
@ -1010,29 +1001,29 @@ static int validate_mmap_request(struct file *file,
if (locks_verify_locked(file)) if (locks_verify_locked(file))
return -EAGAIN; return -EAGAIN;
if (!(capabilities & BDI_CAP_MAP_DIRECT)) if (!(capabilities & NOMMU_MAP_DIRECT))
return -ENODEV; return -ENODEV;
/* we mustn't privatise shared mappings */ /* we mustn't privatise shared mappings */
capabilities &= ~BDI_CAP_MAP_COPY; capabilities &= ~NOMMU_MAP_COPY;
} else { } else {
/* we're going to read the file into private memory we /* we're going to read the file into private memory we
* allocate */ * allocate */
if (!(capabilities & BDI_CAP_MAP_COPY)) if (!(capabilities & NOMMU_MAP_COPY))
return -ENODEV; return -ENODEV;
/* we don't permit a private writable mapping to be /* we don't permit a private writable mapping to be
* shared with the backing device */ * shared with the backing device */
if (prot & PROT_WRITE) if (prot & PROT_WRITE)
capabilities &= ~BDI_CAP_MAP_DIRECT; capabilities &= ~NOMMU_MAP_DIRECT;
} }
if (capabilities & BDI_CAP_MAP_DIRECT) { if (capabilities & NOMMU_MAP_DIRECT) {
if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) || if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP)) ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
) { ) {
capabilities &= ~BDI_CAP_MAP_DIRECT; capabilities &= ~NOMMU_MAP_DIRECT;
if (flags & MAP_SHARED) { if (flags & MAP_SHARED) {
printk(KERN_WARNING printk(KERN_WARNING
"MAP_SHARED not completely supported on !MMU\n"); "MAP_SHARED not completely supported on !MMU\n");
@ -1049,21 +1040,21 @@ static int validate_mmap_request(struct file *file,
} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
/* handle implication of PROT_EXEC by PROT_READ */ /* handle implication of PROT_EXEC by PROT_READ */
if (current->personality & READ_IMPLIES_EXEC) { if (current->personality & READ_IMPLIES_EXEC) {
if (capabilities & BDI_CAP_EXEC_MAP) if (capabilities & NOMMU_MAP_EXEC)
prot |= PROT_EXEC; prot |= PROT_EXEC;
} }
} else if ((prot & PROT_READ) && } else if ((prot & PROT_READ) &&
(prot & PROT_EXEC) && (prot & PROT_EXEC) &&
!(capabilities & BDI_CAP_EXEC_MAP) !(capabilities & NOMMU_MAP_EXEC)
) { ) {
/* backing file is not executable, try to copy */ /* backing file is not executable, try to copy */
capabilities &= ~BDI_CAP_MAP_DIRECT; capabilities &= ~NOMMU_MAP_DIRECT;
} }
} else { } else {
/* anonymous mappings are always memory backed and can be /* anonymous mappings are always memory backed and can be
* privately mapped * privately mapped
*/ */
capabilities = BDI_CAP_MAP_COPY; capabilities = NOMMU_MAP_COPY;
/* handle PROT_EXEC implication by PROT_READ */ /* handle PROT_EXEC implication by PROT_READ */
if ((prot & PROT_READ) && if ((prot & PROT_READ) &&
@ -1095,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
/* vm_flags |= mm->def_flags; */ /* vm_flags |= mm->def_flags; */
if (!(capabilities & BDI_CAP_MAP_DIRECT)) { if (!(capabilities & NOMMU_MAP_DIRECT)) {
/* attempt to share read-only copies of mapped file chunks */ /* attempt to share read-only copies of mapped file chunks */
vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (file && !(prot & PROT_WRITE)) if (file && !(prot & PROT_WRITE))
@ -1104,7 +1095,7 @@ static unsigned long determine_vm_flags(struct file *file,
/* overlay a shareable mapping on the backing device or inode /* overlay a shareable mapping on the backing device or inode
* if possible - used for chardevs, ramfs/tmpfs/shmfs and * if possible - used for chardevs, ramfs/tmpfs/shmfs and
* romfs/cramfs */ * romfs/cramfs */
vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
if (flags & MAP_SHARED) if (flags & MAP_SHARED)
vm_flags |= VM_SHARED; vm_flags |= VM_SHARED;
} }
@ -1157,7 +1148,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
* shared mappings on devices or memory * shared mappings on devices or memory
* - VM_MAYSHARE will be set if it may attempt to share * - VM_MAYSHARE will be set if it may attempt to share
*/ */
if (capabilities & BDI_CAP_MAP_DIRECT) { if (capabilities & NOMMU_MAP_DIRECT) {
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) { if (ret == 0) {
/* shouldn't return success if we're not sharing */ /* shouldn't return success if we're not sharing */
@ -1346,7 +1337,7 @@ unsigned long do_mmap_pgoff(struct file *file,
if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
!(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
/* new mapping is not a subset of the region */ /* new mapping is not a subset of the region */
if (!(capabilities & BDI_CAP_MAP_DIRECT)) if (!(capabilities & NOMMU_MAP_DIRECT))
goto sharing_violation; goto sharing_violation;
continue; continue;
} }
@ -1385,7 +1376,7 @@ unsigned long do_mmap_pgoff(struct file *file,
* - this is the hook for quasi-memory character devices to * - this is the hook for quasi-memory character devices to
* tell us the location of a shared mapping * tell us the location of a shared mapping
*/ */
if (capabilities & BDI_CAP_MAP_DIRECT) { if (capabilities & NOMMU_MAP_DIRECT) {
addr = file->f_op->get_unmapped_area(file, addr, len, addr = file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags); pgoff, flags);
if (IS_ERR_VALUE(addr)) { if (IS_ERR_VALUE(addr)) {
@ -1397,10 +1388,10 @@ unsigned long do_mmap_pgoff(struct file *file,
* the mapping so we'll have to attempt to copy * the mapping so we'll have to attempt to copy
* it */ * it */
ret = -ENODEV; ret = -ENODEV;
if (!(capabilities & BDI_CAP_MAP_COPY)) if (!(capabilities & NOMMU_MAP_COPY))
goto error_just_free; goto error_just_free;
capabilities &= ~BDI_CAP_MAP_DIRECT; capabilities &= ~NOMMU_MAP_DIRECT;
} else { } else {
vma->vm_start = region->vm_start = addr; vma->vm_start = region->vm_start = addr;
vma->vm_end = region->vm_end = addr + len; vma->vm_end = region->vm_end = addr + len;
@ -1411,7 +1402,7 @@ unsigned long do_mmap_pgoff(struct file *file,
vma->vm_region = region; vma->vm_region = region;
/* set up the mapping /* set up the mapping
* - the region is filled in if BDI_CAP_MAP_DIRECT is still set * - the region is filled in if NOMMU_MAP_DIRECT is still set
*/ */
if (file && vma->vm_flags & VM_SHARED) if (file && vma->vm_flags & VM_SHARED)
ret = do_mmap_shared_file(vma); ret = do_mmap_shared_file(vma);


@ -726,16 +726,15 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
return prot | PROT_EXEC; return prot | PROT_EXEC;
/* /*
* ditto if it's not on noexec mount, except that on !MMU we need * ditto if it's not on noexec mount, except that on !MMU we need
* BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
*/ */
if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
unsigned long caps = 0; if (file->f_op->mmap_capabilities) {
struct address_space *mapping = file->f_mapping; unsigned caps = file->f_op->mmap_capabilities(file);
if (mapping && mapping->backing_dev_info) if (!(caps & NOMMU_MAP_EXEC))
caps = mapping->backing_dev_info->capabilities; return prot;
if (!(caps & BDI_CAP_EXEC_MAP)) }
return prot;
#endif #endif
return prot | PROT_EXEC; return prot | PROT_EXEC;
} }