staging: vme: mmap() support for vme_user

This adds an mmap() interface to vme_user so that master windows can be mapped
straight into userspace. We also make sure that the user won't be able to
reconfigure a window while it is mmap'ed.

Signed-off-by: Dmitry Kalinkin <dmitry.kalinkin@gmail.com>
Cc: Martyn Welch <martyn.welch@ge.com>
Cc: Igor Alekseev <igor.alekseev@itep.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Authored by Dmitry Kalinkin on 2015-02-26 18:53:10 +03:00, committed by Greg Kroah-Hartman
Parent: 0cd189a42d
Commit: c74a804f11
3 changed files, 112 additions and 0 deletions
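
For orientation, a minimal userspace sketch of how the new interface would be
exercised. It is not part of this commit; the /dev/bus/vme/m0 node name, the
4 KiB mapping size, and the assumption that the window has already been
configured (e.g. via VME_SET_MASTER) are illustrative only.

/*
 * Hypothetical usage sketch, not part of this commit.  Assumes master
 * window 0 is exposed as /dev/bus/vme/m0 and already covers at least
 * one page of VME address space.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/bus/vme/m0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Map the first page of the master window into this process. */
	volatile uint32_t *win = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_SHARED, fd, 0);
	if (win == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first longword: 0x%08x\n", win[0]);

	/*
	 * While this mapping exists, VME_SET_MASTER on the same window
	 * fails with EPERM, so the geometry cannot change underneath us.
	 */
	munmap((void *)win, 4096);
	close(fd);
	return 0;
}

Note that only master minors implement mmap(); as the diff below shows, slave
and control minors still get -ENODEV.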

drivers/staging/vme/devices/vme_user.c

@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -99,6 +100,7 @@ struct image_desc {
	struct device *device;		/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;			/* Number of current users */
	int mmap_count;			/* Number of current mmap's */
};

static struct image_desc image[VME_DEVS];
@@ -134,6 +136,10 @@ static ssize_t vme_user_write(struct file *, const char __user *, size_t,
			loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
static int vme_user_mmap(struct file *file, struct vm_area_struct *vma);
static void vme_user_vm_open(struct vm_area_struct *vma);
static void vme_user_vm_close(struct vm_area_struct *vma);

static int vme_user_match(struct vme_dev *);
static int vme_user_probe(struct vme_dev *);
@@ -147,6 +153,17 @@ static const struct file_operations vme_user_fops = {
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};

struct vme_user_vma_priv {
	unsigned int minor;
	atomic_t refcnt;
};

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
@@ -488,6 +505,11 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				pr_warn("Partial copy from userspace\n");
@@ -564,6 +586,69 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	atomic_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!atomic_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL);
	if (vma_priv == NULL) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	atomic_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}

/*
 * Unallocate a previously allocated buffer
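
A note on the vm_open()/vm_close() pair above: the per-mapping refcount exists
because a VMA can be duplicated (for example across fork()), and mmap_count
must only drop once the last copy of the mapping is gone. A sketch of that
lifetime, again with an illustrative device node and size and with error
handling omitted:

/*
 * Illustrative sketch only, not part of this commit.  Error handling is
 * omitted; /dev/bus/vme/m0 and the 4 KiB size are assumptions.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/bus/vme/m0", O_RDWR);
	void *win = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	pid_t pid = fork();	/* child inherits the VMA -> vm_open(), refcnt 2 */
	if (pid == 0) {
		sleep(1);	/* child keeps the window pinned */
		_exit(0);	/* child exit -> vm_close(), refcnt reaches 0 */
	}

	munmap(win, 4096);	/* parent unmap -> vm_close(); while the child
				 * still holds its copy, mmap_count stays up and
				 * VME_SET_MASTER would keep returning EPERM */
	waitpid(pid, NULL, 0);	/* after the last vm_close(), mmap_count-- and
				 * the window can be reconfigured again */
	close(fd);
	return 0;
}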

drivers/vme/vme.c

@@ -609,6 +609,32 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
}
EXPORT_SYMBOL(vme_master_rmw);

int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);

void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

include/linux/vme.h

@@ -137,6 +137,7 @@ ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
	unsigned int, loff_t);
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
void vme_master_free(struct vme_resource *);

struct vme_resource *vme_dma_request(struct vme_dev *, u32);