misc: fastrpc: add support for FASTRPC_IOCTL_MEM_MAP/UNMAP

Add support for IOCTL requests to map and unmap memory on the DSP,
based on the map flags supplied by the caller.

Signed-off-by: Jeya R <jeyr@codeaurora.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20220214161002.6831-3-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Jeya R 2022-02-14 16:09:52 +00:00, committed by Greg Kroah-Hartman
Parent 965602eabb
Commit 5c1b97c7d7
2 changed files: 205 additions and 0 deletions

drivers/misc/fastrpc.c

@@ -72,6 +72,8 @@
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
#define FASTRPC_RMID_INIT_MEM_MAP 10
#define FASTRPC_RMID_INIT_MEM_UNMAP 11
/* Protection Domain(PD) ids */
#define AUDIO_PD (0) /* also GUEST_OS PD? */
@@ -108,12 +110,29 @@ struct fastrpc_mmap_req_msg {
s32 num;
};
struct fastrpc_mem_map_req_msg {
s32 pgid;
s32 fd;
s32 offset;
u32 flags;
u64 vaddrin;
s32 num;
s32 data_len;
};
struct fastrpc_munmap_req_msg {
s32 pgid;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
s32 pgid;
s32 fd;
u64 vaddrin;
u64 len;
};
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -170,6 +189,7 @@ struct fastrpc_map {
u64 size;
void *va;
u64 len;
u64 raddr;
struct kref refcount;
};
@@ -1500,6 +1520,134 @@ err_invoke:
return err;
}
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_map *map = NULL, *m;
struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
int err = 0;
u32 sc;
struct device *dev = fl->sctx->dev;
spin_lock(&fl->lock);
list_for_each_entry_safe(map, m, &fl->maps, node) {
if ((req->fd < 0 || map->fd == req->fd) && (map->raddr == req->vaddr))
break;
map = NULL;
}
spin_unlock(&fl->lock);
if (!map) {
dev_err(dev, "map not in list\n");
return -EINVAL;
}
req_msg.pgid = fl->tgid;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
	/* log before dropping the reference; the put may free the map */
	if (err)
		dev_err(dev, "mem unmap error, fd %d, addr 0x%09llx\n",
			map->fd, map->raddr);
	fastrpc_map_put(map);
return err;
}
static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_mem_unmap req;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
return fastrpc_req_mem_unmap_impl(fl, &req);
}
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
struct fastrpc_mem_map_req_msg req_msg = { 0 };
struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
struct fastrpc_mem_unmap req_unmap = { 0 };
struct fastrpc_phy_page pages = { 0 };
struct fastrpc_mem_map req;
struct device *dev = fl->sctx->dev;
struct fastrpc_map *map = NULL;
int err;
u32 sc;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
/* create SMMU mapping */
err = fastrpc_map_create(fl, req.fd, req.length, &map);
if (err) {
dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
return err;
}
req_msg.pgid = fl->tgid;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
map->va = (void *) (uintptr_t) req.vaddrin;
req_msg.flags = req.flags;
req_msg.num = sizeof(pages);
req_msg.data_len = 0;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
pages.addr = map->phys;
pages.size = map->size;
args[1].ptr = (u64) (uintptr_t) &pages;
args[1].length = sizeof(pages);
args[2].ptr = (u64) (uintptr_t) &pages;
args[2].length = 0;
args[3].ptr = (u64) (uintptr_t) &rsp_msg;
args[3].length = sizeof(rsp_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
req.fd, req.vaddrin, map->size);
goto err_invoke;
}
/* update the buffer to be able to deallocate the memory on the DSP */
map->raddr = rsp_msg.vaddr;
/* let the client know the address to use */
req.vaddrout = rsp_msg.vaddr;
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
/* unmap the memory and release the buffer */
req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
req_unmap.length = map->size;
fastrpc_req_mem_unmap_impl(fl, &req_unmap);
return -EFAULT;
}
return 0;
err_invoke:
fastrpc_map_put(map);
return err;
}
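For context, the scalars word built by FASTRPC_SCALARS() packs the remote method id together with the buffer counts. The macros are defined earlier in fastrpc.c, roughly as follows (reproduced here for reference, not part of this diff):

/* Pack attributes, method id, input/output buffer counts and
 * input/output handle counts into one 32-bit scalars word.
 */
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
		(((attr & 0x07) << 29) |	\
		((method & 0x1f) << 24) |	\
		((in & 0xff) << 16) |		\
		((out & 0xff) << 8) |		\
		((oin & 0x0f) << 4) |		\
		(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out)	\
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)

FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1) therefore encodes method id 10 with three input buffers (req_msg, pages and the zero-length args[2]) and one output buffer (rsp_msg), matching the four args entries prepared above.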
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1529,6 +1677,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_MUNMAP:
err = fastrpc_req_munmap(fl, argp);
break;
case FASTRPC_IOCTL_MEM_MAP:
err = fastrpc_req_mem_map(fl, argp);
break;
case FASTRPC_IOCTL_MEM_UNMAP:
err = fastrpc_req_mem_unmap(fl, argp);
break;
default:
err = -ENOTTY;
break;

include/uapi/misc/fastrpc.h

@@ -13,6 +13,37 @@
#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
#define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8)
#define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map)
#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap)
/**
 * enum fastrpc_map_flags - control flags for mapping memory on the DSP user process
 * @FASTRPC_MAP_STATIC: Map memory pages with RW- permission and CACHE WRITEBACK.
 *                      The driver is responsible for cache maintenance when the
 *                      buffer is passed to FastRPC calls. The same virtual address
 *                      is assigned on subsequent FastRPC calls.
 * @FASTRPC_MAP_RESERVED: Reserved
 * @FASTRPC_MAP_FD: Map memory pages with RW- permission and CACHE WRITEBACK.
 *                  The mapping is tagged with a file descriptor. The user is
 *                  responsible for CPU and DSP cache maintenance of the buffer.
 *                  Get the buffer's virtual address on the DSP with the
 *                  HAP_mmap_get() and HAP_mmap_put() APIs.
 * @FASTRPC_MAP_FD_DELAYED: Mapping is delayed until the user calls HAP_mmap() and
 *                          HAP_munmap() on the DSP. Useful for mapping a buffer
 *                          with cache modes other than the defaults. The user is
 *                          responsible for CPU and DSP cache maintenance of the
 *                          buffer.
 * @FASTRPC_MAP_FD_NOMAP: Skip the CPU mapping; otherwise behaves like
 *                        FASTRPC_MAP_FD_DELAYED.
 * @FASTRPC_MAP_MAX: upper bound for the flag values
 */
enum fastrpc_map_flags {
FASTRPC_MAP_STATIC = 0,
FASTRPC_MAP_RESERVED,
FASTRPC_MAP_FD = 2,
FASTRPC_MAP_FD_DELAYED,
FASTRPC_MAP_FD_NOMAP = 16,
FASTRPC_MAP_MAX,
};
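As a rough sketch of how a client might choose between these flags (pick_map_flag() is a hypothetical helper, not part of this patch):

/* Hypothetical helper: choose a fastrpc map flag for a buffer.
 * Shown only to illustrate the intent of each flag.
 */
static __u32 pick_map_flag(int want_cpu_mapping, int map_on_dsp_later)
{
	if (!want_cpu_mapping)
		return FASTRPC_MAP_FD_NOMAP;	/* skip the CPU mapping */
	if (map_on_dsp_later)
		return FASTRPC_MAP_FD_DELAYED;	/* DSP side maps via HAP_mmap() */
	return FASTRPC_MAP_FD;	/* map now; caller handles cache maintenance */
}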
struct fastrpc_invoke_args {
__u64 ptr;
@@ -49,9 +80,29 @@ struct fastrpc_req_mmap {
__u64 vaddrout; /* dsp virtual address */
};
struct fastrpc_mem_map {
__s32 version;
__s32 fd; /* fd */
__s32 offset; /* buffer offset */
__u32 flags; /* flags defined in enum fastrpc_map_flags */
__u64 vaddrin; /* buffer virtual address */
__u64 length; /* buffer length */
__u64 vaddrout; /* [out] remote virtual address */
__s32 attrs; /* buffer attributes used for SMMU mapping */
__s32 reserved[4];
};
struct fastrpc_req_munmap {
__u64 vaddrout; /* address to unmap */
__u64 size; /* size */
};
struct fastrpc_mem_unmap {
	__s32 version;
__s32 fd; /* fd */
__u64 vaddr; /* remote process (dsp) virtual address */
__u64 length; /* buffer size */
__s32 reserved[5];
};
#endif /* __QCOM_FASTRPC_H__ */
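Taken together, a userspace client could exercise the new ioctls roughly as follows. This is a minimal sketch, not part of the patch: it assumes buf_fd is a valid dma-buf fd (for example one returned by FASTRPC_IOCTL_ALLOC_DMA_BUFF) and dev_fd is an open fastrpc device node such as /dev/fastrpc-adsp.

#include <string.h>
#include <sys/ioctl.h>
#include <misc/fastrpc.h>

static int dsp_map(int dev_fd, int buf_fd, __u64 len, __u64 *raddr)
{
	struct fastrpc_mem_map req;

	memset(&req, 0, sizeof(req));
	req.fd = buf_fd;
	req.flags = FASTRPC_MAP_FD;	/* caller handles cache maintenance */
	req.length = len;

	if (ioctl(dev_fd, FASTRPC_IOCTL_MEM_MAP, &req) < 0)
		return -1;

	*raddr = req.vaddrout;	/* remote address assigned by the DSP */
	return 0;
}

static int dsp_unmap(int dev_fd, int buf_fd, __u64 raddr, __u64 len)
{
	struct fastrpc_mem_unmap req;

	memset(&req, 0, sizeof(req));
	req.fd = buf_fd;	/* or -1 to match on the remote address alone */
	req.vaddr = raddr;
	req.length = len;

	return ioctl(dev_fd, FASTRPC_IOCTL_MEM_UNMAP, &req);
}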