msm: camera: memmgr: Avoid TOCTOU buffer access on multiple use of same fd
fd is a userspace-controlled value, and resolving it more than once opens a TOCTOU window: the dma_buf can be freed after the first lookup, and userspace can allocate a new dma_buf that receives the same fd number. A second lookup of the same fd may then return a different dma_buf with a different length. Avoid this by resolving the fd to its dma_buf exactly once per syscall and passing that same struct dma_buf to every stage that needs it.

CRs-Fixed: 3159446
Change-Id: I00eb6dd3d798165f5c6c0bd59feabe80a68592b1
Signed-off-by: Yash Upadhyay <quic_yupadhya@quicinc.com>
Parent: d35e7e899b
Commit: 336163ddec
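For context, a minimal sketch of the race being closed, in the spirit of the change rather than the driver's exact call chain; the two functions below are illustrative, and only the in-kernel dma-buf API (dma_buf_get/dma_buf_put) is assumed:

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	/* TOCTOU-prone: the fd is resolved twice, and the buffer it
	 * names can change between the two lookups. */
	static int lookup_twice(int fd)
	{
		struct dma_buf *a, *b;
		size_t first_len;

		a = dma_buf_get(fd);		/* 1st resolution */
		if (IS_ERR_OR_NULL(a))
			return -EINVAL;
		first_len = a->size;
		dma_buf_put(a);

		/* Window: userspace may close fd here and create a new
		 * dma_buf that is assigned the same fd number. */

		b = dma_buf_get(fd);		/* 2nd resolution */
		if (IS_ERR_OR_NULL(b))
			return -EINVAL;
		/* b->size may differ from first_len: time-of-check vs
		 * time-of-use mismatch. */
		dma_buf_put(b);
		return 0;
	}

	/* Fixed shape: resolve once, then hand the same dma_buf to
	 * every consumer instead of the fd. */
	static int lookup_once(int fd)
	{
		struct dma_buf *buf = dma_buf_get(fd);

		if (IS_ERR_OR_NULL(buf))
			return -EINVAL;
		/* ... pass 'buf' itself down the mapping path ... */
		dma_buf_put(buf);
		return 0;
	}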
cam_mem_mgr.c:

@@ -577,7 +577,7 @@ static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
 static int cam_mem_util_map_hw_va(uint32_t flags,
 	int32_t *mmu_hdls,
 	int32_t num_hdls,
-	int fd,
+	int fd, struct dma_buf *dmabuf,
 	dma_addr_t *hw_vaddr,
 	size_t *len,
 	enum cam_smmu_region_id region,
@@ -606,7 +606,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 			fd,
 			dir,
 			hw_vaddr,
-			len);
+			len,
+			dmabuf);
 
 		if (rc < 0) {
 			CAM_ERR(CAM_MEM,
@@ -624,7 +625,8 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 			(dma_addr_t *)hw_vaddr,
 			len,
 			region,
-			is_internal);
+			is_internal,
+			dmabuf);
 
 		if (rc < 0) {
 			CAM_ERR(CAM_MEM,
@@ -716,6 +718,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 		cmd->mmu_hdls,
 		cmd->num_hdl,
 		fd,
+		dmabuf,
 		&hw_vaddr,
 		&len,
 		region,
@@ -856,6 +859,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 		cmd->mmu_hdls,
 		cmd->num_hdl,
 		cmd->fd,
+		dmabuf,
 		&hw_vaddr,
 		&len,
 		CAM_SMMU_REGION_IO,
@@ -1109,8 +1113,6 @@ static int cam_mem_util_unmap(int32_t idx,
 		if (cam_mem_util_unmap_hw_va(idx, region, client))
 			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
 				tbl.bufq[idx].dma_buf);
-		if (client == CAM_SMMU_MAPPING_KERNEL)
-			tbl.bufq[idx].dma_buf = NULL;
 	}
 
 	mutex_lock(&tbl.m_lock);
@@ -1126,8 +1128,7 @@ static int cam_mem_util_unmap(int32_t idx,
 		tbl.bufq[idx].is_imported,
 		tbl.bufq[idx].dma_buf);
 
-	if (tbl.bufq[idx].dma_buf)
-		dma_buf_put(tbl.bufq[idx].dma_buf);
+	dma_buf_put(tbl.bufq[idx].dma_buf);
 
 	tbl.bufq[idx].fd = -1;
 	tbl.bufq[idx].dma_buf = NULL;
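The memmgr hunks above change reference ownership: the single reference taken at syscall entry is now owned by tbl.bufq[], so cam_mem_util_unmap() drops it exactly once, unconditionally, and the CAM_SMMU_MAPPING_KERNEL special case for clearing the pointer goes away. A minimal sketch of the resulting invariant (field names as in the hunks above):

	/* map: the one dma_buf_get() reference is parked in the table */
	tbl.bufq[idx].dma_buf = dmabuf;

	/* unmap: one unconditional put balances it */
	dma_buf_put(tbl.bufq[idx].dma_buf);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;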
cam_smmu_api.c:

@@ -262,7 +262,7 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal);
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);
 
 static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
 	struct dma_buf *buf, enum dma_data_direction dma_dir,
@@ -2028,7 +2028,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	if (IS_ERR_OR_NULL(attach)) {
 		rc = PTR_ERR(attach);
 		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
-		goto err_put;
+		goto err_out;
 	}
 
 	if (region_id == CAM_SMMU_REGION_SHARED) {
@@ -2175,8 +2175,6 @@ err_unmap_sg:
 	dma_buf_unmap_attachment(attach, table, dma_dir);
 err_detach:
 	dma_buf_detach(buf, attach);
-err_put:
-	dma_buf_put(buf);
 err_out:
 	return rc;
 }
@@ -2185,14 +2183,10 @@ err_out:
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal)
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf)
 {
 	int rc = -1;
 	struct cam_dma_buff_info *mapping_info = NULL;
-	struct dma_buf *buf = NULL;
-
-	/* returns the dma_buf structure related to an fd */
-	buf = dma_buf_get(ion_fd);
 
 	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
 		region_id, dis_delayed_unmap, &mapping_info);
@@ -2316,7 +2310,6 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 
 
 	dma_buf_detach(mapping_info->buf, mapping_info->attach);
-	dma_buf_put(mapping_info->buf);
 
 	if (iommu_cb_set.map_profile_enable) {
 		CAM_GET_TIMESTAMP(ts2);
@@ -2817,10 +2810,9 @@ handle_err:
 
 static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr)
+	size_t *len_ptr, struct dma_buf *dmabuf)
 {
 	int rc = 0;
-	struct dma_buf *dmabuf = NULL;
 	struct dma_buf_attachment *attach = NULL;
 	struct sg_table *table = NULL;
 	struct cam_sec_buff_info *mapping_info;
@@ -2829,15 +2821,6 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 	*paddr_ptr = (dma_addr_t)NULL;
 	*len_ptr = (size_t)0;
 
-	dmabuf = dma_buf_get(ion_fd);
-	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
-		CAM_ERR(CAM_SMMU,
-			"Error: dma buf get failed, idx=%d, ion_fd=%d",
-			idx, ion_fd);
-		rc = PTR_ERR(dmabuf);
-		goto err_out;
-	}
-
 	/*
	 * ion_phys() is deprecated. call dma_buf_attach() and
	 * dma_buf_map_attachment() to get the buffer's physical
@@ -2849,7 +2832,7 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 			"Error: dma buf attach failed, idx=%d, ion_fd=%d",
 			idx, ion_fd);
 		rc = PTR_ERR(attach);
-		goto err_put;
+		goto err_out;
 	}
 
 	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
@@ -2895,15 +2878,14 @@ err_unmap_sg:
 	dma_buf_unmap_attachment(attach, table, dma_dir);
 err_detach:
 	dma_buf_detach(dmabuf, attach);
-err_put:
-	dma_buf_put(dmabuf);
 err_out:
 	return rc;
 }
 
 int cam_smmu_map_stage2_iova(int handle,
 	int ion_fd, enum cam_smmu_map_dir dir,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	struct dma_buf *dmabuf)
 {
 	int idx, rc;
 	enum dma_data_direction dma_dir;
@@ -2962,7 +2944,7 @@ int cam_smmu_map_stage2_iova(int handle,
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
-		paddr_ptr, len_ptr);
+		paddr_ptr, len_ptr, dmabuf);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d",
@@ -2998,7 +2980,6 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list(
 	dma_buf_unmap_attachment(mapping_info->attach,
 		mapping_info->table, mapping_info->dir);
 	dma_buf_detach(mapping_info->buf, mapping_info->attach);
-	dma_buf_put(mapping_info->buf);
 	mapping_info->buf = NULL;
 
 	list_del_init(&mapping_info->list);
@@ -3116,7 +3097,7 @@ static int cam_smmu_map_iova_validate_params(int handle,
 int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
 	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
-	bool is_internal)
+	bool is_internal, struct dma_buf *dmabuf)
 {
 	int idx, rc = 0;
 	struct timespec64 *ts = NULL;
@@ -3180,7 +3161,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
 
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd,
 		dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr,
-		region_id, is_internal);
+		region_id, is_internal, dmabuf);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d",
cam_smmu_api.h:

@@ -128,7 +128,7 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
  */
 int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
 	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal);
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);
 
 /**
  * @brief : Maps kernel space IOVA for calling driver
@@ -304,7 +304,7 @@ int cam_smmu_put_iova(int handle, int ion_fd);
  */
 int cam_smmu_map_stage2_iova(int handle,
 	int ion_fd, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr,
-	size_t *len_ptr);
+	size_t *len_ptr, struct dma_buf *dmabuf);
 
 /**
  * @brief Unmaps secure memopry for SMMU handle
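A hedged sketch of how a caller drives the new signatures; handle, fd, and the local variables are assumed, CAM_SMMU_REGION_IO appears in the memmgr hunk above, and CAM_SMMU_MAP_RW is taken to be one of the enum cam_smmu_map_dir values:

	struct dma_buf *dmabuf;
	dma_addr_t paddr;
	size_t len;
	int rc;

	dmabuf = dma_buf_get(fd);	/* the only fd lookup in the syscall */
	if (IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	/* Non-secure path: the resolved dma_buf rides along with the fd. */
	rc = cam_smmu_map_user_iova(handle, fd, false, CAM_SMMU_MAP_RW,
		&paddr, &len, CAM_SMMU_REGION_IO, false, dmabuf);

	/* Secure (stage-2) path: same single resolution is reused. */
	rc = cam_smmu_map_stage2_iova(handle, fd, CAM_SMMU_MAP_RW,
		&paddr, &len, dmabuf);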