drm/vmwgfx: Fix stale file descriptors on failed usercopy

A failing usercopy of the fence_rep object will lead to a stale entry in
the file descriptor table as put_unused_fd() won't release it. This
enables userland to refer to a dangling 'file' object through that still
valid file descriptor, leading to all kinds of use-after-free
exploitation scenarios.

Fix this by deferring the call to fd_install() until after the usercopy
has succeeded.

Fixes: c906965dee ("drm/vmwgfx: Add export fence to file descriptor support")
Signed-off-by: Mathias Krause <minipli@grsecurity.net>
Signed-off-by: Zack Rusin <zackr@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Mathias Krause, 2022-01-27 18:34:19 +10:00, committed by Linus Torvalds
Parent 626b2dda76
Commit a0f90c8815
4 changed files with 21 additions and 21 deletions

View file: drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -1140,15 +1140,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
 				      struct vmw_fence_obj **p_fence,
 				      uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
-					struct vmw_fpriv *vmw_fp,
-					int ret,
-					struct drm_vmw_fence_rep __user
-					*user_fence_rep,
-					struct vmw_fence_obj *fence,
-					uint32_t fence_handle,
-					int32_t out_fence_fd,
-					struct sync_file *sync_file);
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+				       struct vmw_fpriv *vmw_fp,
+				       int ret,
+				       struct drm_vmw_fence_rep __user
+				       *user_fence_rep,
+				       struct vmw_fence_obj *fence,
+				       uint32_t fence_handle,
+				       int32_t out_fence_fd);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**

View file: drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -3879,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * Also if copying fails, user-space will be unable to signal the fence object
  * so we wait for it immediately, and then unreference the user-space reference.
  */
-void
+int
 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 			    struct vmw_fpriv *vmw_fp, int ret,
 			    struct drm_vmw_fence_rep __user *user_fence_rep,
 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
-			    int32_t out_fence_fd, struct sync_file *sync_file)
+			    int32_t out_fence_fd)
 {
 	struct drm_vmw_fence_rep fence_rep;
 
 	if (user_fence_rep == NULL)
-		return;
+		return 0;
 
 	memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3917,19 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	 * handle.
 	 */
 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
-		if (sync_file)
-			fput(sync_file->file);
-
-		if (fence_rep.fd != -1) {
-			put_unused_fd(fence_rep.fd);
-			fence_rep.fd = -1;
-		}
-
 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
 		(void) vmw_fence_obj_wait(fence, false, false,
 					  VMW_FENCE_WAIT_TIMEOUT);
 	}
+
+	return ret ? -EFAULT : 0;
 }
 
 /**
/** /**
@@ -4266,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			(void) vmw_fence_obj_wait(fence, false, false,
 						  VMW_FENCE_WAIT_TIMEOUT);
+		}
+	}
+
+	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+					  user_fence_rep, fence, handle, out_fence_fd);
+
+	if (sync_file) {
+		if (ret) {
+			/* usercopy of fence failed, put the file object */
+			fput(sync_file->file);
+			put_unused_fd(out_fence_fd);
 		} else {
 			/* Link the fence with the FD created earlier */
 			fd_install(out_fence_fd, sync_file->file);
 		}
 	}
 
-	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-				    user_fence_rep, fence, handle, out_fence_fd,
-				    sync_file);
-
 	/* Don't unreference when handing fence out */
 	if (unlikely(out_fence != NULL)) {
 		*out_fence = fence;
@@ -4293,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 */
 	vmw_validation_unref_lists(&val_ctx);
 
-	return 0;
+	return ret;
 
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);

View file: drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

@@ -1128,7 +1128,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	}
 
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
-				    handle, -1, NULL);
+				    handle, -1);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
 out_no_create:

View file: drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -2501,7 +2501,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,
-					    handle, -1, NULL);
+					    handle, -1);
 	if (out_fence)
 		*out_fence = fence;
 	else