drm/virtio: add virtio_gpu_object_array & helpers

Some helper functions to manage an array of gem objects.

v9: use dma_resv_lock_interruptible.
v6:
 - add ticket to struct virtio_gpu_object_array.
 - add virtio_gpu_array_{lock,unlock}_resv helpers.
 - add virtio_gpu_array_add_fence helper.
v5: some small optimizations (Chia-I Wu).
v4: make them virtio-private instead of generic helpers.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-8-kraxel@redhat.com
Gerd Hoffmann 2019-08-29 12:32:50 +02:00
Parent cde14fd4a6
Commit 98abe21d07
2 changed files with 110 additions and 0 deletions
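
For orientation, here is a minimal sketch of how these helpers are meant to be chained in a submit-style path. Everything except the virtio_gpu_array_* calls (the caller, its parameters, and the error codes) is assumed for illustration; in the real driver the array would typically be kept alive until the host signals command completion rather than freed immediately.

/* Hypothetical caller, not part of this patch. */
static int example_submit(struct drm_file *file, u32 *handles, u32 nents,
                          struct dma_fence *fence)
{
        struct virtio_gpu_object_array *objs;
        int ret;

        /* looks up each handle and takes a reference */
        objs = virtio_gpu_array_from_handles(file, handles, nents);
        if (!objs)
                return -ENOENT;

        /* locks every reservation object, deadlock-free via the ww ticket */
        ret = virtio_gpu_array_lock_resv(objs);
        if (ret)
                goto put_free;

        /* ... queue the command that uses the objects ... */

        /* attach the fence while the locks are held, then drop them */
        virtio_gpu_array_add_fence(objs, fence);
        virtio_gpu_array_unlock_resv(objs);

put_free:
        /* drops the references taken by _from_handles() */
        virtio_gpu_array_put_free(objs);
        return ret;
}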

drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -84,6 +84,12 @@ struct virtio_gpu_object {
 #define gem_to_virtio_gpu_obj(gobj) \
        container_of((gobj), struct virtio_gpu_object, gem_base)

struct virtio_gpu_object_array {
        struct ww_acquire_ctx ticket;   /* for locking multiple resvs at once */
        u32 nents, total;               /* filled slots / allocated capacity */
        struct drm_gem_object *objs[];
};

struct virtio_gpu_vbuffer;
struct virtio_gpu_device;
@@ -251,6 +257,17 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
                              struct drm_device *dev,
                              uint32_t handle, uint64_t *offset_p);

struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file,
                              u32 *handles, u32 nents);
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
                              struct drm_gem_object *obj);
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
                                struct dma_fence *fence);
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);

/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
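
The declarations above expose two construction paths: one-shot from user handles, or incremental via alloc plus add_obj. A minimal sketch of the incremental path, with obj_a and obj_b standing in for GEM objects the caller already holds (illustrative names, not from the patch):

        struct virtio_gpu_object_array *objs;

        objs = virtio_gpu_array_alloc(2);
        if (!objs)
                return -ENOMEM;

        virtio_gpu_array_add_obj(objs, obj_a);  /* takes its own reference */
        virtio_gpu_array_add_obj(objs, obj_b);

        /* ... use the array ... */

        virtio_gpu_array_put_free(objs);        /* drops both references */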

drivers/gpu/drm/virtio/virtgpu_gem.c

@@ -171,3 +171,96 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
                                         qobj->hw_res_handle);
        virtio_gpu_object_unreserve(qobj);
 }

struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
        struct virtio_gpu_object_array *objs;
        size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

        objs = kmalloc(size, GFP_KERNEL);
        if (!objs)
                return NULL;

        objs->nents = 0;
        objs->total = nents;
        return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
        kfree(objs);
}

struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file,
                              u32 *handles, u32 nents)
{
        struct virtio_gpu_object_array *objs;
        u32 i;

        objs = virtio_gpu_array_alloc(nents);
        if (!objs)
                return NULL;

        for (i = 0; i < nents; i++) {
                objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
                if (!objs->objs[i]) {
                        /* drop the references taken so far, then free */
                        objs->nents = i;
                        virtio_gpu_array_put_free(objs);
                        return NULL;
                }
        }
        objs->nents = i;
        return objs;
}

void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
                              struct drm_gem_object *obj)
{
        if (WARN_ON_ONCE(objs->nents == objs->total))
                return;

        drm_gem_object_get(obj);
        objs->objs[objs->nents] = obj;
        objs->nents++;
}

int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
        int ret;

        if (objs->nents == 1) {
                /* single resv: no deadlock possible, no ww ticket needed */
                ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
        } else {
                ret = drm_gem_lock_reservations(objs->objs, objs->nents,
                                                &objs->ticket);
        }
        return ret;
}

void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
        if (objs->nents == 1) {
                dma_resv_unlock(objs->objs[0]->resv);
        } else {
                drm_gem_unlock_reservations(objs->objs, objs->nents,
                                            &objs->ticket);
        }
}

void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
                                struct dma_fence *fence)
{
        int i;

        for (i = 0; i < objs->nents; i++)
                dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
        u32 i;

        for (i = 0; i < objs->nents; i++)
                drm_gem_object_put_unlocked(objs->objs[i]);
        virtio_gpu_array_free(objs);
}
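
Two closing notes on the implementation. First, dma_resv_add_excl_fence() must be called with the reservation lock held, so virtio_gpu_array_add_fence() only makes sense between virtio_gpu_array_lock_resv() and virtio_gpu_array_unlock_resv(), as in the lifecycle sketch above. Second, the open-coded size computation in virtio_gpu_array_alloc() is the standard flexible-array-member pattern; an equivalent, overflow-checked spelling using the kernel's struct_size() helper from <linux/overflow.h> would be (a possible follow-up, not part of this patch):

        objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);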