Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
Mostly code reorganizations and optimizations for vmwgfx:
- Move TTM code that's only used by vmwgfx to vmwgfx
- Break out the vmwgfx buffer- and resource validation code to a separate source file
- Get rid of a number of atomic operations during command buffer validation

From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180928131157.2810-1-thellstrom@vmware.com
Commit: d04a836ea7
@@ -4,8 +4,8 @@
 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
-	ttm_bo_manager.o ttm_page_alloc_dma.o
+	ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
+	ttm_page_alloc_dma.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o

@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
 	if (likely(node)) {
 		bo = container_of(node, struct ttm_buffer_object, vma_node);
-		if (!kref_get_unless_zero(&bo->kref))
-			bo = NULL;
+		bo = ttm_bo_get_unless_zero(bo);
 	}
 
 	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

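The hunk above folds the open-coded kref dance into a single helper. Judging only from the removed lines, ttm_bo_get_unless_zero() is presumably equivalent to the following sketch (a reconstruction inferred from the replaced code, not necessarily the exact upstream definition):

static inline struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	/* Take a reference only if the object is not already on its way
	 * to destruction; otherwise report failure with NULL. */
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}
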
@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
-	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
+	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
+	    vmwgfx_validation.o \
+	    ttm_object.o ttm_lock.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o

@@ -29,13 +29,13 @@
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
 
-#include <drm/ttm/ttm_lock.h>
-#include <drm/ttm/ttm_module.h>
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/sched/signal.h>
 #include <linux/module.h>
+#include "ttm_lock.h"
+#include "ttm_object.h"
 
 #define TTM_WRITE_LOCK_PENDING (1 << 0)
 #define TTM_VT_LOCK_PENDING (1 << 1)

@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock)
 	lock->kill_takers = false;
 	lock->signal = SIGKILL;
 }
-EXPORT_SYMBOL(ttm_lock_init);
 
 void ttm_read_unlock(struct ttm_lock *lock)
 {

@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_read_unlock);
 
 static bool __ttm_read_lock(struct ttm_lock *lock)
 {

@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
 		wait_event(lock->queue, __ttm_read_lock(lock));
 	return ret;
 }
-EXPORT_SYMBOL(ttm_read_lock);
 
 static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
 {

@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_write_unlock);
 
 static bool __ttm_write_lock(struct ttm_lock *lock)
 {

@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
 
 	return ret;
 }
-EXPORT_SYMBOL(ttm_write_lock);
 
 static int __ttm_vt_unlock(struct ttm_lock *lock)
 {

@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock,
 
 	return ret;
 }
-EXPORT_SYMBOL(ttm_vt_lock);
 
 int ttm_vt_unlock(struct ttm_lock *lock)
 {
 	return ttm_ref_object_base_unref(lock->vt_holder,
-					 lock->base.hash.key, TTM_REF_USAGE);
+					 lock->base.handle, TTM_REF_USAGE);
 }
-EXPORT_SYMBOL(ttm_vt_unlock);
 
 void ttm_suspend_unlock(struct ttm_lock *lock)
 {

@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
 	wake_up_all(&lock->queue);
 	spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {

@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
 	wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
-EXPORT_SYMBOL(ttm_suspend_lock);

@@ -59,13 +59,12 @@
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/atomic.h>
+#include "ttm_object.h"
 
 struct ttm_object_file {
 	struct ttm_object_device *tdev;

@@ -95,6 +94,7 @@ struct ttm_object_device {
 	struct dma_buf_ops ops;
 	void (*dmabuf_release)(struct dma_buf *dma_buf);
 	size_t dma_buf_size;
+	struct idr idr;
 };
 
 /**

@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
 	kref_init(&base->refcount);
+	idr_preload(GFP_KERNEL);
 	spin_lock(&tdev->object_lock);
-	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
-					    &base->hash,
-					    (unsigned long)base, 31, 0, 0);
+	ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
 	spin_unlock(&tdev->object_lock);
-	if (unlikely(ret != 0))
-		goto out_err0;
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
 
+	base->handle = ret;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;

@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	return 0;
 out_err1:
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
-out_err0:
 	return ret;
 }
 EXPORT_SYMBOL(ttm_base_object_init);
 
 static void ttm_release_base(struct kref *kref)
 {

@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
 	struct ttm_object_device *tdev = base->tfile->tdev;
 
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
 
 	/*

@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
 
 	kref_put(&base->refcount, ttm_release_base);
 }
 EXPORT_SYMBOL(ttm_base_object_unref);
 
+/**
+ * ttm_base_object_noref_lookup - look up a base object without reference
+ * @tfile: The struct ttm_object_file the object is registered with.
+ * @key: The object handle.
+ *
+ * This function looks up a ttm base object and returns a pointer to it
+ * without refcounting the pointer. The returned pointer is only valid
+ * until ttm_base_object_noref_release() is called, and the object
+ * pointed to by the returned pointer may be doomed. Any persistent usage
+ * of the object requires a refcount to be taken using kref_get_unless_zero().
+ * Iff this function returns successfully it needs to be paired with
+ * ttm_base_object_noref_release() and no sleeping- or scheduling functions
+ * may be called in between these function calls.
+ *
+ * Return: A pointer to the object if successful or NULL otherwise.
+ */
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+{
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+	if (ret) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
+	__release(RCU);
+	return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+}
+EXPORT_SYMBOL(ttm_base_object_noref_lookup);
+
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)

@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 
 	return base;
 }
 EXPORT_SYMBOL(ttm_base_object_lookup);
 
 struct ttm_base_object *
 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 {
-	struct ttm_base_object *base = NULL;
-	struct drm_hash_item *hash;
-	struct drm_open_hash *ht = &tdev->object_hash;
-	int ret;
+	struct ttm_base_object *base;
 
 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(ht, key, &hash);
+	base = idr_find(&tdev->idr, key);
 
-	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		if (!kref_get_unless_zero(&base->refcount))
-			base = NULL;
-	}
+	if (base && !kref_get_unless_zero(&base->refcount))
+		base = NULL;
 	rcu_read_unlock();
 
 	return base;
 }
 EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 /**
  * ttm_ref_object_exists - Check whether a caller has a valid ref object

@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	struct ttm_ref_object *ref;
 
 	rcu_read_lock();
-	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
 		goto out_false;
 
 	/*

@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	rcu_read_unlock();
 	return false;
 }
-EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,

@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
 	while (ret == -EINVAL) {
 		rcu_read_lock();
-		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);

@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 			return -ENOMEM;
 		}
 
-		ref->hash.key = base->hash.key;
+		ref->hash.key = base->handle;
 		ref->obj = base;
 		ref->tfile = tfile;
 		ref->ref_type = ref_type;

@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
 	return ret;
 }
 EXPORT_SYMBOL(ttm_ref_object_add);
 
-static void ttm_ref_object_release(struct kref *kref)
+static void __releases(tfile->lock) __acquires(tfile->lock)
+ttm_ref_object_release(struct kref *kref)
 {
 	struct ttm_ref_object *ref =
 	    container_of(kref, struct ttm_ref_object, kref);

@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
 	spin_unlock(&tfile->lock);
 	return 0;
 }
-EXPORT_SYMBOL(ttm_ref_object_base_unref);
 
 void ttm_object_file_release(struct ttm_object_file **p_tfile)
 {

@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 
 	ttm_object_file_unref(&tfile);
 }
-EXPORT_SYMBOL(ttm_object_file_release);
 
 struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 					     unsigned int hash_order)

@@ -499,7 +521,6 @@ out_err:
 
 	return NULL;
 }
-EXPORT_SYMBOL(ttm_object_file_init);
 
 struct ttm_object_device *
 ttm_object_device_init(struct ttm_mem_global *mem_glob,

@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
 	if (ret != 0)
 		goto out_no_object_hash;
 
+	idr_init(&tdev->idr);
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;

@@ -530,7 +552,6 @@ out_no_object_hash:
 	kfree(tdev);
 	return NULL;
 }
-EXPORT_SYMBOL(ttm_object_device_init);
 
 void ttm_object_device_release(struct ttm_object_device **p_tdev)
 {

@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
 	*p_tdev = NULL;
 
+	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+	idr_destroy(&tdev->idr);
 	drm_ht_remove(&tdev->object_hash);
 
 	kfree(tdev);
 }
-EXPORT_SYMBOL(ttm_object_device_release);
 
 /**
  * get_dma_buf_unless_doomed - get a dma_buf reference if possible.

@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
-	*handle = base->hash.key;
+	*handle = base->handle;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
 	dma_buf_put(dma_buf);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
 
 /**
  * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object

@@ -739,7 +760,6 @@ out_unref:
 	ttm_base_object_unref(&base);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
 
 /**
  * ttm_prime_object_init - Initialize a ttm_prime_object

@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
 				    ttm_prime_refcount_release,
 				    ref_obj_release);
 }
-EXPORT_SYMBOL(ttm_prime_object_init);

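The noref lookup added above trades refcounting for an RCU-bounded lifetime. A hypothetical caller obeying the kerneldoc rules (no sleeping between lookup and release, and a real reference for anything persistent) might look like the following sketch; the function name is illustrative only:

static int example_peek_base_object(struct ttm_object_file *tfile,
				    uint32_t handle)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return -ESRCH;

	/*
	 * Non-sleeping inspection of the object only. For persistent use,
	 * take a reference first, e.g. with
	 * kref_get_unless_zero(&base->refcount).
	 */

	ttm_base_object_noref_release();
	return 0;
}
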
@@ -42,8 +42,7 @@
 #include <linux/kref.h>
 #include <linux/rcupdate.h>
 #include <linux/dma-buf.h>
-#include "ttm_memory.h"
+#include <drm/ttm/ttm_memory.h>
 
 /**
  * enum ttm_ref_type

@@ -125,14 +124,14 @@ struct ttm_object_device;
 
 struct ttm_base_object {
 	struct rcu_head rhead;
-	struct drm_hash_item hash;
-	enum ttm_object_type object_type;
-	bool shareable;
 	struct ttm_object_file *tfile;
 	struct kref refcount;
 	void (*refcount_release) (struct ttm_base_object **base);
 	void (*ref_obj_release) (struct ttm_base_object *base,
 				 enum ttm_ref_type ref_type);
+	u32 handle;
+	enum ttm_object_type object_type;
+	u32 shareable;
 };
 

@@ -351,4 +350,26 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 
 #define ttm_prime_object_kfree(__obj, __prime)		\
 	kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per idr.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
+
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_noref_release - release a base object pointer looked up
+ * without reference
+ *
+ * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+ */
+static inline void ttm_base_object_noref_release(void)
+{
+	__acquire(RCU);
+	rcu_read_unlock();
+}
 #endif

@@ -30,7 +30,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
-#include "drm/ttm/ttm_object.h"
+#include "ttm_object.h"
 
 
 /**

@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 		struct_size = backend_size +
 			ttm_round_pot(sizeof(struct vmw_buffer_object));
 		user_struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+			TTM_OBJ_EXTRA_SIZE;
 	}
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)

@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 		*p_base = &user_bo->prime.base;
 		kref_get(&(*p_base)->refcount);
 	}
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 
 out_no_base_object:
 	return ret;

@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
+ * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
+ * or scheduling functions may be called in between these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_noref_lookup(tfile, handle);
+	if (!base) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-ESRCH);
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_noref_release();
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	return &vmw_user_bo->vbo;
+}
+
 /**
  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.

@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
 
 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 				  TTM_REF_USAGE, NULL, false);
 }

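Note the error convention in the new lookup above: unlike ttm_base_object_noref_lookup(), which returns NULL on failure, the vmw wrapper returns ERR_PTR() codes, so callers must test with IS_ERR(). A hypothetical caller (name illustrative only):

static int example_probe_user_bo(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_buffer_object *vbo;

	vbo = vmw_user_bo_noref_lookup(tfile, handle);
	if (IS_ERR(vbo))
		return PTR_ERR(vbo);	/* -ESRCH or -EINVAL per the code above */

	/* ... non-sleeping use of vbo; no scheduling allowed here ... */

	vmw_user_bo_noref_release();
	return 0;
}
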
@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	if (!cur)
 		return;

@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 {
 	struct vmw_cmdbuf_header *cur = man->cur;
 
-	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+	lockdep_assert_held_once(&man->cur_mutex);
 
 	WARN_ON(size > cur->reserved);
 	man->cur_pos += size;

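The two hunks above are a semantic strengthening as well as a cleanup: WARN_ON(!mutex_is_locked(&m)) only checks that the mutex is held by somebody and costs a runtime test in every build, whereas lockdep_assert_held_once() checks that the current task holds the lock and compiles to nothing when lockdep is disabled. A sketch of the pattern (function name hypothetical):

static void example_locked_helper(struct vmw_cmdbuf_man *man)
{
	/* Free in production builds; with CONFIG_LOCKDEP it warns (once)
	 * unless the current task actually holds cur_mutex. */
	lockdep_assert_held_once(&man->cur_mutex);

	/* ... now safe to touch man->cur ... */
}
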
@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
 	if (unlikely(ret != 0))
 		return ERR_PTR(ret);
 
-	return vmw_resource_reference
-		(drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+	return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
 }
 
 /**

@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 		}
 	}
 
-
-
-	vmw_resource_activate(res, vmw_hw_context_destroy);
+	res->hw_destroy = vmw_hw_context_destroy;
 	return 0;
 
 out_cotables:

@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	vmw_fifo_resource_inc(dev_priv);
-	vmw_resource_activate(res, vmw_hw_context_destroy);
+	res->hw_destroy = vmw_hw_context_destroy;
 	return 0;
 
 out_early:

@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of contexts anyway.
-	 */
-
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
-		    ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+		    ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+		    + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))

@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		goto out_err;
 	}
 
-	arg->cid = ctx->base.hash.key;
+	arg->cid = ctx->base.handle;
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:

@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
 	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
 		return ERR_PTR(-EINVAL);
 
-	return vmw_resource_reference
-		(container_of(ctx, struct vmw_user_context, res)->
-		 cotables[cotable_type]);
+	return container_of(ctx, struct vmw_user_context, res)->
+		cotables[cotable_type];
 }
 
 /**

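The size bump in vmw_context_define() above is about TTM's memory-global accounting: each user-visible object now pre-charges an estimate for the id-allocator storage that lives outside the object itself (128 bytes each for the resource ida and the handle idr, per the definitions added elsewhere in this merge). A sketch of the resulting estimate, using only names that appear in the hunks; the wrapper function itself is hypothetical:

static size_t example_user_context_acc_size(struct vmw_private *dev_priv,
					    struct vmw_user_context *ctx)
{
	/* VMW_IDA_ACC_SIZE and TTM_OBJ_EXTRA_SIZE are both 128-byte
	 * on-average estimates for ida/idr storage allocated separately
	 * from the object itself. */
	return ttm_round_pot(sizeof(*ctx)) +
	       (dev_priv->has_mob ? vmw_cmdbuf_res_man_size() : 0) +
	       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
}
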
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
 	vcotbl->type = type;
 	vcotbl->ctx = ctx;
 
-	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
 
 	return &vcotbl->res;

@@ -30,9 +30,9 @@
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
 #include "vmwgfx_binding.h"
+#include "ttm_object.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/dma_remapping.h>

@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->binding_mutex);
 	mutex_init(&dev_priv->requested_layout_mutex);
 	mutex_init(&dev_priv->global_kms_state_mutex);
-	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->resource_lock);
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);

@@ -28,6 +28,7 @@
 #ifndef _VMWGFX_DRV_H_
 #define _VMWGFX_DRV_H_
 
+#include "vmwgfx_validation.h"
 #include "vmwgfx_reg.h"
 #include <drm/drmP.h>
 #include <drm/vmwgfx_drm.h>

@@ -35,11 +36,11 @@
 #include <drm/drm_auth.h>
 #include <linux/suspend.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
-#include <drm/ttm/ttm_lock.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
+#include "ttm_object.h"
+#include "ttm_lock.h"
 #include <linux/sync_file.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"

@@ -112,21 +113,49 @@ struct vmw_validate_buffer {
 };
 
 struct vmw_res_func;
 
+
+/**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
 struct vmw_resource {
 	struct kref kref;
 	struct vmw_private *dev_priv;
 	int id;
-	bool avail;
 	unsigned long backup_size;
-	bool res_dirty; /* Protected by backup buffer reserved */
-	bool backup_dirty; /* Protected by backup buffer reserved */
+	bool res_dirty;
+	bool backup_dirty;
 	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
-	unsigned long pin_count; /* Protected by resource reserved */
+	unsigned long pin_count;
 	const struct vmw_res_func *func;
-	struct list_head lru_head; /* Protected by the resource lock */
-	struct list_head mob_head; /* Protected by @backup reserved */
-	struct list_head binding_head; /* Protected by binding_mutex */
+	struct list_head lru_head;
+	struct list_head mob_head;
+	struct list_head binding_head;
 	void (*res_free) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 };

@@ -204,29 +233,24 @@ struct vmw_fifo_state {
 	bool dx;
 };
 
-struct vmw_relocation {
-	SVGAMobId *mob_loc;
-	SVGAGuestPtr *location;
-	uint32_t index;
-};
-
 /**
  * struct vmw_res_cache_entry - resource information cache entry
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @valid_handle: Whether the @handle member is valid.
  * @valid: Whether the entry is valid, which also implies that the execbuf
  * code holds a reference to the resource, and it's placed on the
  * validation list.
- * @handle: User-space handle of a resource.
- * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
 struct vmw_res_cache_entry {
-	bool valid;
 	uint32_t handle;
 	struct vmw_resource *res;
-	struct vmw_resource_val_node *node;
+	void *private;
+	unsigned short valid_handle;
+	unsigned short valid;
 };
 
 /**

@@ -291,35 +315,63 @@ enum vmw_display_unit_type {
 	vmw_du_screen_target
 };
 
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
+
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @bo_relocations: List of buffer object relocations
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
-	bool kernel; /**< is the called made from the kernel */
+	bool kernel;
 	struct vmw_fpriv *fp;
-	struct list_head validate_nodes;
-	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
-	uint32_t cur_reloc;
-	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
-	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
-	struct list_head resource_list;
-	struct list_head ctx_resource_list; /* For contexts and cotables */
 	struct vmw_buffer_object *cur_query_bo;
+	struct list_head bo_relocations;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
-	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state *staged_bindings;
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
-	struct vmw_resource_val_node *dx_ctx_node;
+	struct list_head ctx_list;
+	struct vmw_ctx_validation_info *dx_ctx_node;
 	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_validation_context *ctx;
 };
 
 struct vmw_legacy_display;

@@ -444,7 +496,7 @@
 	 * Context and surface management.
 	 */
 
-	rwlock_t resource_lock;
+	spinlock_t resource_lock;
 	struct idr res_idr[vmw_res_max];
 	/*
 	 * Block lastclose from racing with firstopen.

@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);

@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle(
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
+extern struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+				      struct ttm_object_file *tfile,
+				      uint32_t handle,
+				      const struct vmw_user_resource_conv *
+				      converter);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,

@@ -661,6 +719,15 @@ extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 
+/**
+ * vmw_user_resource_noref_release - release a user resource pointer looked up
+ * without reference
+ */
+static inline void vmw_user_resource_noref_release(void)
+{
+	ttm_base_object_noref_release();
+}
+
 /**
  * Buffer object helper functions - vmwgfx_bo.c
  */

@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 			       struct ttm_mem_reg *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+extern struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+
+/**
+ * vmw_user_bo_noref_release - release a buffer object pointer looked up
+ * without reference
+ */
+static inline void vmw_user_bo_noref_release(void)
+{
+	ttm_base_object_noref_release();
+}
+
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c

@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					uint32_t fence_handle,
 					int32_t out_fence_fd,
 					struct sync_file *sync_file);
-extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo,
-				      bool interruptible,
-				      bool validate_as_mob);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**

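The valid/valid_handle split in vmw_res_cache_entry above lets the cache answer two different questions: "is this handle the one I saw last?" and "does the execbuf code currently hold a reference?". A hypothetical fast path built on those fields (a sketch only; vmwgfx's real cache logic lives in the validation/execbuf code, whose diff is omitted below):

static struct vmw_resource *
example_res_cache_hit(struct vmw_sw_context *sw_context,
		      enum vmw_res_type res_type, uint32_t handle)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];

	if (rcache->valid_handle && rcache->handle == handle)
		return rcache->res;	/* skip the user-space handle lookup */

	return NULL;			/* miss: caller does a full lookup */
}
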
(The diff for one file is not shown here because of its large size.)

@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	INIT_LIST_HEAD(&fman->cleanup_list);
 	INIT_WORK(&fman->work, &vmw_fence_work_func);
 	fman->fifo_down = true;
-	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+		TTM_OBJ_EXTRA_SIZE;
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));

@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 	}
 
 	*p_fence = &ufence->fence;
-	*p_handle = ufence->base.hash.key;
+	*p_handle = ufence->base.handle;
 
 	return 0;
 out_err:

@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 				  "object.\n");
 			goto out_no_ref_obj;
 		}
-		handle = base->hash.key;
+		handle = base->handle;
 	}
 	ttm_base_object_unref(&base);
 }

@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
 */
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_buffer_object *buf,
-				  bool interruptible,
-				  bool validate_as_mob,
-				  bool for_cpu_blit)
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep)
 {
-	struct ttm_operation_ctx ctx = {
-		.interruptible = interruptible,
-		.no_wait_gpu = false};
-	struct ttm_buffer_object *bo = &buf->base;
-	int ret;
-
-	ttm_bo_reserve(bo, false, false, NULL);
-	if (for_cpu_blit)
-		ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
-	else
-		ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
-						 validate_as_mob);
-	if (ret)
-		ttm_bo_unreserve(bo);
-
-	return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
-	if (buf)
-		ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-				  struct drm_file *file_priv,
-				  struct vmw_buffer_object *buf,
-				  struct vmw_fence_obj **out_fence,
-				  struct drm_vmw_fence_rep __user *
-				  user_fence_rep)
-{
-	struct vmw_fence_obj *fence;
+	struct vmw_fence_obj *fence = NULL;
 	uint32_t handle;
 	int ret;
 
-	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
-					 file_priv ? &handle : NULL);
-	if (buf)
-		vmw_bo_fence_single(&buf->base, fence);
+	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+	    out_fence)
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+						 file_priv ? &handle : NULL);
+	vmw_validation_done(ctx, fence);
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,

@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 		*out_fence = fence;
 	else
 		vmw_fence_obj_unreference(&fence);
-
-	vmw_kms_helper_buffer_revert(buf);
-}
-
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
-	struct vmw_resource *res = ctx->res;
-
-	vmw_kms_helper_buffer_revert(ctx->buf);
-	vmw_bo_unreference(&ctx->buf);
-	vmw_resource_unreserve(res, false, NULL, 0);
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible,
-				    struct vmw_validation_ctx *ctx)
-{
-	int ret = 0;
-
-	ctx->buf = NULL;
-	ctx->res = res;
-
-	if (interruptible)
-		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
-	else
-		mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
-
-	ret = vmw_resource_reserve(res, interruptible, false);
-	if (ret)
-		goto out_unlock;
-
-	if (res->backup) {
-		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
-						    interruptible,
-						    res->dev_priv->has_mob,
-						    false);
-		if (ret)
-			goto out_unreserve;
-
-		ctx->buf = vmw_bo_reference(res->backup);
-	}
-	ret = vmw_resource_validate(res);
-	if (ret)
-		goto out_revert;
-	return 0;
-
-out_revert:
-	vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
-	vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-	return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-				    struct vmw_fence_obj **out_fence)
-{
-	struct vmw_resource *res = ctx->res;
-
-	if (ctx->buf || out_fence)
-		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
-					     out_fence, NULL);
-
-	vmw_bo_unreference(&ctx->buf);
-	vmw_resource_unreserve(res, false, NULL, 0);
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
 
 /**

@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 			 int increment,
 			 struct vmw_kms_dirty *dirty);
 
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_buffer_object *buf,
-				  bool interruptible,
-				  bool validate_as_mob,
-				  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-				  struct drm_file *file_priv,
-				  struct vmw_buffer_object *buf,
-				  struct vmw_fence_obj **out_fence,
-				  struct drm_vmw_fence_rep __user *
-				  user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible,
-				    struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-				    struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep);
 
 int vmw_kms_readback(struct vmw_private *dev_priv,
 		     struct drm_file *file_priv,
 		     struct vmw_framebuffer *vfb,

@@ -31,8 +31,8 @@
 */
 
 #include "vmwgfx_drv.h"
+#include "ttm_object.h"
 #include <linux/dma-buf.h>
-#include <drm/ttm/ttm_object.h>
 
 /*
 * DMA-BUF attach- and mapping methods. No need to implement

@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-	write_lock(&dev_priv->resource_lock);
+	spin_lock(&dev_priv->resource_lock);
 	if (res->id != -1)
 		idr_remove(idr, res->id);
 	res->id = -1;
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 }
 
 static void vmw_resource_release(struct kref *kref)

@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
-	write_lock(&dev_priv->resource_lock);
-	res->avail = false;
+	spin_lock(&dev_priv->resource_lock);
 	list_del_init(&res->lru_head);
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 	if (res->backup) {
 		struct ttm_buffer_object *bo = &res->backup->base;
 

@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
 	else
 		kfree(res);
 
-	write_lock(&dev_priv->resource_lock);
+	spin_lock(&dev_priv->resource_lock);
 	if (id != -1)
 		idr_remove(idr, id);
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)

@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
 	BUG_ON(res->id != -1);
 
 	idr_preload(GFP_KERNEL);
-	write_lock(&dev_priv->resource_lock);
+	spin_lock(&dev_priv->resource_lock);
 
 	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
 	if (ret >= 0)
 		res->id = ret;
 
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 	idr_preload_end();
 	return ret < 0 ? ret : 0;
 }

@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	kref_init(&res->kref);
 	res->hw_destroy = NULL;
 	res->res_free = res_free;
-	res->avail = false;
 	res->dev_priv = dev_priv;
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);

@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	return vmw_resource_alloc_id(res);
 }
 
-/**
- * vmw_resource_activate
- *
- * @res:        Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
-			   void (*hw_destroy) (struct vmw_resource *))
-{
-	struct vmw_private *dev_priv = res->dev_priv;
-
-	write_lock(&dev_priv->resource_lock);
-	res->avail = true;
-	res->hw_destroy = hw_destroy;
-	write_unlock(&dev_priv->resource_lock);
-}
-
 /**
  * vmw_user_resource_lookup_handle - lookup a struct resource from a

@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 		goto out_bad_resource;
 
 	res = converter->base_obj_to_res(base);
-
-	read_lock(&dev_priv->resource_lock);
-	if (!res->avail || res->res_free != converter->res_free) {
-		read_unlock(&dev_priv->resource_lock);
-		goto out_bad_resource;
-	}
-
 	kref_get(&res->kref);
-	read_unlock(&dev_priv->resource_lock);
 
 	*p_res = res;
 	ret = 0;

@@ -262,6 +230,41 @@ out_bad_resource:
 	return ret;
 }
 
+/**
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+				      struct ttm_object_file *tfile,
+				      uint32_t handle,
+				      const struct vmw_user_resource_conv
+				      *converter)
+{
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_noref_lookup(tfile, handle);
+	if (!base)
+		return ERR_PTR(-ESRCH);
+
+	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+		ttm_base_object_noref_release();
+		return ERR_PTR(-EINVAL);
+	}
+
+	return converter->base_obj_to_res(base);
+}
+
 /**
  * Helper function that looks either a surface or bo.
  *

@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 		return;
 
-	write_lock(&dev_priv->resource_lock);
+	spin_lock(&dev_priv->resource_lock);
 	list_add_tail(&res->lru_head,
 		      &res->dev_priv->res_lru[res->func->res_type]);
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 }
 
 /**

@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
 
-	write_lock(&dev_priv->resource_lock);
+	spin_lock(&dev_priv->resource_lock);
 	list_del_init(&res->lru_head);
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {

@@ -587,15 +590,18 @@ out_no_unbind:
 /**
  * vmw_resource_validate - Make a resource up-to-date and visible
  * to the device.
- *
- * @res: The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
  *
- * On succesful return, any backup DMA buffer pointed to by @res->backup will
- * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
 */
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
 {
 	int ret;
 	struct vmw_resource *evict_res;

@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
 		if (likely(ret != -EBUSY))
 			break;
 
-		write_lock(&dev_priv->resource_lock);
+		spin_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
 			DRM_ERROR("Out of device device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
-			write_unlock(&dev_priv->resource_lock);
+			spin_unlock(&dev_priv->resource_lock);
 			break;
 		}
 

@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
 					  lru_head));
 		list_del_init(&evict_res->lru_head);
 
-		write_unlock(&dev_priv->resource_lock);
+		spin_unlock(&dev_priv->resource_lock);
 
 		/* Trylock backup buffers with a NULL ticket. */
-		ret = vmw_resource_do_evict(NULL, evict_res, true);
+		ret = vmw_resource_do_evict(NULL, evict_res, intr);
 		if (unlikely(ret != 0)) {
-			write_lock(&dev_priv->resource_lock);
+			spin_lock(&dev_priv->resource_lock);
 			list_add_tail(&evict_res->lru_head, lru_list);
-			write_unlock(&dev_priv->resource_lock);
+			spin_unlock(&dev_priv->resource_lock);
 			if (ret == -ERESTARTSYS ||
 			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
 				vmw_resource_unreference(&evict_res);

@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 	struct ww_acquire_ctx ticket;
 
 	do {
-		write_lock(&dev_priv->resource_lock);
+		spin_lock(&dev_priv->resource_lock);
 
 		if (list_empty(lru_list))
 			goto out_unlock;

@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 			list_first_entry(lru_list, struct vmw_resource,
 					 lru_head));
 		list_del_init(&evict_res->lru_head);
-		write_unlock(&dev_priv->resource_lock);
+		spin_unlock(&dev_priv->resource_lock);
 
 		/* Wait lock backup buffers with a ticket. */
 		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 		if (unlikely(ret != 0)) {
-			write_lock(&dev_priv->resource_lock);
+			spin_lock(&dev_priv->resource_lock);
 			list_add_tail(&evict_res->lru_head, lru_list);
-			write_unlock(&dev_priv->resource_lock);
+			spin_unlock(&dev_priv->resource_lock);
 			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
 				vmw_resource_unreference(&evict_res);
 				return;

@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 	} while (1);
 
 out_unlock:
-	write_unlock(&dev_priv->resource_lock);
+	spin_unlock(&dev_priv->resource_lock);
 }
 
 /**

@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 			/* Do we really need to pin the MOB as well? */
 			vmw_bo_pin_reserved(vbo, true);
 		}
-		ret = vmw_resource_validate(res);
+		ret = vmw_resource_validate(res, interruptible);
 		if (vbo)
 			ttm_bo_unreserve(&vbo->base);
 		if (ret)

@@ -30,6 +30,11 @@
 
 #include "vmwgfx_drv.h"
 
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per ida.
+ */
+#define VMW_IDA_ACC_SIZE 128
+
 enum vmw_cmdbuf_res_state {

@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 		      bool delay_id,
 		      void (*res_free) (struct vmw_resource *res),
 		      const struct vmw_res_func *func);
-void vmw_resource_activate(struct vmw_resource *res,
-			   void (*hw_destroy) (struct vmw_resource *));
 int
 vmw_simple_resource_create_ioctl(struct drm_device *dev,
 				 void *data,

@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_kms_sou_surface_dirty sdirty;
-	struct vmw_validation_ctx ctx;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;

 	if (!srf)
 		srf = &vfbs->surface->res;

-	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
 	if (ret)
 		return ret;

+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
 	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
 	sdirty.base.clip = vmw_sou_surface_clip;
 	sdirty.base.dev_priv = dev_priv;

@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
-	vmw_kms_helper_resource_finish(&ctx, out_fence);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);

 	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
 }

 /**

@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 		container_of(framebuffer, struct vmw_framebuffer_bo,
 			     base)->buffer;
 	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;

-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-					    false, false);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
 	if (ret)
 		return ret;

+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
 	ret = do_bo_define_gmrfb(dev_priv, framebuffer);
 	if (unlikely(ret != 0))
 		goto out_revert;

@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 		num_clips;
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   0, 0, num_clips, increment, &dirty);
-	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);

 	return ret;

 out_revert:
-	vmw_kms_helper_buffer_revert(buf);
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);

 	return ret;
 }

@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 	struct vmw_buffer_object *buf =
 		container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
 	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;

-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
-					    false);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
 	if (ret)
 		return ret;

+	ret = vmw_validation_prepare(&val_ctx, NULL, true);
+	if (ret)
+		goto out_unref;
+
 	ret = do_bo_define_gmrfb(dev_priv, vfb);
 	if (unlikely(ret != 0))
 		goto out_revert;

@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 		num_clips;
 	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
 				   0, 0, num_clips, 1, &dirty);
-	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-				     user_fence_rep);
+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);

 	return ret;

 out_revert:
-	vmw_kms_helper_buffer_revert(buf);
-
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);

 	return ret;
 }
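All three screen-object paths above now follow one sequence: declare an on-stack context, register what the command stream touches, prepare (reserve plus validate), emit, then finish or unwind. A condensed sketch of that sequence; the wrapper function and its arguments are illustrative only, while the vmw_* calls are the ones introduced by this series:

	static int emit_with_validation(struct vmw_private *dev_priv,
					struct vmw_resource *srf,
					struct vmw_buffer_object *buf,
					bool interruptible)
	{
		DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
		int ret;

		/* Register everything the command stream will touch. */
		ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
		if (ret)
			return ret;
		ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
		if (ret)
			goto out_unref;

		/* Reserve and validate resources and buffers in one step. */
		ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
		if (ret)
			goto out_unref;

		/* ... build and submit the actual commands here ... */

		/* Fence, unreserve and drop all validation references. */
		vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
						 NULL, NULL);
		return 0;

	out_unref:
		vmw_validation_unref_lists(&val_ctx);
		return ret;
	}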
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 	shader->num_input_sig = num_input_sig;
 	shader->num_output_sig = num_output_sig;

-	vmw_resource_activate(res, vmw_hw_shader_destroy);
+	res->hw_destroy = vmw_hw_shader_destroy;
 	return 0;
 }

@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
 {
 	struct vmw_dx_shader *entry, *next;

-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);

 	list_for_each_entry_safe(entry, next, list, cotable_head) {
 		WARN_ON(vmw_dx_shader_scrub(&entry->res));

@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,

 	res = &shader->res;
 	shader->ctx = ctx;
-	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+	shader->cotable = vmw_resource_reference
+		(vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
 	shader->id = user_key;
 	shader->committed = false;
 	INIT_LIST_HEAD(&shader->cotable_head);

@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
 		goto out_resource_init;

 	res->id = shader->id;
-	vmw_resource_activate(res, vmw_hw_shader_destroy);
+	res->hw_destroy = vmw_hw_shader_destroy;

 out_resource_init:
 	vmw_resource_unreference(&res);

@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	};
 	int ret;

-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
 	if (unlikely(vmw_user_shader_size == 0))
 		vmw_user_shader_size =
-			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+			ttm_round_pot(sizeof(struct vmw_user_shader)) +
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_shader_size,

@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	}

 	if (handle)
-		*handle = ushader->base.hash.key;
+		*handle = ushader->base.handle;
 out_err:
 	vmw_resource_unreference(&res);
 out:

@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	};
 	int ret;

-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
 	if (unlikely(vmw_shader_size == 0))
 		vmw_shader_size =
-			ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+			ttm_round_pot(sizeof(struct vmw_shader)) +
+			VMW_IDA_ACC_SIZE;

 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_shader_size,
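The WARN_ON_ONCE(!mutex_is_locked(...)) checks are replaced with lockdep_assert_held_once(), which documents the locking rule and costs nothing when CONFIG_LOCKDEP is off. A minimal sketch of the idiom (the mutex and function here are illustrative):

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(binding_mutex);

	static void scrub_bindings(void)
	{
		/* Verified only under lockdep; compiles away otherwise. */
		lockdep_assert_held_once(&binding_mutex);

		/* ... walk and scrub the binding list here ... */
	}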
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
 		return ret;
 	}

-	vmw_resource_activate(&simple->res, simple->func->hw_destroy);
+	simple->res.hw_destroy = simple->func->hw_destroy;

 	return 0;
 }

@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,

 	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
 	  func->size;
-	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+		       TTM_OBJ_EXTRA_SIZE;

 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (ret)

@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
 		goto out_err;
 	}

-	func->set_arg_handle(data, usimple->base.hash.key);
+	func->set_arg_handle(data, usimple->base.handle);
 out_err:
 	vmw_resource_unreference(&res);
 out_ret:
@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
 		union vmw_view_destroy body;
 	} *cmd;

-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
 	vmw_binding_res_list_scrub(&res->binding_head);

 	if (!view->committed || res->id == -1)

@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
 	res = &view->res;
 	view->ctx = ctx;
 	view->srf = vmw_resource_reference(srf);
-	view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+	view->cotable = vmw_resource_reference
+		(vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
 	view->view_type = view_type;
 	view->view_id = user_key;
 	view->cmd_size = cmd_size;

@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
 		goto out_resource_init;

 	res->id = view->view_id;
-	vmw_resource_activate(res, vmw_hw_view_destroy);
+	res->hw_destroy = vmw_hw_view_destroy;

 out_resource_init:
 	vmw_resource_unreference(&res);

@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
 {
 	struct vmw_view *entry, *next;

-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);

 	list_for_each_entry_safe(entry, next, list, cotable_head)
 		WARN_ON(vmw_view_destroy(&entry->res));

@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
 {
 	struct vmw_view *entry, *next;

-	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	lockdep_assert_held_once(&dev_priv->binding_mutex);

 	list_for_each_entry_safe(entry, next, list, srf_head)
 		WARN_ON(vmw_view_destroy(&entry->res));
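Both the shader and the view code now store a counted reference to the cotable they link into instead of a borrowed pointer, so the cotable cannot be freed while a dependent object still points at it. A generic kref-based sketch of that reference-then-store pattern (all names here are hypothetical):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct cotable {
		struct kref kref;
	};

	static void cotable_release(struct kref *kref)
	{
		kfree(container_of(kref, struct cotable, kref));
	}

	struct view {
		struct cotable *cotable;
	};

	static void view_init(struct view *view, struct cotable *ct)
	{
		/* Take a reference before storing, as the diff now does
		 * with vmw_resource_reference(). */
		kref_get(&ct->kref);
		view->cotable = ct;
	}

	static void view_fini(struct view *view)
	{
		kref_put(&view->cotable->kref, cotable_release);
	}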
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 	struct vmw_stdu_dirty ddirty;
 	int ret;
 	bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

 	/*
 	 * VMs without 3D support don't have the surface DMA command and
 	 * we'll be using a CPU blit, and the framebuffer should be moved out
 	 * of VRAM.
 	 */
-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-					    false, cpu_blit);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
 	if (ret)
 		return ret;

+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
 	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
 			  SVGA3D_READ_HOST_VRAM;
 	ddirty.left = ddirty.top = S32_MAX;

@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,

 	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
 				   0, 0, num_clips, increment, &ddirty.base);
-	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-				     user_fence_rep);

+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);
 	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
 }

@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_stdu_dirty sdirty;
-	struct vmw_validation_ctx ctx;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;

 	if (!srf)
 		srf = &vfbs->surface->res;

-	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
 	if (ret)
 		return ret;

+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
 	if (vfbs->is_bo_proxy) {
 		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
 		if (ret)

@@ -954,9 +966,14 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
 out_finish:
-	vmw_kms_helper_resource_finish(&ctx, out_fence);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);

 	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
 }
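vmw_kms_stdu_dma derives the validation mode from the device capabilities: without 3D there is no surface DMA command, so the buffer has to land in a page-mappable placement for a CPU blit. A compact sketch of that decision; the wrapper function is hypothetical, the two vmw_validation_* calls are the ones added by this series:

	static int stdu_prepare_buf(struct vmw_private *dev_priv,
				    struct vmw_validation_context *val_ctx,
				    struct vmw_buffer_object *buf,
				    bool interruptible)
	{
		/* No 3D support means no surface DMA command: CPU blit. */
		bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
		int ret;

		/* cpu_blit == true requests a page-mappable placement. */
		ret = vmw_validation_add_bo(val_ctx, buf, false, cpu_blit);
		if (ret)
			return ret;

		return vmw_validation_prepare(val_ctx, NULL, interruptible);
	}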
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	 */

 	INIT_LIST_HEAD(&srf->view_list);
-	vmw_resource_activate(res, vmw_hw_surface_destroy);
+	res->hw_destroy = vmw_hw_surface_destroy;
 	return ret;
 }

@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,

 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

 	num_sizes = 0;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {

@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	    num_sizes == 0)
 		return -EINVAL;

-	size = vmw_user_surface_size + 128 +
+	size = vmw_user_surface_size +
 		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
 		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}

-	rep->sid = user_srf->prime.base.hash.key;
+	rep->sid = user_srf->prime.base.handle;
 	vmw_resource_unreference(&res);

 	ttm_read_unlock(&dev_priv->reservation_sem);

@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
-		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
 		ret = -EFAULT;
 	}

@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,

 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-			128;
+			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

-	size = vmw_user_surface_size + 128;
+	size = vmw_user_surface_size;

 	/* Define a surface based on the parameters. */
 	ret = vmw_surface_gb_priv_define(dev,

@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 		goto out_unlock;
 	}

-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle = user_srf->prime.base.handle;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =

@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a GB surface "
 			  "backup buffer.\n");
-		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
+		(void) ttm_ref_object_base_unref(tfile, base->handle,
 						 TTM_REF_USAGE);
 		goto out_bad_resource;
 	}

@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->creq.base.array_size = srf->array_size;
 	rep->creq.base.buffer_handle = backup_handle;
 	rep->creq.base.base_size = srf->base_size;
-	rep->crep.handle = user_srf->prime.base.hash.key;
+	rep->crep.handle = user_srf->prime.base.handle;
 	rep->crep.backup_size = srf->res.backup_size;
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/slab.h>
+#include "vmwgfx_validation.h"
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_validation_bo_node - Buffer object validation metadata.
+ * @base: Metadata used for TTM reservation and validation.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @as_mob: Validate as mob.
+ * @cpu_blit: Validate for cpu blit access.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_bo_node {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+	u32 as_mob : 1;
+	u32 cpu_blit : 1;
+};
+
+/**
+ * struct vmw_validation_res_node - Resource validation metadata.
+ * @head: List head for the resource validation list.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @res: Reference counted resource pointer.
+ * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
+ * to a resource.
+ * @new_backup_offset: Offset into the new backup mob for resources that can
+ * share MOBs.
+ * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
+ * the command stream provides a mob bind operation.
+ * @switching_backup: The validation process is switching backup MOB.
+ * @first_usage: True iff the resource has been seen only once in the current
+ * validation batch.
+ * @reserved: Whether the resource is currently reserved by this process.
+ * @private: Optional additional memory for caller-private data.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_res_node {
+	struct list_head head;
+	struct drm_hash_item hash;
+	struct vmw_resource *res;
+	struct vmw_buffer_object *new_backup;
+	unsigned long new_backup_offset;
+	u32 no_buffer_needed : 1;
+	u32 switching_backup : 1;
+	u32 first_usage : 1;
+	u32 reserved : 1;
+	unsigned long private[0];
+};
+
+/**
+ * vmw_validation_mem_alloc - Allocate kernel memory from the validation
+ * context based allocator
+ * @ctx: The validation context
+ * @size: The number of bytes to allocate.
+ *
+ * The memory allocated may not exceed PAGE_SIZE, and the returned
+ * address is aligned to sizeof(long). All memory allocated this way is
+ * reclaimed after validation when calling any of the exported functions:
+ * vmw_validation_unref_lists()
+ * vmw_validation_revert()
+ * vmw_validation_done()
+ *
+ * Return: Pointer to the allocated memory on success. NULL on failure.
+ */
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+			       unsigned int size)
+{
+	void *addr;
+
+	size = vmw_validation_align(size);
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	if (ctx->mem_size_left < size) {
+		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+		if (!page)
+			return NULL;
+
+		list_add_tail(&page->lru, &ctx->page_list);
+		ctx->page_address = page_address(page);
+		ctx->mem_size_left = PAGE_SIZE;
+	}
+
+	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
+	ctx->mem_size_left -= size;
+
+	return addr;
+}
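The allocator above is a page-backed bump allocator: chunks are carved out of the newest page and never freed individually. A small usage sketch under that assumption (the metadata type and wrapper are hypothetical):

	struct my_meta {
		struct list_head head;
		u32 flags;
	};

	static int track_meta(struct vmw_validation_context *ctx)
	{
		struct my_meta *m = vmw_validation_mem_alloc(ctx, sizeof(*m));

		if (!m)
			return -ENOMEM;

		m->flags = 0;
		/*
		 * No kfree() anywhere: the chunk lives until the context is
		 * torn down via vmw_validation_unref_lists(), _revert() or
		 * _done().
		 */
		return 0;
	}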
+
+/**
+ * vmw_validation_mem_free - Free all memory allocated using
+ * vmw_validation_mem_alloc()
+ * @ctx: The validation context
+ *
+ * All memory previously allocated for this context using
+ * vmw_validation_mem_alloc() is freed.
+ */
+static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
+{
+	struct page *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
+		list_del_init(&entry->lru);
+		__free_page(entry);
+	}
+
+	ctx->mem_size_left = 0;
+}
+
+/**
+ * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @vbo: The buffer object to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_bo_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_bo_node *
+vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+			   struct vmw_buffer_object *vbo)
+{
+	struct vmw_validation_bo_node *bo_node = NULL;
+
+	if (!ctx->merge_dups)
+		return NULL;
+
+	if (ctx->ht) {
+		struct drm_hash_item *hash;
+
+		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+			bo_node = container_of(hash, typeof(*bo_node), hash);
+	} else {
+		struct vmw_validation_bo_node *entry;
+
+		list_for_each_entry(entry, &ctx->bo_list, base.head) {
+			if (entry->base.bo == &vbo->base) {
+				bo_node = entry;
+				break;
+			}
+		}
+	}
+
+	return bo_node;
+}
+
+/**
+ * vmw_validation_find_res_dup - Find a duplicate resource entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @res: The resource to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_res_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_res_node *
+vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+			    struct vmw_resource *res)
+{
+	struct vmw_validation_res_node *res_node = NULL;
+
+	if (!ctx->merge_dups)
+		return NULL;
+
+	if (ctx->ht) {
+		struct drm_hash_item *hash;
+
+		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+			res_node = container_of(hash, typeof(*res_node), hash);
+	} else {
+		struct vmw_validation_res_node *entry;
+
+		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
+			if (entry->res == res) {
+				res_node = entry;
+				goto out;
+			}
+		}
+
+		list_for_each_entry(entry, &ctx->resource_list, head) {
+			if (entry->res == res) {
+				res_node = entry;
+				break;
+			}
+		}
+	}
+
+out:
+	return res_node;
+}
+
+/**
+ * vmw_validation_add_bo - Add a buffer object to the validation context.
+ * @ctx: The validation context.
+ * @vbo: The buffer object.
+ * @as_mob: Validate as mob, otherwise suitable for GMR operations.
+ * @cpu_blit: Validate in a page-mappable location.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+			  struct vmw_buffer_object *vbo,
+			  bool as_mob,
+			  bool cpu_blit)
+{
+	struct vmw_validation_bo_node *bo_node;
+
+	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
+	if (bo_node) {
+		if (bo_node->as_mob != as_mob ||
+		    bo_node->cpu_blit != cpu_blit) {
+			DRM_ERROR("Inconsistent buffer usage.\n");
+			return -EINVAL;
+		}
+	} else {
+		struct ttm_validate_buffer *val_buf;
+		int ret;
+
+		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+		if (!bo_node)
+			return -ENOMEM;
+
+		if (ctx->ht) {
+			bo_node->hash.key = (unsigned long) vbo;
+			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+			if (ret) {
+				DRM_ERROR("Failed to initialize a buffer "
+					  "validation entry.\n");
+				return ret;
+			}
+		}
+		val_buf = &bo_node->base;
+		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+		if (!val_buf->bo)
+			return -ESRCH;
+		val_buf->shared = false;
+		list_add_tail(&val_buf->head, &ctx->bo_list);
+		bo_node->as_mob = as_mob;
+		bo_node->cpu_blit = cpu_blit;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_validation_add_resource - Add a resource to the validation context.
+ * @ctx: The validation context.
+ * @res: The resource.
+ * @priv_size: Size of private, additional metadata.
+ * @p_node: Output pointer of additional metadata address.
+ * @first_usage: Whether this was the first time this resource was seen.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+				struct vmw_resource *res,
+				size_t priv_size,
+				void **p_node,
+				bool *first_usage)
+{
+	struct vmw_validation_res_node *node;
+	int ret;
+
+	node = vmw_validation_find_res_dup(ctx, res);
+	if (node) {
+		node->first_usage = 0;
+		goto out_fill;
+	}
+
+	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
+	if (!node) {
+		DRM_ERROR("Failed to allocate a resource validation "
+			  "entry.\n");
+		return -ENOMEM;
+	}
+
+	if (ctx->ht) {
+		node->hash.key = (unsigned long) res;
+		ret = drm_ht_insert_item(ctx->ht, &node->hash);
+		if (ret) {
+			DRM_ERROR("Failed to initialize a resource validation "
+				  "entry.\n");
+			return ret;
+		}
+	}
+	node->res = vmw_resource_reference_unless_doomed(res);
+	if (!node->res)
+		return -ESRCH;
+
+	node->first_usage = 1;
+	if (!res->dev_priv->has_mob) {
+		list_add_tail(&node->head, &ctx->resource_list);
+	} else {
+		switch (vmw_res_type(res)) {
+		case vmw_res_context:
+		case vmw_res_dx_context:
+			list_add(&node->head, &ctx->resource_ctx_list);
+			break;
+		case vmw_res_cotable:
+			list_add_tail(&node->head, &ctx->resource_ctx_list);
+			break;
+		default:
+			list_add_tail(&node->head, &ctx->resource_list);
+			break;
+		}
+	}
+
+out_fill:
+	if (first_usage)
+		*first_usage = node->first_usage;
+	if (p_node)
+		*p_node = &node->private;
+
+	return 0;
+}
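Callers can attach their own per-resource state via @priv_size and @p_node, and @first_usage tells them whether that state still needs initializing; duplicates hand back the same node. A hedged usage sketch (the private struct and wrapper are hypothetical):

	struct my_res_priv {
		u32 dirty_flags;	/* hypothetical caller-private state */
	};

	static int register_res(struct vmw_validation_context *ctx,
				struct vmw_resource *res)
	{
		struct my_res_priv *priv;
		bool first;
		int ret;

		ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
						  (void **)&priv, &first);
		if (ret)
			return ret;

		/* Initialize the private area only on first registration. */
		if (first)
			priv->dirty_flags = 0;

		return 0;
	}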
+
+/**
+ * vmw_validation_res_switch_backup - Register a backup MOB switch during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @vbo: The new backup buffer object MOB. This buffer object needs to have
+ * already been registered with the validation context.
+ * @backup_offset: Offset into the new backup MOB.
+ */
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+				      void *val_private,
+				      struct vmw_buffer_object *vbo,
+				      unsigned long backup_offset)
+{
+	struct vmw_validation_res_node *val;
+
+	val = container_of(val_private, typeof(*val), private);
+
+	val->switching_backup = 1;
+	if (val->first_usage)
+		val->no_buffer_needed = 1;
+
+	val->new_backup = vbo;
+	val->new_backup_offset = backup_offset;
+}
+
+/**
+ * vmw_validation_res_reserve - Reserve all resources registered with this
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Use interruptible waits when possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+			       bool intr)
+{
+	struct vmw_validation_res_node *val;
+	int ret = 0;
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+	list_for_each_entry(val, &ctx->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
+		if (ret)
+			goto out_unreserve;
+
+		val->reserved = 1;
+		if (res->backup) {
+			struct vmw_buffer_object *vbo = res->backup;
+
+			ret = vmw_validation_add_bo
+				(ctx, vbo, vmw_resource_needs_backup(res),
+				 false);
+			if (ret)
+				goto out_unreserve;
+		}
+	}
+
+	return 0;
+
+out_unreserve:
+	vmw_validation_res_unreserve(ctx, true);
+	return ret;
+}
+
+/**
+ * vmw_validation_res_unreserve - Unreserve all reserved resources
+ * registered with this validation context.
+ * @ctx: The validation context.
+ * @backoff: Whether this is a backoff- or a commit-type operation. This
+ * is used to determine whether to switch backup MOBs or not.
+ */
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+				  bool backoff)
+{
+	struct vmw_validation_res_node *val;
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+	list_for_each_entry(val, &ctx->resource_list, head) {
+		if (val->reserved)
+			vmw_resource_unreserve(val->res,
+					       !backoff &&
+					       val->switching_backup,
+					       val->new_backup,
+					       val->new_backup_offset);
+	}
+}
+
+/**
+ * vmw_validation_bo_validate_single - Validate a single buffer object.
+ * @bo: The TTM buffer object base.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether to validate in MOB memory.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob)
+{
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
+	struct ttm_operation_ctx ctx = {
+		.interruptible = interruptible,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (vbo->pin_count > 0)
+		return 0;
+
+	if (validate_as_mob)
+		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+
+	/*
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
+	 */
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+	if (ret == 0 || ret == -ERESTARTSYS)
+		return ret;
+
+	/*
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+	return ret;
+}
+
+/**
+ * vmw_validation_bo_validate - Validate all buffer objects registered with
+ * the validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
+{
+	struct vmw_validation_bo_node *entry;
+	int ret;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head) {
+		if (entry->cpu_blit) {
+			struct ttm_operation_ctx ctx = {
+				.interruptible = intr,
+				.no_wait_gpu = false
+			};
+
+			ret = ttm_bo_validate(entry->base.bo,
+					      &vmw_nonfixed_placement, &ctx);
+		} else {
+			ret = vmw_validation_bo_validate_single
+				(entry->base.bo, intr, entry->as_mob);
+		}
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * vmw_validation_res_validate - Validate all resources registered with the
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
+{
+	struct vmw_validation_res_node *val;
+	int ret;
+
+	list_for_each_entry(val, &ctx->resource_list, head) {
+		struct vmw_resource *res = val->res;
+		struct vmw_buffer_object *backup = res->backup;
+
+		ret = vmw_resource_validate(res, intr);
+		if (ret) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to validate resource.\n");
+			return ret;
+		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_buffer_object *vbo = res->backup;
+
+			ret = vmw_validation_add_bo
+				(ctx, vbo, vmw_resource_needs_backup(res),
+				 false);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
+ * and unregister it from this validation context.
+ * @ctx: The validation context.
+ *
+ * The hash table used for duplicate finding is an expensive resource and
+ * may be protected by mutexes that may cause deadlocks during resource
+ * unreferencing if held. After resource- and buffer object registering,
+ * there is no longer any use for this hash table, so allow freeing it
+ * either to shorten any mutex locking time, or before resources- and
+ * buffer objects are freed during validation context cleanup.
+ */
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+{
+	struct vmw_validation_bo_node *entry;
+	struct vmw_validation_res_node *val;
+
+	if (!ctx->ht)
+		return;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head)
+		(void) drm_ht_remove_item(ctx->ht, &entry->hash);
+
+	list_for_each_entry(val, &ctx->resource_list, head)
+		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+	list_for_each_entry(val, &ctx->resource_ctx_list, head)
+		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+	ctx->ht = NULL;
+}
+
+/**
+ * vmw_validation_unref_lists - Unregister previously registered buffer
+ * object and resources.
+ * @ctx: The validation context.
+ *
+ * Note that this function may cause buffer object- and resource destructors
+ * to be invoked.
+ */
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
+{
+	struct vmw_validation_bo_node *entry;
+	struct vmw_validation_res_node *val;
+
+	list_for_each_entry(entry, &ctx->bo_list, base.head)
+		ttm_bo_unref(&entry->base.bo);
+
+	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+	list_for_each_entry(val, &ctx->resource_list, head)
+		vmw_resource_unreference(&val->res);
+
+	/*
+	 * No need to detach each list entry since they are all freed with
+	 * vmw_validation_free_mem. Just make them inaccessible.
+	 */
+	INIT_LIST_HEAD(&ctx->bo_list);
+	INIT_LIST_HEAD(&ctx->resource_list);
+
+	vmw_validation_mem_free(ctx);
+}
+
+/**
+ * vmw_validation_prepare - Prepare a validation context for command
+ * submission.
+ * @ctx: The validation context.
+ * @mutex: The mutex used to protect resource reservation.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Note that the single reservation mutex @mutex is an unfortunate
+ * construct. Ideally resource reservation should be moved to per-resource
+ * ww_mutexes.
+ * If this function doesn't return zero to indicate success, all resources
+ * are left unreserved but still referenced.
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on error.
+ */
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+			   struct mutex *mutex,
+			   bool intr)
+{
+	int ret = 0;
+
+	if (mutex) {
+		if (intr)
+			ret = mutex_lock_interruptible(mutex);
+		else
+			mutex_lock(mutex);
+		if (ret)
+			return -ERESTARTSYS;
+	}
+
+	ctx->res_mutex = mutex;
+	ret = vmw_validation_res_reserve(ctx, intr);
+	if (ret)
+		goto out_no_res_reserve;
+
+	ret = vmw_validation_bo_reserve(ctx, intr);
+	if (ret)
+		goto out_no_bo_reserve;
+
+	ret = vmw_validation_bo_validate(ctx, intr);
+	if (ret)
+		goto out_no_validate;
+
+	ret = vmw_validation_res_validate(ctx, intr);
+	if (ret)
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	vmw_validation_bo_backoff(ctx);
+out_no_bo_reserve:
+	vmw_validation_res_unreserve(ctx, true);
+out_no_res_reserve:
+	if (mutex)
+		mutex_unlock(mutex);
+
+	return ret;
+}
+
+/**
+ * vmw_validation_revert - Revert validation actions if command submission
+ * failed.
+ *
+ * @ctx: The validation context.
+ *
+ * The caller still needs to unref resources after a call to this function.
+ */
+void vmw_validation_revert(struct vmw_validation_context *ctx)
+{
+	vmw_validation_bo_backoff(ctx);
+	vmw_validation_res_unreserve(ctx, true);
+	if (ctx->res_mutex)
+		mutex_unlock(ctx->res_mutex);
+	vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_done - Commit validation actions after command submission
+ * success.
+ * @ctx: The validation context.
+ * @fence: Fence with which to fence all buffer objects taking part in the
+ * command submission.
+ *
+ * The caller does NOT need to unref resources after a call to this function.
+ */
+void vmw_validation_done(struct vmw_validation_context *ctx,
+			 struct vmw_fence_obj *fence)
+{
+	vmw_validation_bo_fence(ctx, fence);
+	vmw_validation_res_unreserve(ctx, false);
+	if (ctx->res_mutex)
+		mutex_unlock(ctx->res_mutex);
+	vmw_validation_unref_lists(ctx);
+}
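Taken together, prepare/revert/done give a submission a three-stage lifecycle. A sketch of the intended pairing; the command building and fence creation calls below are placeholders, not driver functions:

	static int submit(struct vmw_private *dev_priv,
			  struct vmw_validation_context *ctx)
	{
		struct vmw_fence_obj *fence;
		int ret;

		ret = vmw_validation_prepare(ctx, &dev_priv->cmdbuf_mutex, true);
		if (ret) {
			/* Nothing is reserved; drop the gathered references. */
			vmw_validation_unref_lists(ctx);
			return ret;
		}

		ret = build_and_fire_commands(dev_priv);	/* placeholder */
		if (ret) {
			/* Backs off reservations, unlocks, releases the lists. */
			vmw_validation_revert(ctx);
			return ret;
		}

		fence = create_fence(dev_priv);			/* placeholder */
		/* Fences the BOs, commits backup switches, releases the lists. */
		vmw_validation_done(ctx, fence);
		return 0;
	}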
+
+/**
+ * vmw_validation_preload_bo - Preload the validation memory allocator for a
+ * call to vmw_validation_add_bo().
+ * @ctx: Pointer to the validation context.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
+ * but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
+{
+	unsigned int size = sizeof(struct vmw_validation_bo_node);
+
+	if (!vmw_validation_mem_alloc(ctx, size))
+		return -ENOMEM;
+
+	ctx->mem_size_left += size;
+	return 0;
+}
+
+/**
+ * vmw_validation_preload_res - Preload the validation memory allocator for a
+ * call to vmw_validation_add_res().
+ * @ctx: Pointer to the validation context.
+ * @size: Size of the validation node extra data. See below.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
+ * sleep. An error is not fatal but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+			       unsigned int size)
+{
+	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
+				    size) +
+	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
+	if (!vmw_validation_mem_alloc(ctx, size))
+		return -ENOMEM;
+
+	ctx->mem_size_left += size;
+	return 0;
+}
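The preload calls exist so a later registration can run in atomic context: preload while sleeping is still allowed, then add under the lock with the no-sleep guarantee. A hedged sketch of that split (the lock and wrapper are hypothetical):

	static int add_bo_atomically(struct vmw_validation_context *ctx,
				     spinlock_t *lock,
				     struct vmw_buffer_object *vbo)
	{
		int ret = vmw_validation_preload_bo(ctx);	/* may sleep */

		if (ret)
			return ret;

		spin_lock(lock);
		/* The allocator is guaranteed not to sleep after preload. */
		ret = vmw_validation_add_bo(ctx, vbo, false, false);
		spin_unlock(lock);

		return ret;
	}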
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_VALIDATION_H_
+#define _VMWGFX_VALIDATION_H_
+
+#include <drm/drm_hashtab.h>
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+/**
+ * struct vmw_validation_context - Per command submission validation context
+ * @ht: Hash table used to find resource- or buffer object duplicates
+ * @resource_list: List head for resource validation metadata
+ * @resource_ctx_list: List head for resource validation metadata for
+ * resources that need to be validated before those in @resource_list
+ * @bo_list: List head for buffer objects
+ * @page_list: List of pages used by the memory allocator
+ * @ticket: Ticket used for ww mutex locking
+ * @res_mutex: Pointer to mutex used for resource reserving
+ * @merge_dups: Whether to merge metadata for duplicate resources or
+ * buffer objects
+ * @mem_size_left: Free memory left in the last page in @page_list
+ * @page_address: Kernel virtual address of the last page in @page_list
+ */
+struct vmw_validation_context {
+	struct drm_open_hash *ht;
+	struct list_head resource_list;
+	struct list_head resource_ctx_list;
+	struct list_head bo_list;
+	struct list_head page_list;
+	struct ww_acquire_ctx ticket;
+	struct mutex *res_mutex;
+	unsigned int merge_dups;
+	unsigned int mem_size_left;
+	u8 *page_address;
+};
+
+struct vmw_buffer_object;
+struct vmw_resource;
+struct vmw_fence_obj;
+
+#if 0
+/**
+ * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+ * @_name: The name of the variable
+ * @_ht: The hash table used to find dups or NULL if none
+ * @_merge_dups: Whether to merge duplicate buffer object- or resource
+ * entries. If set to true, ideally a hash table pointer should be supplied
+ * as well unless the number of resources and buffer objects per validation
+ * is known to be very small
+ */
+#endif
+#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)			\
+	struct vmw_validation_context _name =				\
+	{ .ht = _ht,							\
+	  .resource_list = LIST_HEAD_INIT((_name).resource_list),	\
+	  .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+	  .bo_list = LIST_HEAD_INIT((_name).bo_list),			\
+	  .page_list = LIST_HEAD_INIT((_name).page_list),		\
+	  .res_mutex = NULL,						\
+	  .merge_dups = _merge_dups,					\
+	  .mem_size_left = 0,						\
+	}
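The macro yields a fully initialized on-stack context; when duplicate merging is wanted for larger submissions, a hash table can be attached after declaration. A short illustrative snippet (the function and table are hypothetical):

	static void contexts_example(struct drm_open_hash *ht)
	{
		/* Small submission: no duplicate merging needed. */
		DECLARE_VAL_CONTEXT(small_ctx, NULL, 0);

		/* Larger submission: merge duplicates via a caller-owned table. */
		DECLARE_VAL_CONTEXT(big_ctx, NULL, 1);

		vmw_validation_set_ht(&big_ctx, ht);

		/* ... register objects, prepare, submit ... */
	}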
+
+/**
+ * vmw_validation_has_bos - Return whether the validation context has
+ * any buffer objects registered.
+ *
+ * @ctx: The validation context
+ * Returns: Whether any buffer objects are registered
+ */
+static inline bool
+vmw_validation_has_bos(struct vmw_validation_context *ctx)
+{
+	return !list_empty(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_set_ht - Register a hash table for duplicate finding
+ * @ctx: The validation context
+ * @ht: Pointer to a hash table to use for duplicate finding
+ *
+ * This function is intended to be used if the hash table wasn't
+ * available at validation context declaration time
+ */
+static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+					 struct drm_open_hash *ht)
+{
+	ctx->ht = ht;
+}
+
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Perform waits interruptible
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+static inline int
+vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
+			  bool intr)
+{
+	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
+				      NULL);
+}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+static inline void
+vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
+ * with a validation context
+ * @ctx: The validation context
+ * @fence: The fence with which to fence the registered buffer objects
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve, and fences them with a fence object.
+ */
+static inline void
+vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+			struct vmw_fence_obj *fence)
+{
+	ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
+				    (void *) fence);
+}
+
+/**
+ * vmw_validation_context_init - Initialize a validation context
+ * @ctx: Pointer to the validation context to initialize
+ *
+ * This function initializes a validation context with @merge_dups set
+ * to false
+ */
+static inline void
+vmw_validation_context_init(struct vmw_validation_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+	INIT_LIST_HEAD(&ctx->resource_list);
+	INIT_LIST_HEAD(&ctx->resource_ctx_list);
+	INIT_LIST_HEAD(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_align - Align a validation memory allocation
+ * @val: The size to be aligned
+ *
+ * Returns: @val aligned to the granularity used by the validation memory
+ * allocator.
+ */
+static inline unsigned int vmw_validation_align(unsigned int val)
+{
+	return ALIGN(val, sizeof(long));
+}
+
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+			  struct vmw_buffer_object *vbo,
+			  bool as_mob, bool cpu_blit);
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+				struct vmw_resource *res,
+				size_t priv_size,
+				void **p_node,
+				bool *first_usage);
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+			       bool intr);
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+				  bool backoff);
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+				      void *val_private,
+				      struct vmw_buffer_object *vbo,
+				      unsigned long backup_offset);
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
+
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+			   struct mutex *mutex, bool intr);
+void vmw_validation_revert(struct vmw_validation_context *ctx);
+void vmw_validation_done(struct vmw_validation_context *ctx,
+			 struct vmw_fence_obj *fence);
+
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+			       unsigned int size);
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+			       unsigned int size);
+#endif
@@ -312,6 +312,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
 	return bo;
 }

+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+	if (!kref_get_unless_zero(&bo->kref))
+		return NULL;
+	return bo;
+}
+
 /**
  * ttm_bo_wait - wait for buffer idle.
  *
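As the kernel-doc notes, the helper is meant for lookup structures that remove the object in its destructor, including RCU lookups. A hedged sketch of such a lookup; the table and its lookup function are hypothetical:

	static struct ttm_buffer_object *bo_lookup_rcu(unsigned long key)
	{
		struct ttm_buffer_object *bo;

		rcu_read_lock();
		bo = my_table_lookup(key);	/* hypothetical RCU-safe lookup */
		if (bo)
			bo = ttm_bo_get_unless_zero(bo); /* NULL if already dying */
		rcu_read_unlock();

		return bo;	/* caller now owns a reference, or NULL */
	}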