drm/i915: Store a direct lookup from object handle to vma

The advent of full-ppgtt led to an extra indirection between the object
and its binding. That extra indirection has a noticeable impact on how
fast we can convert from the user handles to our internal vma for
execbuffer. In order to bypass the extra indirection, we use a
resizable hashtable to jump from the object to the per-ctx vma.
rhashtable was considered but we don't need the online resizing feature
and the extra complexity proved to undermine its usefulness. Instead, we
simply reallocate the hashtable on demand in a background task and
serialize it before iterating.

In non-full-ppgtt modes, multiple files and multiple contexts can share
the same vma. This leads to having multiple possible handle->vma links,
so we only use the first to establish the fast path. The majority of
buffers are not shared and so we should still be able to realise
speedups with multiple clients.

v2: Prettier names, more magic.
v3: Many style tweaks, most notably hiding the misuse of execobj[].rsvd2

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent: 4c9c0d0974
Commit: 4ff4b44cbb
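For illustration, a minimal userspace sketch of the scheme the message describes: a per-context handle->vma hashtable with the same grow/shrink policy. All names here are hypothetical; the kernel code uses hlist buckets, hash_32() and a work item instead of plain C pointers.

#include <stddef.h>
#include <stdint.h>

struct vma;				/* stand-in for struct i915_vma */

struct lut_entry {
	uint32_t handle;		/* userspace object handle */
	struct vma *vma;		/* per-context binding */
	struct lut_entry *next;		/* chained bucket */
};

struct vma_lut {
	struct lut_entry **ht;		/* bucket array, length 1 << bits */
	unsigned int bits;		/* log2 of the bucket count */
	unsigned int count;		/* entries currently stored */
};

/* Same multiplicative hash the kernel's hash_32() uses. */
static unsigned int hash_handle(uint32_t handle, unsigned int bits)
{
	return (handle * 0x61C88647u) >> (32 - bits);
}

/* Fast path: handle -> vma without walking the object's vma tree. */
static struct vma *lut_lookup(const struct vma_lut *lut, uint32_t handle)
{
	const struct lut_entry *e = lut->ht[hash_handle(handle, lut->bits)];

	for (; e; e = e->next)
		if (e->handle == handle)
			return e->vma;
	return NULL;			/* miss: fall back to the slow path */
}

/* Grow above 75% load, shrink once the table is mostly empty. */
static int lut_needs_resize(const struct vma_lut *lut)
{
	unsigned int size = 1u << lut->bits;

	return 4 * lut->count > 3 * size || 4 * lut->count + 1 < size;
}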
drivers/gpu/drm/i915/i915_debugfs.c
@@ -1998,6 +1998,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_putc(m, '\n');
 		}
 
+		seq_printf(m,
+			   "\tvma hashtable size=%u (actual %lu), count=%u\n",
+			   ctx->vma_lut.ht_size,
+			   BIT(ctx->vma_lut.ht_bits),
+			   ctx->vma_lut.ht_count);
+
 		seq_putc(m, '\n');
 	}
 
drivers/gpu/drm/i915/i915_drv.h
@@ -37,7 +37,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/backlight.h>
-#include <linux/hashtable.h>
+#include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
drivers/gpu/drm/i915/i915_gem.c
@@ -3261,6 +3261,10 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 		if (vma->vm->file == fpriv)
 			i915_vma_close(vma);
 
+	vma = obj->vma_hashed;
+	if (vma && vma->ctx->file_priv == fpriv)
+		i915_vma_unlink_ctx(vma);
+
 	if (i915_gem_object_is_active(obj) &&
 	    !i915_gem_object_has_active_reference(obj)) {
 		i915_gem_object_set_active_reference(obj);
@@ -4254,7 +4258,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&obj->global_link);
 	INIT_LIST_HEAD(&obj->userfault_link);
-	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
drivers/gpu/drm/i915/i915_gem_context.c
@@ -85,6 +85,7 @@
  *
  */
 
+#include <linux/log2.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -92,6 +93,70 @@
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
+/* Initial size (as log2) to preallocate the handle->object hashtable */
+#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
+
+static void resize_vma_ht(struct work_struct *work)
+{
+	struct i915_gem_context_vma_lut *lut =
+		container_of(work, typeof(*lut), resize);
+	unsigned int bits, new_bits, size, i;
+	struct hlist_head *new_ht;
+
+	GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
+
+	bits = 1 + ilog2(4*lut->ht_count/3 + 1);
+	new_bits = min_t(unsigned int,
+			 max(bits, VMA_HT_BITS),
+			 sizeof(unsigned int) * BITS_PER_BYTE - 1);
+	if (new_bits == lut->ht_bits)
+		goto out;
+
+	new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
+	if (!new_ht)
+		new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
+	if (!new_ht)
+		/* Pretend resize succeeded and stop calling us for a bit! */
+		goto out;
+
+	size = BIT(lut->ht_bits);
+	for (i = 0; i < size; i++) {
+		struct i915_vma *vma;
+		struct hlist_node *tmp;
+
+		hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
+			hlist_add_head(&vma->ctx_node,
+				       &new_ht[hash_32(vma->ctx_handle,
+						       new_bits)]);
+	}
+	kvfree(lut->ht);
+	lut->ht = new_ht;
+	lut->ht_bits = new_bits;
+out:
+	smp_store_release(&lut->ht_size, BIT(bits));
+	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
+}
+
+static void vma_lut_free(struct i915_gem_context *ctx)
+{
+	struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
+	unsigned int i, size;
+
+	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
+		cancel_work_sync(&lut->resize);
+
+	size = BIT(lut->ht_bits);
+	for (i = 0; i < size; i++) {
+		struct i915_vma *vma;
+
+		hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
+			vma->obj->vma_hashed = NULL;
+			vma->ctx = NULL;
+		}
+	}
+	kvfree(lut->ht);
+}
+
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -101,6 +166,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+	vma_lut_free(ctx);
 	i915_ppgtt_put(ctx->ppgtt);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -118,6 +184,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 	kfree(ctx->name);
 	put_pid(ctx->pid);
+
 	list_del(&ctx->link);
 
 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -201,13 +268,24 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->i915 = dev_priv;
 	ctx->priority = I915_PRIORITY_NORMAL;
 
+	ctx->vma_lut.ht_bits = VMA_HT_BITS;
+	ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
+	BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
+	ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
+				  sizeof(*ctx->vma_lut.ht),
+				  GFP_KERNEL);
+	if (!ctx->vma_lut.ht)
+		goto err_out;
+
+	INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
+
 	/* Default context will never have a file_priv */
 	ret = DEFAULT_CONTEXT_HANDLE;
 	if (file_priv) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
 				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
-			goto err_out;
+			goto err_lut;
 	}
 	ctx->user_handle = ret;
 
@@ -248,6 +326,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 err_pid:
 	put_pid(ctx->pid);
 	idr_remove(&file_priv->context_idr, ctx->user_handle);
+err_lut:
+	kvfree(ctx->vma_lut.ht);
 err_out:
 	context_close(ctx);
 	return ERR_PTR(ret);
drivers/gpu/drm/i915/i915_gem_context.h
@@ -143,6 +143,32 @@ struct i915_gem_context {
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
 
+	struct i915_gem_context_vma_lut {
+		/** ht_size: last request size to allocate the hashtable for. */
+		unsigned int ht_size;
+#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
+		/** ht_bits: real log2(size) of hashtable. */
+		unsigned int ht_bits;
+		/** ht_count: current number of entries inside the hashtable */
+		unsigned int ht_count;
+
+		/** ht: the array of buckets comprising the simple hashtable */
+		struct hlist_head *ht;
+
+		/**
+		 * resize: After an execbuf completes, we check the load factor
+		 * of the hashtable. If the hashtable is too full, or too empty,
+		 * we schedule a task to resize the hashtable. During the
+		 * resize, the entries are moved between different buckets and
+		 * so we cannot simultaneously read the hashtable as it is
+		 * being resized (unlike rhashtable). Therefore we treat the
+		 * active work as a strong barrier, pausing a subsequent
+		 * execbuf to wait for the resize worker to complete, if
+		 * required.
+		 */
+		struct work_struct resize;
+	} vma_lut;
+
 	/** engine: per-engine logical HW state */
 	struct intel_context {
 		struct i915_vma *state;
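As a rough sketch of the barrier protocol described in the resize comment above (hypothetical demo_* names; work_struct, queue_work() and flush_work() are the real kernel APIs): writers tag the table and queue the worker, readers flush it before iterating.

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define DEMO_RESIZE_IN_PROGRESS BIT(0)	/* like I915_CTX_RESIZE_IN_PROGRESS */

struct demo_lut {
	unsigned int ht_size;		/* low bit doubles as the busy flag */
	struct work_struct resize;	/* worker rehashes into new buckets */
};

/* Writer side: mark the table busy, then rehash asynchronously. */
static void demo_schedule_resize(struct demo_lut *lut)
{
	lut->ht_size |= DEMO_RESIZE_IN_PROGRESS;
	queue_work(system_highpri_wq, &lut->resize);
}

/* Reader side: the pending work acts as a strong barrier. */
static void demo_begin_iteration(struct demo_lut *lut)
{
	if (lut->ht_size & DEMO_RESIZE_IN_PROGRESS)
		flush_work(&lut->resize);
}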
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -75,37 +75,42 @@ struct i915_execbuffer {
 		unsigned int page;
 		bool use_64bit_reloc : 1;
 	} reloc_cache;
-	int and;
-	union {
-		struct i915_vma **lut;
-		struct hlist_head *buckets;
-	};
+	int lut_mask;
+	struct hlist_head *buckets;
 };
 
+/*
+ * As an alternative to creating a hashtable of handle-to-vma for a batch,
+ * we used the last available reserved field in the execobject[] and stash
+ * a link from the execobj to its vma.
+ */
+#define __exec_to_vma(ee) (ee)->rsvd2
+#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))
+
 static int eb_create(struct i915_execbuffer *eb)
 {
-	eb->lut = NULL;
-	if (eb->args->flags & I915_EXEC_HANDLE_LUT) {
-		unsigned int size = eb->args->buffer_count;
-		size *= sizeof(struct i915_vma *);
-		eb->lut = kmalloc(size,
-				  GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	}
-
-	if (!eb->lut) {
-		unsigned int size = eb->args->buffer_count;
-		unsigned int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
-		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
-		while (count > 2*size)
-			count >>= 1;
-		eb->lut = kzalloc(count * sizeof(struct hlist_head),
-				  GFP_TEMPORARY);
-		if (!eb->lut)
-			return -ENOMEM;
-
-		eb->and = count - 1;
+	if ((eb->args->flags & I915_EXEC_HANDLE_LUT) == 0) {
+		unsigned int size = 1 + ilog2(eb->args->buffer_count);
+
+		do {
+			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
+					      GFP_TEMPORARY |
+					      __GFP_NORETRY |
+					      __GFP_NOWARN);
+			if (eb->buckets)
+				break;
+		} while (--size);
+
+		if (unlikely(!eb->buckets)) {
+			eb->buckets = kzalloc(sizeof(struct hlist_head),
+					      GFP_TEMPORARY);
+			if (unlikely(!eb->buckets))
+				return -ENOMEM;
+		}
+
+		eb->lut_mask = size;
 	} else {
-		eb->and = -eb->args->buffer_count;
+		eb->lut_mask = -eb->args->buffer_count;
 	}
 
 	return 0;
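A condensed sketch of the allocation strategy eb_create() uses above: optimistically request roughly one bucket per object without retrying hard, halve the request on failure, and fall back to a single bucket so the function only fails when even that cannot be allocated. The helper name is hypothetical and GFP_KERNEL stands in for the GFP_TEMPORARY used in the diff.

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

static struct hlist_head *demo_alloc_buckets(unsigned int count,
					     unsigned int *bits_out)
{
	unsigned int bits = 1 + ilog2(count);	/* count must be non-zero */
	struct hlist_head *ht;

	do {
		/* Optimistic: don't reclaim aggressively, don't warn. */
		ht = kzalloc(sizeof(*ht) << bits,
			     GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
		if (ht)
			break;
	} while (--bits);

	if (!ht) {
		/* Guaranteed minimum: a single bucket still works. */
		ht = kzalloc(sizeof(*ht), GFP_KERNEL);
		bits = 0;
	}

	*bits_out = bits;
	return ht;	/* NULL only if even one bucket was unobtainable */
}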
@@ -142,14 +147,160 @@ eb_reset(struct i915_execbuffer *eb)
 		vma->exec_entry = NULL;
 	}
 
-	if (eb->and >= 0)
-		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+	if (eb->lut_mask >= 0)
+		memset(eb->buckets, 0,
+		       sizeof(struct hlist_head) << eb->lut_mask);
 }
 
+static bool
+eb_add_vma(struct i915_execbuffer *eb, struct i915_vma *vma, int i)
+{
+	if (unlikely(vma->exec_entry)) {
+		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
+			  eb->exec[i].handle, i);
+		return false;
+	}
+	list_add_tail(&vma->exec_link, &eb->vmas);
+
+	vma->exec_entry = &eb->exec[i];
+	if (eb->lut_mask >= 0) {
+		vma->exec_handle = eb->exec[i].handle;
+		hlist_add_head(&vma->exec_node,
+			       &eb->buckets[hash_32(vma->exec_handle,
+						    eb->lut_mask)]);
+	}
+
+	i915_vma_get(vma);
+	__exec_to_vma(&eb->exec[i]) = (uintptr_t)vma;
+	return true;
+}
+
+static inline struct hlist_head *
+ht_head(const struct i915_gem_context *ctx, u32 handle)
+{
+	return &ctx->vma_lut.ht[hash_32(handle, ctx->vma_lut.ht_bits)];
+}
+
+static inline bool
+ht_needs_resize(const struct i915_gem_context *ctx)
+{
+	return (4*ctx->vma_lut.ht_count > 3*ctx->vma_lut.ht_size ||
+		4*ctx->vma_lut.ht_count + 1 < ctx->vma_lut.ht_size);
+}
+
+static int
+eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+#define INTERMEDIATE BIT(0)
+	const int count = eb->args->buffer_count;
+	struct i915_vma *vma;
+	int slow_pass = -1;
+	int i;
+
+	INIT_LIST_HEAD(&eb->vmas);
+
+	if (unlikely(eb->ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS))
+		flush_work(&eb->ctx->vma_lut.resize);
+	GEM_BUG_ON(eb->ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS);
+
+	for (i = 0; i < count; i++) {
+		__exec_to_vma(&eb->exec[i]) = 0;
+
+		hlist_for_each_entry(vma,
+				     ht_head(eb->ctx, eb->exec[i].handle),
+				     ctx_node) {
+			if (vma->ctx_handle != eb->exec[i].handle)
+				continue;
+
+			if (!eb_add_vma(eb, vma, i))
+				return -EINVAL;
+
+			goto next_vma;
+		}
+
+		if (slow_pass < 0)
+			slow_pass = i;
+next_vma: ;
+	}
+
+	if (slow_pass < 0)
+		return 0;
+
+	spin_lock(&eb->file->table_lock);
+	/* Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC */
+	for (i = slow_pass; i < count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		if (__exec_to_vma(&eb->exec[i]))
+			continue;
+
+		obj = to_intel_bo(idr_find(&eb->file->object_idr,
+					   eb->exec[i].handle));
+		if (unlikely(!obj)) {
+			spin_unlock(&eb->file->table_lock);
+			DRM_DEBUG("Invalid object handle %d at index %d\n",
+				  eb->exec[i].handle, i);
+			return -ENOENT;
+		}
+
+		__exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
+	}
+	spin_unlock(&eb->file->table_lock);
+
+	for (i = slow_pass; i < count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		if ((__exec_to_vma(&eb->exec[i]) & INTERMEDIATE) == 0)
+			continue;
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		obj = u64_to_ptr(struct drm_i915_gem_object,
+				 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
+		vma = i915_vma_instance(obj, eb->vm, NULL);
+		if (unlikely(IS_ERR(vma))) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			return PTR_ERR(vma);
+		}
+
+		/* First come, first served */
+		if (!vma->ctx) {
+			vma->ctx = eb->ctx;
+			vma->ctx_handle = eb->exec[i].handle;
+			hlist_add_head(&vma->ctx_node,
+				       ht_head(eb->ctx, eb->exec[i].handle));
+			eb->ctx->vma_lut.ht_count++;
+			if (i915_vma_is_ggtt(vma)) {
+				GEM_BUG_ON(obj->vma_hashed);
+				obj->vma_hashed = vma;
+			}
+		}
+
+		if (!eb_add_vma(eb, vma, i))
+			return -EINVAL;
+	}
+
+	if (ht_needs_resize(eb->ctx)) {
+		eb->ctx->vma_lut.ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
+		queue_work(system_highpri_wq, &eb->ctx->vma_lut.resize);
+	}
+
+	return 0;
+#undef INTERMEDIATE
+}
+
 static struct i915_vma *
 eb_get_batch(struct i915_execbuffer *eb)
 {
-	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);
+	struct i915_vma *vma =
+		exec_to_vma(&eb->exec[eb->args->buffer_count - 1]);
 
 	/*
 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
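The INTERMEDIATE bit above implements a tagged-pointer handoff through the execobject's reserved u64: pass one (under the spinlock) stashes the object pointer with the tag set, pass two replaces it with the final vma pointer, so no side allocation is needed to link the two passes. A standalone userspace sketch, with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define INTERMEDIATE 0x1ull	/* low bit is free: pointers are aligned */

struct demo_obj { int x; };
struct demo_vma { int x; };

/* Pass 1, under the spinlock: remember the object, tagged. */
static void stash_obj(uint64_t *rsvd2, struct demo_obj *obj)
{
	*rsvd2 = (uintptr_t)obj | INTERMEDIATE;
}

/* Pass 2, lock dropped: recover the object... */
static struct demo_obj *unstash_obj(uint64_t rsvd2)
{
	assert(rsvd2 & INTERMEDIATE);
	return (struct demo_obj *)(uintptr_t)(rsvd2 & ~INTERMEDIATE);
}

/* ...and overwrite the slot with the final vma link. */
static void stash_vma(uint64_t *rsvd2, struct demo_vma *vma)
{
	*rsvd2 = (uintptr_t)vma;
}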
@@ -166,113 +317,18 @@ eb_get_batch(struct i915_execbuffer *eb)
 	return vma;
 }
 
-static int
-eb_lookup_vmas(struct i915_execbuffer *eb)
-{
-	struct drm_i915_gem_object *obj;
-	struct list_head objects;
-	int i, ret;
-
-	INIT_LIST_HEAD(&eb->vmas);
-
-	INIT_LIST_HEAD(&objects);
-	spin_lock(&eb->file->table_lock);
-	/* Grab a reference to the object and release the lock so we can lookup
-	 * or create the VMA without using GFP_ATOMIC */
-	for (i = 0; i < eb->args->buffer_count; i++) {
-		obj = to_intel_bo(idr_find(&eb->file->object_idr, eb->exec[i].handle));
-		if (obj == NULL) {
-			spin_unlock(&eb->file->table_lock);
-			DRM_DEBUG("Invalid object handle %d at index %d\n",
-				  eb->exec[i].handle, i);
-			ret = -ENOENT;
-			goto err;
-		}
-
-		if (!list_empty(&obj->obj_exec_link)) {
-			spin_unlock(&eb->file->table_lock);
-			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
-				  obj, eb->exec[i].handle, i);
-			ret = -EINVAL;
-			goto err;
-		}
-
-		i915_gem_object_get(obj);
-		list_add_tail(&obj->obj_exec_link, &objects);
-	}
-	spin_unlock(&eb->file->table_lock);
-
-	i = 0;
-	while (!list_empty(&objects)) {
-		struct i915_vma *vma;
-
-		obj = list_first_entry(&objects,
-				       struct drm_i915_gem_object,
-				       obj_exec_link);
-
-		/*
-		 * NOTE: We can leak any vmas created here when something fails
-		 * later on. But that's no issue since vma_unbind can deal with
-		 * vmas which are not actually bound. And since only
-		 * lookup_or_create exists as an interface to get at the vma
-		 * from the (obj, vm) we don't run the risk of creating
-		 * duplicated vmas for the same vm.
-		 */
-		vma = i915_vma_instance(obj, eb->vm, NULL);
-		if (unlikely(IS_ERR(vma))) {
-			DRM_DEBUG("Failed to lookup VMA\n");
-			ret = PTR_ERR(vma);
-			goto err;
-		}
-
-		/* Transfer ownership from the objects list to the vmas list. */
-		list_add_tail(&vma->exec_link, &eb->vmas);
-		list_del_init(&obj->obj_exec_link);
-
-		vma->exec_entry = &eb->exec[i];
-		if (eb->and < 0) {
-			eb->lut[i] = vma;
-		} else {
-			u32 handle =
-				eb->args->flags & I915_EXEC_HANDLE_LUT ?
-				i : eb->exec[i].handle;
-			vma->exec_handle = handle;
-			hlist_add_head(&vma->exec_node,
-				       &eb->buckets[handle & eb->and]);
-		}
-		++i;
-	}
-
-	return 0;
-
-err:
-	while (!list_empty(&objects)) {
-		obj = list_first_entry(&objects,
-				       struct drm_i915_gem_object,
-				       obj_exec_link);
-		list_del_init(&obj->obj_exec_link);
-		i915_gem_object_put(obj);
-	}
-	/*
-	 * Objects already transfered to the vmas list will be unreferenced by
-	 * eb_destroy.
-	 */
-
-	return ret;
-}
-
-static struct i915_vma *eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
+static struct i915_vma *
+eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
 {
-	if (eb->and < 0) {
-		if (handle >= -eb->and)
+	if (eb->lut_mask < 0) {
+		if (handle >= -eb->lut_mask)
 			return NULL;
-		return eb->lut[handle];
+		return exec_to_vma(&eb->exec[handle]);
 	} else {
 		struct hlist_head *head;
 		struct i915_vma *vma;
 
-		head = &eb->buckets[handle & eb->and];
+		head = &eb->buckets[hash_32(handle, eb->lut_mask)];
 		hlist_for_each_entry(vma, head, exec_node) {
 			if (vma->exec_handle == handle)
 				return vma;
@@ -296,7 +352,7 @@ static void eb_destroy(struct i915_execbuffer *eb)
 
 	i915_gem_context_put(eb->ctx);
 
-	if (eb->buckets)
+	if (eb->lut_mask >= 0)
 		kfree(eb->buckets);
 }
 
@@ -916,7 +972,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		need_fence =
 			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
 			 needs_unfenced_map) &&
-			i915_gem_object_is_tiled(obj);
+			i915_gem_object_is_tiled(vma->obj);
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (entry->flags & EXEC_OBJECT_PINNED)
drivers/gpu/drm/i915/i915_gem_object.h
@@ -86,6 +86,7 @@ struct drm_i915_gem_object {
 	 * They are also added to @vma_list for easy iteration.
 	 */
 	struct rb_root vma_tree;
+	struct i915_vma *vma_hashed;
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
@@ -100,9 +101,6 @@ struct drm_i915_gem_object {
 	 */
 	struct list_head userfault_link;
 
-	/** Used in execbuf to temporarily hold a ref */
-	struct list_head obj_exec_link;
-
 	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
drivers/gpu/drm/i915/i915_utils.h
@@ -99,6 +99,11 @@
 	__T;						\
 })
 
+#define u64_to_ptr(T, x) ({				\
+	typecheck(u64, x);				\
+	(T *)(uintptr_t)(x);				\
+})
+
 #define __mask_next_bit(mask) ({			\
 	int __idx = ffs(mask) - 1;			\
 	mask &= ~BIT(__idx);				\
drivers/gpu/drm/i915/i915_vma.c
@@ -590,11 +590,31 @@ static void i915_vma_destroy(struct i915_vma *vma)
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
 
+void i915_vma_unlink_ctx(struct i915_vma *vma)
+{
+	struct i915_gem_context *ctx = vma->ctx;
+
+	if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
+		cancel_work_sync(&ctx->vma_lut.resize);
+		ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
+	}
+
+	__hlist_del(&vma->ctx_node);
+	ctx->vma_lut.ht_count--;
+
+	if (i915_vma_is_ggtt(vma))
+		vma->obj->vma_hashed = NULL;
+	vma->ctx = NULL;
+}
+
 void i915_vma_close(struct i915_vma *vma)
 {
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 	vma->flags |= I915_VMA_CLOSED;
 
+	if (vma->ctx)
+		i915_vma_unlink_ctx(vma);
+
 	list_del(&vma->obj_link);
 	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
drivers/gpu/drm/i915/i915_vma.h
@@ -99,6 +99,7 @@ struct i915_vma {
 
 	struct list_head obj_link; /* Link in the object's VMA list */
 	struct rb_node obj_node;
+	struct hlist_node obj_hash;
 
 	/** This vma's place in the execbuf reservation list */
 	struct list_head exec_link;
@@ -110,8 +111,12 @@ struct i915_vma {
 	 * Used for performing relocations during execbuffer insertion.
 	 */
 	struct hlist_node exec_node;
-	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
+	u32 exec_handle;
+
+	struct i915_gem_context *ctx;
+	struct hlist_node ctx_node;
+	u32 ctx_handle;
 };
 
 struct i915_vma *
@@ -235,6 +240,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
+void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
 
 int __i915_vma_do_pin(struct i915_vma *vma,
drivers/gpu/drm/i915/selftests/mock_context.c
@@ -40,10 +40,18 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
 
+	ctx->vma_lut.ht_bits = VMA_HT_BITS;
+	ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
+	ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
+				  sizeof(*ctx->vma_lut.ht),
+				  GFP_KERNEL);
+	if (!ctx->vma_lut.ht)
+		goto err_free;
+
 	ret = ida_simple_get(&i915->context_hw_ida,
 			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
 	if (ret < 0)
-		goto err_free;
+		goto err_vma_ht;
 	ctx->hw_id = ret;
 
 	if (name) {
@@ -58,6 +66,8 @@ mock_context(struct drm_i915_private *i915,
 
 	return ctx;
 
+err_vma_ht:
+	kvfree(ctx->vma_lut.ht);
 err_free:
 	kfree(ctx);
 	return NULL;