drm: Convert prime dma-buf <-> handle to rbtree

Currently we use a linear walk to look up a handle and return a dma-buf,
and vice versa. A long overdue TODO task is to convert that to a
hashtable. Since the initial implementation of dma-buf/prime, we now
have resizable hashtables we can use (and a future task is to RCU-enable
the lookup!). However, this patch opts to use an rbtree instead, to
provide O(lg N) lookups (and insertion, deletion). rbtrees were chosen
over the RCU-backed resizable hashtable firstly to avoid reallocations
(rbtrees can be embedded entirely within the parent struct) and secondly
to favour simpler code with predictable worst-case behaviour. In simple
testing (igt/benchmarks/prime_lookup), the difference in wall time
between the constant-time lookup and insertion of the rhashtable and the
rbtree was less than 10% - both are dramatic improvements over the
existing linear lists.

v2: Favour rbtree over rhashtable

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=94631
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20160926204414.23222-1-chris@chris-wilson.co.uk
Chris Wilson 2016-09-26 21:44:14 +01:00, committed by Daniel Vetter
Parent 188af070d4
Commit 077675c1e8
2 changed files with 77 additions and 13 deletions
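
For reference, the embedded-rbtree pattern that the patch applies to both
mappings boils down to the minimal sketch below. The names (example_member,
example_insert, example_lookup, key) are illustrative assumptions, not taken
from the patch; only the rbtree API calls (rb_entry, rb_link_node,
rb_insert_color) are the kernel's:

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical parent struct: the rb_node is embedded, so indexing the
 * member costs no allocation beyond the struct itself. */
struct example_member {
        struct rb_node node;
        u32 key;
};

/* Insert: walk down comparing keys, then link the new node and rebalance. */
static void example_insert(struct rb_root *root, struct example_member *new)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct example_member *pos;

                parent = *p;
                pos = rb_entry(parent, struct example_member, node);
                if (new->key > pos->key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
}

/* Lookup: the same comparison order as the insert, O(lg N) steps deep. */
static struct example_member *example_lookup(struct rb_root *root, u32 key)
{
        struct rb_node *rb = root->rb_node;

        while (rb) {
                struct example_member *pos;

                pos = rb_entry(rb, struct example_member, node);
                if (pos->key == key)
                        return pos;
                else if (pos->key < key)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

The patch below instantiates this pattern twice per drm_prime_member, once
keyed by handle and once keyed by the dma_buf pointer, so the same allocation
can be found from either direction.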

@@ -28,6 +28,7 @@
 #include <linux/export.h>
 #include <linux/dma-buf.h>
+#include <linux/rbtree.h>
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
@@ -61,9 +62,11 @@
  */
 struct drm_prime_member {
-        struct list_head entry;
         struct dma_buf *dma_buf;
         uint32_t handle;
+
+        struct rb_node dmabuf_rb;
+        struct rb_node handle_rb;
 };
 
 struct drm_prime_attachment {
@@ -75,6 +78,7 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                     struct dma_buf *dma_buf, uint32_t handle)
 {
         struct drm_prime_member *member;
+        struct rb_node **p, *rb;
 
         member = kmalloc(sizeof(*member), GFP_KERNEL);
         if (!member)
@@ -83,18 +87,56 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
         get_dma_buf(dma_buf);
         member->dma_buf = dma_buf;
         member->handle = handle;
-        list_add(&member->entry, &prime_fpriv->head);
+
+        rb = NULL;
+        p = &prime_fpriv->dmabufs.rb_node;
+        while (*p) {
+                struct drm_prime_member *pos;
+
+                rb = *p;
+                pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
+                if (dma_buf > pos->dma_buf)
+                        p = &rb->rb_right;
+                else
+                        p = &rb->rb_left;
+        }
+        rb_link_node(&member->dmabuf_rb, rb, p);
+        rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
+        rb = NULL;
+        p = &prime_fpriv->handles.rb_node;
+        while (*p) {
+                struct drm_prime_member *pos;
+
+                rb = *p;
+                pos = rb_entry(rb, struct drm_prime_member, handle_rb);
+                if (handle > pos->handle)
+                        p = &rb->rb_right;
+                else
+                        p = &rb->rb_left;
+        }
+        rb_link_node(&member->handle_rb, rb, p);
+        rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
+
         return 0;
 }
 
 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                       uint32_t handle)
 {
-        struct drm_prime_member *member;
+        struct rb_node *rb;
 
-        list_for_each_entry(member, &prime_fpriv->head, entry) {
+        rb = prime_fpriv->handles.rb_node;
+        while (rb) {
+                struct drm_prime_member *member;
+
+                member = rb_entry(rb, struct drm_prime_member, handle_rb);
                 if (member->handle == handle)
                         return member->dma_buf;
+                else if (member->handle < handle)
+                        rb = rb->rb_right;
+                else
+                        rb = rb->rb_left;
         }
 
         return NULL;
@@ -104,14 +146,23 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf,
                                        uint32_t *handle)
 {
-        struct drm_prime_member *member;
+        struct rb_node *rb;
 
-        list_for_each_entry(member, &prime_fpriv->head, entry) {
+        rb = prime_fpriv->dmabufs.rb_node;
+        while (rb) {
+                struct drm_prime_member *member;
+
+                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                 if (member->dma_buf == dma_buf) {
                         *handle = member->handle;
                         return 0;
+                } else if (member->dma_buf < dma_buf) {
+                        rb = rb->rb_right;
+                } else {
+                        rb = rb->rb_left;
                 }
         }
+
         return -ENOENT;
 }
@@ -166,13 +217,24 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                         struct dma_buf *dma_buf)
 {
-        struct drm_prime_member *member, *safe;
+        struct rb_node *rb;
 
-        list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+        rb = prime_fpriv->dmabufs.rb_node;
+        while (rb) {
+                struct drm_prime_member *member;
+
+                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                 if (member->dma_buf == dma_buf) {
+                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
+                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
                         dma_buf_put(dma_buf);
-                        list_del(&member->entry);
                         kfree(member);
+                        return;
+                } else if (member->dma_buf < dma_buf) {
+                        rb = rb->rb_right;
+                } else {
+                        rb = rb->rb_left;
                 }
         }
 }
@@ -759,12 +821,13 @@ EXPORT_SYMBOL(drm_prime_gem_destroy);
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
 {
-        INIT_LIST_HEAD(&prime_fpriv->head);
         mutex_init(&prime_fpriv->lock);
+        prime_fpriv->dmabufs = RB_ROOT;
+        prime_fpriv->handles = RB_ROOT;
 }
 
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 {
         /* by now drm_gem_release should've made sure the list is empty */
-        WARN_ON(!list_empty(&prime_fpriv->head));
+        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
 }

@@ -51,6 +51,7 @@
 #include <linux/platform_device.h>
 #include <linux/poll.h>
 #include <linux/ratelimit.h>
+#include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -371,10 +372,10 @@ struct drm_pending_event {
                                    we deliver the event, for tracing only */
 };
 
-/* initial implementaton using a linked list - todo hashtab */
 struct drm_prime_file_private {
-        struct list_head head;
         struct mutex lock;
+        struct rb_root dmabufs;
+        struct rb_root handles;
 };
 
 /** File private data */
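
As a recap of the data-structure change above: one drm_prime_member now
carries two embedded rb_nodes and is indexed in two trees, so removing it
must erase it from both indexes before freeing. A minimal, self-contained
sketch of that dual-index shape (the example_* names are assumptions for
illustration, not kernel identifiers):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical two-way map entry: one allocation, two embedded rb_nodes,
 * so the entry can be looked up either by handle or by buffer pointer. */
struct example_pair {
        struct rb_node handle_node;     /* in example_map.handles */
        struct rb_node buf_node;        /* in example_map.bufs */
        void *buf;
        u32 handle;
};

struct example_map {
        struct rb_root handles;         /* keyed by example_pair.handle */
        struct rb_root bufs;            /* keyed by example_pair.buf */
};

/* Removal mirrors drm_prime_remove_buf_handle_locked() above: erase the
 * entry from both trees before freeing it, otherwise the other tree would
 * be left holding a dangling node. */
static void example_remove(struct example_map *map, struct example_pair *pair)
{
        rb_erase(&pair->handle_node, &map->handles);
        rb_erase(&pair->buf_node, &map->bufs);
        kfree(pair);
}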