From 6c1e963cc5771c93d4ed7aa8bdd4322a7c918e9b Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Tue, 6 Nov 2012 11:31:51 +0000
Subject: [PATCH] drm/ttm: Optimize reservation slightly

Reservation locking currently always takes place under the LRU spinlock.
Hence, strictly there is no need for an atomic_cmpxchg call; we can use
atomic_read followed by atomic_set, since nobody else will ever reserve
without the lru spinlock held. At least on Intel this should remove a
locked bus cycle on a successful reserve.

Note that this commit may be obsoleted by the cross-device reservation
work.

Signed-off-by: Thomas Hellstrom
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/ttm/ttm_bo.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d1e5326d442c..5f61f133b419 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+	while (unlikely(atomic_read(&bo->reserved) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
+	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
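
For reference, a minimal sketch of the calling pattern the commit message
relies on: the caller takes the global LRU spinlock before entering
ttm_bo_reserve_locked(), so the atomic_read()/atomic_set() pair above cannot
race with another reserver. The body below is simplified and omits the LRU
bookkeeping the real ttm_bo_reserve() does; treat it as an illustration of
the locking assumption, not the driver's exact source.

/*
 * Simplified sketch (not the actual TTM source): every path that can set
 * bo->reserved goes through this function while holding glob->lru_lock,
 * which is what makes the non-atomic test-then-set in
 * ttm_bo_reserve_locked() safe.
 */
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	spin_lock(&glob->lru_lock);	/* serializes all reservers */
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait,
				    use_sequence, sequence);
	/* ... LRU list removal elided ... */
	spin_unlock(&glob->lru_lock);

	return ret;
}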