drm/ttm: Optimize reservation slightly
Reservation locking currently always takes place under the LRU spinlock. Hence, strictly there is no need for an atomic_cmpxchg call; we can use atomic_read followed by atomic_set, since nobody else will ever reserve without the LRU spinlock held. At least on Intel this should remove a locked bus cycle on a successful reserve.

Note that this commit may be obsoleted by the cross-device reservation work.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent: cdad05216c
Commit: 6c1e963cc5
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+	while (unlikely(atomic_read(&bo->reserved) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
+	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
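For illustration only, here is a minimal user-space sketch of the pattern this commit relies on, assuming C11 atomics and a pthread mutex standing in for the LRU spinlock. The names below (fake_bo, reserve_locked, unreserve) are made up for this sketch and are not the TTM API.

/*
 * Sketch of the idea in this commit, not the kernel code: when every
 * reserver already holds an outer lock (the LRU spinlock in TTM, a
 * pthread mutex here), the reserved flag cannot change between the
 * read and the write, so a plain load + store can replace the locked
 * compare-and-swap.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fake_bo {
	atomic_int reserved;	/* 0 = free, 1 = reserved */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold lru_lock, mirroring ttm_bo_reserve_locked(). */
static bool reserve_locked(struct fake_bo *bo)
{
	/* Before: a compare-and-swap on bo->reserved (a locked bus cycle). */
	/* After: read, then set; safe because nobody reserves without lru_lock. */
	if (atomic_load(&bo->reserved) != 0)
		return false;		/* already reserved; caller would wait */

	atomic_store(&bo->reserved, 1);
	return true;
}

static void unreserve(struct fake_bo *bo)
{
	atomic_store(&bo->reserved, 0);
}

A caller would take lru_lock, call reserve_locked(), and only then drop the lock; the read-then-set is safe only because every reserve path is serialized by that outer lock, which is exactly the invariant stated in the commit message.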