drm/vmwgfx: Fix a circular locking dependency bug.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent: a87897edba
Commit: 85b9e4878f
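As far as the hunks below show, the fix has two parts: fence_seq becomes an atomic_t, so fence checks no longer need to take the FIFO rw_semaphore, and a new fifo_mutex serializes the reserve/commit window while the rwsem is only write-held around the actual commit. The sketch that follows is a minimal userspace analogue of that locking layout, using C11 atomics and pthreads in place of the kernel's atomic_t, struct mutex and rw_semaphore; every name and function body here is illustrative only, not the driver's code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t fifo_mutex = PTHREAD_MUTEX_INITIALIZER;   /* spans reserve()..commit() */
static pthread_rwlock_t fifo_rwsem = PTHREAD_RWLOCK_INITIALIZER; /* write-held only while committing */
static atomic_uint fence_seq;  /* emitted fence counter, readable with no FIFO lock */

static void fifo_reserve(uint32_t bytes)
{
	/* The patch replaces down_write(&rwsem) here with a dedicated mutex. */
	pthread_mutex_lock(&fifo_mutex);
	(void)bytes;  /* ... find room for the commands in the FIFO ... */
}

static void fifo_commit(void)
{
	/* The rwsem now only covers publishing the commands. */
	pthread_rwlock_wrlock(&fifo_rwsem);
	/* ... advance NEXT_CMD, clear the RESERVED marker ... */
	pthread_rwlock_unlock(&fifo_rwsem);
	/* ... ping the host ... */
	pthread_mutex_unlock(&fifo_mutex);
}

static uint32_t fence_emit(void)
{
	uint32_t seq;

	fifo_reserve(16);
	do {  /* skip 0, as the patched vmw_fifo_send_fence() does */
		seq = atomic_fetch_add(&fence_seq, 1) + 1;
	} while (seq == 0);
	/* ... write the fence command into the reserved space ... */
	fifo_commit();
	return seq;
}

static uint32_t fence_emitted(void)
{
	/* Readers need no lock at all; this is what lets vmw_fence_signaled()
	 * drop its down_read()/up_read() pair in the hunks below. */
	return atomic_load(&fence_seq);
}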
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
+	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 };
 
@@ -213,7 +214,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	uint32_t fence_seq;
+	atomic_t fence_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	fifo->reserved_size = 0;
 	fifo->using_bounce_buffer = false;
 
+	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
 	/*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	dev_priv->fence_seq = dev_priv->last_read_sequence;
+	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
 	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 	int ret;
 
-	down_write(&fifo_state->rwsem);
+	mutex_lock(&fifo_state->fifo_mutex);
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	}
 out_err:
 	fifo_state->reserved_size = 0;
-	up_write(&fifo_state->rwsem);
+	mutex_unlock(&fifo_state->fifo_mutex);
 	return NULL;
 }
 
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
 	}
 
+	down_write(&fifo_state->rwsem);
 	if (fifo_state->using_bounce_buffer || reserveable) {
 		next_cmd += bytes;
 		if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	if (reserveable)
 		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		down_write(&fifo_state->rwsem);
-		*sequence = dev_priv->fence_seq;
-		up_write(&fifo_state->rwsem);
+		*sequence = atomic_read(&dev_priv->fence_seq);
 		ret = -ENOMEM;
 		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
 					false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	}
 
 	do {
-		*sequence = dev_priv->fence_seq++;
+		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
 	} while (*sequence == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
@@ -84,20 +84,13 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 	    vmw_fifo_idle(dev_priv, sequence))
 		return true;
 
-	/**
-	 * Below is to signal stale fences that have wrapped.
-	 * First, block fence submission.
-	 */
-
-	down_read(&fifo_state->rwsem);
-
 	/**
 	 * Then check if the sequence is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-	up_read(&fifo_state->rwsem);
+	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	       > VMW_FENCE_WRAP);
 
 	return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = dev_priv->fence_seq;
+	signal_seq = atomic_read(&dev_priv->fence_seq);
 	ret = 0;
 
 	for (;;) {
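The vmw_fence_signaled() hunk above keeps the stale-fence test but drops the rwsem around it; the test relies on unsigned wraparound arithmetic against the emitted counter. The snippet below is a self-contained illustration of that check only, not driver code; FENCE_WRAP is an illustrative stand-in for the driver's VMW_FENCE_WRAP constant, not its real value.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_WRAP (1u << 24)  /* stand-in threshold; not the driver's value */

static atomic_uint fence_seq;  /* last emitted sequence */

/* Mirrors: (atomic_read(&dev_priv->fence_seq) - sequence) > VMW_FENCE_WRAP.
 * Unsigned subtraction keeps the comparison meaningful across 32-bit
 * wraparound: a sequence that the emitted counter has left far behind is
 * treated as long signaled (stale). */
static bool fence_is_stale(uint32_t sequence)
{
	return (atomic_load(&fence_seq) - sequence) > FENCE_WRAP;
}

int main(void)
{
	atomic_store(&fence_seq, 1000);
	printf("recent fence stale? %d\n", fence_is_stale(995));   /* 0 */

	atomic_store(&fence_seq, 1000 + FENCE_WRAP + 1);
	printf("ancient fence stale? %d\n", fence_is_stale(1000)); /* 1 */

	/* Across 32-bit wraparound: an emitted counter of 5 is still "just
	 * ahead" of a sequence issued right before the counter wrapped. */
	atomic_store(&fence_seq, 5);
	printf("pre-wrap fence stale? %d\n", fence_is_stale(0xfffffff0u)); /* 0 */
	return 0;
}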