drm/i915/ringbuffer: Make IRQ refcnting atomic
In order to enforce the correct memory barriers for irq get/put, we need
to perform the actual counting using atomic operations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent: 8d5203ca62
Commit: b13c2b96bf
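
The change below converts ring->irq_refcount from a plain u32 into an
atomic_t and turns irq_get() into a fallible operation. The idiom, distilled
into a self-contained userspace C11 sketch (the names here are illustrative,
not the driver's; the kernel uses atomic_inc_return()/atomic_dec_and_test(),
which imply full memory barriers, rather than C11 atomics):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct toy_ring {
		atomic_int irq_refcount;
		bool irq_enabled;	/* stands in for dev->irq_enabled */
	};

	static void hw_irq_enable(struct toy_ring *ring)  { (void)ring; /* poke hardware */ }
	static void hw_irq_disable(struct toy_ring *ring) { (void)ring; /* poke hardware */ }

	static bool toy_irq_get(struct toy_ring *ring)
	{
		if (!ring->irq_enabled)
			return false;
		/* atomic_fetch_add() returns the old value: old == 0 means this
		 * caller performed the 0 -> 1 transition and must enable the
		 * interrupt. The RMW is sequentially consistent, so the enable
		 * happens exactly once however calls interleave. */
		if (atomic_fetch_add(&ring->irq_refcount, 1) == 0)
			hw_irq_enable(ring);
		return true;
	}

	static void toy_irq_put(struct toy_ring *ring)
	{
		/* old == 1 means this caller performed the 1 -> 0 transition. */
		if (atomic_fetch_sub(&ring->irq_refcount, 1) == 1)
			hw_irq_disable(ring);
	}
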
@@ -2000,17 +2000,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		ring->waiting_seqno = seqno;
-		ring->irq_get(ring);
-		if (interruptible)
-			ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(ring->get_seqno(ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
-		else
-			wait_event(ring->irq_queue,
-				i915_seqno_passed(ring->get_seqno(ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
+		ret = -ENODEV;
+		if (ring->irq_get(ring)) {
+			if (interruptible)
+				ret = wait_event_interruptible(ring->irq_queue,
+					i915_seqno_passed(ring->get_seqno(ring), seqno)
+					|| atomic_read(&dev_priv->mm.wedged));
+			else
+				wait_event(ring->irq_queue,
+					i915_seqno_passed(ring->get_seqno(ring), seqno)
+					|| atomic_read(&dev_priv->mm.wedged));
 
-		ring->irq_put(ring);
+			ring->irq_put(ring);
+		}
 		ring->waiting_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -3157,14 +3159,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->irq_get(ring);
-		ret = wait_event_interruptible(ring->irq_queue,
-			i915_seqno_passed(ring->get_seqno(ring), seqno)
-			|| atomic_read(&dev_priv->mm.wedged));
-		ring->irq_put(ring);
+		if (ring->irq_get(ring)) {
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(ring->get_seqno(ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
+			ring->irq_put(ring);
 
-		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-			ret = -EIO;
+			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+				ret = -EIO;
+		}
 	}
 
 	if (ret == 0)
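
Both hunks above (apparently i915_gem.c) show the new caller contract:
irq_get() may now fail, typically because dev->irq_enabled is false, and a
caller must not sleep on ring->irq_queue in that case since no interrupt
will ever wake it. A hypothetical caller skeleton under the same
assumptions, reusing the toy_* naming from the sketch above:

	#include <errno.h>
	#include <stdbool.h>

	struct toy_ring;
	bool toy_irq_get(struct toy_ring *ring);
	void toy_irq_put(struct toy_ring *ring);
	int  toy_wait_for_seqno(struct toy_ring *ring, unsigned int seqno);

	static int toy_do_wait(struct toy_ring *ring, unsigned int seqno)
	{
		int ret = -ENODEV;	/* reported when no IRQ can be had */

		if (toy_irq_get(ring)) {
			ret = toy_wait_for_seqno(ring, seqno);
			toy_irq_put(ring);	/* balance every successful get */
		}
		return ret;
	}
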
@@ -1186,10 +1186,9 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	if (dev_priv->trace_irq_seqno == 0)
-		ring->irq_get(ring);
-
-	dev_priv->trace_irq_seqno = seqno;
+	if (dev_priv->trace_irq_seqno == 0 &&
+	    ring->irq_get(ring))
+		dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
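
The i915_trace_irq_get() rewrite (apparently i915_dma.c) leans on C's
short-circuit &&: ring->irq_get() is only attempted when no trace IRQ is
outstanding, and the seqno is only latched when the get succeeded. A
minimal, purely illustrative demonstration of that evaluation order:

	#include <stdbool.h>
	#include <stdio.h>

	static bool acquire(void)
	{
		puts("acquire attempted");
		return false;	/* pretend the IRQ is unavailable */
	}

	int main(void)
	{
		unsigned int latched = 0;

		/* acquire() runs only if latched == 0; the assignment runs
		 * only if acquire() also returned true. */
		if (latched == 0 && acquire())
			latched = 42;
		printf("latched = %u\n", latched);	/* prints 0 */
		return 0;
	}
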
@@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ring->irq_get(ring);
-	DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
-		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	ring->irq_put(ring);
+	ret = -ENODEV;
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	}
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
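
In i915_wait_irq() the same pattern applies: ret is seeded with -ENODEV so
that a failed irq_get() surfaces as an error, while on the success path
DRM_WAIT_ON() overwrites ret with the outcome of the wait before irq_put()
drops the reference.
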
@@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring)
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void
+static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_graphics_irq(dev_priv,
 						     GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+
+	return true;
 }
 
 static void
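
render_ring_get_irq() (intel_ringbuffer.c) shows why the refcount has to be
atomic: the old ++ring->irq_refcount was a plain read-modify-write, so two
unsynchronized callers could both read 0 and both store 1; the count then
underestimates the references and the first irq_put() switches the
interrupt off while another waiter still depends on it. It also provided no
memory barrier ordering the count update against the register write.
atomic_inc_return() is a barrier-bearing RMW, so exactly one caller
observes the new value 1, and the dev->irq_enabled test becomes an early
"return false" that tells the caller the wait queue will never be signalled.
A small demonstration (C11 threads, illustrative only, needs a libc with
<threads.h>) that an atomic RMW hands out a unique winner:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <threads.h>

	static atomic_int refcount;
	static atomic_int winners;

	static int worker(void *arg)
	{
		(void)arg;
		/* Exactly one thread sees the old value 0. */
		if (atomic_fetch_add(&refcount, 1) == 0)
			atomic_fetch_add(&winners, 1);
		return 0;
	}

	int main(void)
	{
		thrd_t t[8];
		for (int i = 0; i < 8; i++)
			thrd_create(&t[i], worker, NULL);
		for (int i = 0; i < 8; i++)
			thrd_join(t[i], NULL);
		printf("winners: %d\n", atomic_load(&winners));	/* always 1 */
		return 0;
	}
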
@@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
-	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
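
On the put side, atomic_dec_and_test() returns true only for the 1 -> 0
transition and, like the increment, implies a full barrier. The old
dev->irq_enabled test and the BUG_ON() can go away because a put is now
only ever reached after a successful get: when interrupts are disabled,
irq_get() already returned false and the caller skipped the put entirely.
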
@@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static void
+static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
@@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		ironlake_enable_graphics_irq(dev_priv, flag);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+
+	return true;
 }
 
 static void
@@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
@@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 	}
 }
 
-
-static void
+static bool
 bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 static void
 bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
@@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static void
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static void
+static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
@@ -54,8 +54,8 @@ struct intel_ring_buffer {
 	u32 irq_seqno;		/* last seq seem at irq time */
 	u32 waiting_seqno;
 	u32 sync_seqno[I915_NUM_RINGS-1];
-	u32 irq_refcount;
-	void (*irq_get)(struct intel_ring_buffer *ring);
+	atomic_t irq_refcount;
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void (*irq_put)(struct intel_ring_buffer *ring);
 
 	int (*init)(struct intel_ring_buffer *ring);
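
The header change (intel_ringbuffer.h) is what makes the new contract
stick: irq_get() now returns bool and is marked __must_check, so a caller
that silently ignores the result draws a compiler warning. A sketch of the
mechanism (in the kernel __must_check comes from <linux/compiler.h>; the
definition below is its usual GCC expansion, and the example names are
hypothetical):

	#include <stdbool.h>

	#define __must_check __attribute__((warn_unused_result))

	struct example_ring;
	bool __must_check example_irq_get(struct example_ring *ring);

	void careless_caller(struct example_ring *ring)
	{
		example_irq_get(ring);	/* gcc: warning, ignored return value */
	}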