diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..ee06c8781cd4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 dma_status_reg;
+
+	dma_status_reg = RREG32(DMA_STATUS_REG);
+	if (dma_status_reg & DMA_IDLE) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
 	return r600_gpu_soft_reset(rdev);
@@ -1594,6 +1617,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_TILING_CONFIG, tiling_config);
 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
 
 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1895,7 @@ void r600_cp_stop(struct radeon_device *rdev)
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2221,128 @@ void r600_cp_fini(struct radeon_device *rdev)
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
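For illustration only, not part of the patch: the DMA packet format mentioned in the comment above is built with the DMA_PACKET() macro that this patch adds to r600d.h. A minimal sketch of how the 4-dword WRITE packet used by the DMA ring test below is laid out; the example function name and plain C types are assumptions made for the sketch.

/* Sketch only: mirrors the DMA_PACKET() macro and DMA_PACKET_WRITE opcode
 * added to r600d.h by this patch; not driver code.
 */
#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
					 (((t) & 0x1) << 23) |		\
					 (((s) & 0x1) << 22) |		\
					 (((n) & 0xFFFF) << 0))
#define DMA_PACKET_WRITE		0x2

static void example_encode_dma_write(unsigned long long gpu_addr, unsigned int pkt[4])
{
	pkt[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);	/* opcode 0x2, 1 data dword: 0x20000001 */
	pkt[1] = gpu_addr & 0xfffffffc;			/* destination bits 31:0, dword aligned */
	pkt[2] = (gpu_addr >> 32) & 0xff;		/* destination bits 39:32 */
	pkt[3] = 0xDEADBEEF;				/* the dword to write */
}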
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = drm_order(ring->ring_size / 4);
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
 
 /*
  * GPU scratch registers helpers function.
@@ -2252,6 +2399,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	return r;
 }
 
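Illustrative aside, not part of the patch: r600_dma_resume() above and the DMA packets that follow all split GPU addresses the same way, a dword-aligned low dword plus an 8-bit high dword carrying bits 39:32. A small sketch of that split; the helper names are made up for the example, while the masks are the ones used throughout the patch.

/* Sketch only: the address split used for DMA_RB_RPTR_ADDR_LO/HI and for
 * the address dwords of the DMA packets in this patch.
 */
static void example_split_dma_addr(unsigned long long gpu_addr,
				   unsigned int *lo, unsigned int *hi)
{
	*lo = gpu_addr & 0xfffffffc;	/* bits 31:2, dword aligned */
	*hi = (gpu_addr >> 32) & 0xff;	/* bits 39:32 only */
}

/* e.g. gpu_addr 0x1234567890 -> lo 0x34567890, hi 0x12 */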
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
@@ -2315,6 +2520,58 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 }
 
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence **fence)
 {
@@ -2334,6 +2591,80 @@ int r600_copy_blit(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */ +int r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_dw, cur_size_in_dw; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; + num_loops = DIV_ROUND_UP(size_in_dw, 0xffff); + r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_dw = size_in_dw; + if (cur_size_in_dw > 0xFFFF) + cur_size_in_dw = 0xFFFF; + size_in_dw -= cur_size_in_dw; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, src_offset & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + src_offset += cur_size_in_dw * 4; + dst_offset += cur_size_in_dw * 4; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size) @@ -2349,7 +2680,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) static int r600_startup(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_ring *ring; int r; /* enable pcie gen2 link */ @@ -2394,6 +2725,12 @@ static int r600_startup(struct radeon_device *rdev) return r; } + r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); + if (r) { + dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); + return r; + } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -2403,12 +2740,20 @@ static int r600_startup(struct radeon_device *rdev) } r600_irq_set(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); - if (r) return r; + + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, + DMA_RB_RPTR, DMA_RB_WPTR, + 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + if (r) + return r; + r = r600_cp_load_microcode(rdev); if (r) return r; @@ -2416,6 +2761,10 @@ static int r600_startup(struct radeon_device *rdev) if (r) return r; + r = r600_dma_resume(rdev); + if (r) + return r; + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -2471,7 +2820,7 @@ int r600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r600_cp_stop(rdev); - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; + r600_dma_stop(rdev); r600_irq_suspend(rdev); radeon_wb_disable(rdev); 
r600_pcie_gart_disable(rdev); @@ -2544,6 +2893,9 @@ int r600_init(struct radeon_device *rdev) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); + rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; + r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -2556,6 +2908,7 @@ int r600_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r600_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -2572,6 +2925,7 @@ void r600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r600_blit_fini(rdev); r600_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -2674,6 +3028,104 @@ free_scratch: return r; } +/** + * r600_dma_ib_test - test an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test a simple IB in the DMA ring (r6xx-SI). + * Returns 0 on success, error on failure. + */ +int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_ib ib; + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp = 0; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); + ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; + ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[3] = 0xDEADBEEF; + ib.length_dw = 4; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + radeon_ib_free(rdev, &ib); + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + return r; + } + r = radeon_fence_wait(ib.fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < rdev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); + } else { + DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + radeon_ib_free(rdev, &ib); + return r; +} + +/** + * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (r6xx-r7xx). + */ +void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 4; + while ((next_rptr & 7) != 5) + next_rptr++; + next_rptr += 3; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); + radeon_ring_write(ring, next_rptr); + } + + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. 
+ */ + while ((ring->wptr & 7) != 5) + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); + radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + /* * Interrupts * @@ -2865,6 +3317,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) u32 tmp; WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; + WREG32(DMA_CNTL, tmp); WREG32(GRBM_INT_CNTL, 0); WREG32(DxMODE_INT_MASK, 0); WREG32(D1GRPH_INTERRUPT_CONTROL, 0); @@ -3006,6 +3460,7 @@ int r600_irq_set(struct radeon_device *rdev) u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; u32 d1grph = 0, d2grph = 0; + u32 dma_cntl; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -3040,12 +3495,19 @@ int r600_irq_set(struct radeon_device *rdev) hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; } + dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("r600_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; } + + if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { + DRM_DEBUG("r600_irq_set: sw int dma\n"); + dma_cntl |= TRAP_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0] || atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("r600_irq_set: vblank 0\n"); @@ -3090,6 +3552,7 @@ int r600_irq_set(struct radeon_device *rdev) } WREG32(CP_INT_CNTL, cp_int_cntl); + WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); @@ -3469,6 +3932,10 @@ restart_ih: DRM_DEBUG("IH: CP EOP\n"); radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); break; + case 224: /* DMA trap event */ + DRM_DEBUG("IH: DMA trap\n"); + radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); + break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); break; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index fa6f37099ba9..a596c554a3a0 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -590,9 +590,59 @@ #define WAIT_2D_IDLECLEAN_bit (1 << 16) #define WAIT_3D_IDLECLEAN_bit (1 << 17) +/* async DMA */ +#define DMA_TILING_CONFIG 0x3ec4 +#define DMA_CONFIG 0x3e4c + +#define DMA_RB_CNTL 0xd000 +# define DMA_RB_ENABLE (1 << 0) +# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ +# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) +# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ +#define DMA_RB_BASE 0xd004 +#define DMA_RB_RPTR 0xd008 +#define DMA_RB_WPTR 0xd00c + +#define DMA_RB_RPTR_ADDR_HI 0xd01c +#define DMA_RB_RPTR_ADDR_LO 0xd020 + +#define DMA_IB_CNTL 0xd024 +# define DMA_IB_ENABLE (1 << 0) +# define DMA_IB_SWAP_ENABLE (1 << 4) +#define DMA_IB_RPTR 0xd028 +#define DMA_CNTL 0xd02c +# define TRAP_ENABLE (1 << 0) +# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) +# define SEM_WAIT_INT_ENABLE (1 << 2) +# define DATA_SWAP_ENABLE (1 << 3) +# define FENCE_SWAP_ENABLE (1 << 4) +# define CTXEMPTY_INT_ENABLE (1 << 28) +#define DMA_STATUS_REG 0xd034 +# define DMA_IDLE (1 << 0) +#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 +#define 
DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 +#define DMA_MODE 0xd0bc + +/* async DMA packets */ +#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ + (((t) & 0x1) << 23) | \ + (((s) & 0x1) << 22) | \ + (((n) & 0xFFFF) << 0)) +/* async DMA Packet types */ +#define DMA_PACKET_WRITE 0x2 +#define DMA_PACKET_COPY 0x3 +#define DMA_PACKET_INDIRECT_BUFFER 0x4 +#define DMA_PACKET_SEMAPHORE 0x5 +#define DMA_PACKET_FENCE 0x6 +#define DMA_PACKET_TRAP 0x7 +#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */ +#define DMA_PACKET_NOP 0xf + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) -# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ +# define IH_RB_SIZE(x) ((x) << 1) /* log2 */ # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ @@ -637,7 +687,9 @@ #define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 #define SRBM_SOFT_RESET 0xe60 +# define SOFT_RESET_DMA (1 << 12) # define SOFT_RESET_RLC (1 << 13) +# define RV770_SOFT_RESET_DMA (1 << 20) #define CP_INT_CNTL 0xc124 # define CNTX_BUSY_INT_ENABLE (1 << 19) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8c42d54c2e26..461bf53709f5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -109,7 +109,7 @@ extern int radeon_lockup_timeout; #define RADEON_BIOS_NUM_SCRATCH 8 /* max number of rings */ -#define RADEON_NUM_RINGS 3 +#define RADEON_NUM_RINGS 4 /* fence seq are set to this number when signaled */ #define RADEON_FENCE_SIGNALED_SEQ 0LL @@ -122,6 +122,9 @@ extern int radeon_lockup_timeout; #define CAYMAN_RING_TYPE_CP1_INDEX 1 #define CAYMAN_RING_TYPE_CP2_INDEX 2 +/* R600+ has an async dma ring */ +#define R600_RING_TYPE_DMA_INDEX 3 + /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) @@ -787,6 +790,11 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); +/* r600 async dma */ +void r600_dma_stop(struct radeon_device *rdev); +int r600_dma_resume(struct radeon_device *rdev); +void r600_dma_fini(struct radeon_device *rdev); + /* * CS. 
*/ @@ -883,6 +891,7 @@ struct radeon_wb { #define RADEON_WB_CP_RPTR_OFFSET 1024 #define RADEON_WB_CP1_RPTR_OFFSET 1280 #define RADEON_WB_CP2_RPTR_OFFSET 1536 +#define R600_WB_DMA_RPTR_OFFSET 1792 #define R600_WB_IH_WPTR_OFFSET 2048 #define R600_WB_EVENT_OFFSET 3072 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 654520b95ab7..3cf9b29fb53f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -963,8 +972,8 @@ static struct radeon_asic r600_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, @@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -1038,8 +1056,8 @@ static struct radeon_asic rs780_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, @@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -1113,8 +1140,8 @@ static struct radeon_asic rv770_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5e3a0e5c6be1..70a5b1f0e43e 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -309,6 +309,14 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); +void r600_dma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); +void 
r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); +bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r600_asic_reset(struct radeon_device *rdev); int r600_set_surface_reg(struct radeon_device *rdev, int reg, @@ -316,11 +324,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t offset, uint32_t obj_size); void r600_clear_surface_reg(struct radeon_device *rdev, int reg); int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); +int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); +int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); +int r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, struct radeon_fence **fence); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 79814a08c8e5..87c979c4f721 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); WREG32(SCRATCH_UMSK, 0); + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; } static int rv770_cp_load_microcode(struct radeon_device *rdev) @@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff)); + WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff)); WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); @@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev) static int rv770_startup(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_ring *ring; int r; /* enable pcie gen2 link */ @@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev) return r; } + r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); + if (r) { + dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); + return r; + } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev) } r600_irq_set(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (r) return r; + + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, + DMA_RB_RPTR, DMA_RB_WPTR, + 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + if (r) + return r; + r = rv770_cp_load_microcode(rdev); if (r) return r; @@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; + r = 
r600_dma_resume(rdev); + if (r) + return r; + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r700_cp_stop(rdev); - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; + r600_dma_stop(rdev); r600_irq_suspend(rdev); radeon_wb_disable(rdev); rv770_pcie_gart_disable(rdev); @@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); + rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; + r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev) { r600_blit_fini(rdev); r700_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index e2d9dc8e751e..20e29d23d348 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -109,6 +109,9 @@ #define PIPE_TILING__SHIFT 1 #define PIPE_TILING__MASK 0x0000000e +#define DMA_TILING_CONFIG 0x3ec8 +#define DMA_TILING_CONFIG2 0xd0b8 + #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 @@ -358,6 +361,26 @@ #define WAIT_UNTIL 0x8040 +/* async DMA */ +#define DMA_RB_RPTR 0xd008 +#define DMA_RB_WPTR 0xd00c + +/* async DMA packets */ +#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ + (((t) & 0x1) << 23) | \ + (((s) & 0x1) << 22) | \ + (((n) & 0xFFFF) << 0)) +/* async DMA Packet types */ +#define DMA_PACKET_WRITE 0x2 +#define DMA_PACKET_COPY 0x3 +#define DMA_PACKET_INDIRECT_BUFFER 0x4 +#define DMA_PACKET_SEMAPHORE 0x5 +#define DMA_PACKET_FENCE 0x6 +#define DMA_PACKET_TRAP 0x7 +#define DMA_PACKET_CONSTANT_FILL 0xd +#define DMA_PACKET_NOP 0xf + + #define SRBM_STATUS 0x0E50 /* DCE 3.2 HDMI */
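Editorial sketch, not part of the patch: r600_copy_dma() in the r600.c hunk above splits a transfer into COPY packets of at most 0xFFFF dwords each, and reserves five ring dwords per packet plus eight extra dwords (presumably for the semaphore sync and fence emission). A worked example of that sizing, assuming the driver's 4096-byte GPU page; the function name is illustrative.

/* Sketch only: the chunking math used by r600_copy_dma() above.
 * One GPU page is assumed to be 4096 bytes, i.e. 1024 dwords; each DMA
 * COPY packet can move at most 0xFFFF dwords and occupies 5 ring dwords.
 */
static unsigned int example_dma_copy_ring_dwords(unsigned int num_gpu_pages)
{
	unsigned int size_in_dw = num_gpu_pages * (4096 / 4);
	unsigned int num_loops = (size_in_dw + 0xfffe) / 0xffff;	/* DIV_ROUND_UP */

	/* e.g. 256 pages = 262144 dwords -> 5 COPY packets -> 5 * 5 + 8 = 33 ring dwords */
	return num_loops * 5 + 8;
}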
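Editorial sketch, not part of the patch: the comment in r600_dma_ring_ib_execute() above requires the 3-dword INDIRECT_BUFFER packet to end on an 8-dword boundary, which is why the ring is padded with NOPs until (wptr & 7) == 5. An illustrative helper showing how many NOP dwords that rule implies for a given write pointer; the function name is made up for the example.

/* Sketch only: NOP padding emitted before the 3-dword INDIRECT_BUFFER
 * packet in r600_dma_ring_ib_execute() above. Padding until
 * (wptr & 7) == 5 makes the packet end exactly on an 8-dword boundary.
 */
static unsigned int example_dma_ib_nop_padding(unsigned int wptr)
{
	unsigned int nops = 0;

	while ((wptr & 7) != 5) {
		wptr++;		/* each NOP is a single DMA_PACKET_NOP dword */
		nops++;
	}
	/* e.g. wptr == 0x22 -> 3 NOPs, so the IB packet occupies dwords 0x25..0x27 */
	return nops;
}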