Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "This has a bunch of nouveau fixes, as Ben has been hibernating and
  has lots of small fixes for lots of bugs across nouveau.

  Radeon has one major fix for hdmi/dp audio regression that is larger
  than Alex would like, but seems to fix up a fair few bugs, along with
  some misc fixes.

  And a few msm fixes, one of which is also a bit large.

  But nothing in here seems insane or crazy for this stage, just more
  than I'd like"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (33 commits)
  drm/msm/mdp5: release SMB (shared memory blocks) in various cases
  drm/msm: change to uninterruptible wait in atomic commit
  drm/msm: mdp4: Fix drm_framebuffer dereference crash
  drm/msm: fix msm_gem_prime_get_sg_table()
  drm/amdgpu: add new parameter to seperate map and unmap
  drm/amdgpu: hdp_flush is not needed for inside IB
  drm/amdgpu: different emit_ib for gfx and compute
  drm/amdgpu: information leak in amdgpu_info_ioctl()
  drm/amdgpu: clean up init sequence for failures
  drm/radeon/combios: add some validation of lvds values
  drm/radeon: rework audio modeset to handle non-audio hdmi features
  drm/radeon: rework audio detect (v4)
  drm/amdgpu: Drop drm/ prefix for including drm.h in amdgpu_drm.h
  drm/radeon: Drop drm/ prefix for including drm.h in radeon_drm.h
  drm/nouveau/nouveau/ttm: fix tiled system memory with Maxwell
  drm/nouveau/kms/nv50-: guard against enabling cursor on disabled heads
  drm/nouveau/fbcon/g80: reduce PUSH_SPACE alloc, fire ring on accel init
  drm/nouveau/fbcon/gf100-: reduce RING_SPACE allocation
  drm/nouveau/fbcon/nv11-: correctly account for ring space usage
  drm/nouveau/bios: add proper support for opcode 0x59
  ...
Commit fd56d1d66a
@@ -1866,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amdgpu_ip_block_status {
+    bool valid;
+    bool sw;
+    bool hw;
+};
+
 struct amdgpu_device {
     struct device *dev;
     struct drm_device *ddev;

@@ -2008,7 +2014,7 @@ struct amdgpu_device {
     const struct amdgpu_ip_block_version *ip_blocks;
     int num_ip_blocks;
-    bool *ip_block_enabled;
+    struct amdgpu_ip_block_status *ip_block_status;
     struct mutex mn_lock;
     DECLARE_HASHTABLE(mn_hash, 7);
 
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         return -EINVAL;
     }
 
-    adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-    if (adev->ip_block_enabled == NULL)
+    adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+                                    sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+    if (adev->ip_block_status == NULL)
         return -ENOMEM;
 
     if (adev->ip_blocks == NULL) {

@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
     for (i = 0; i < adev->num_ip_blocks; i++) {
         if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
             DRM_ERROR("disabled ip block: %d\n", i);
-            adev->ip_block_enabled[i] = false;
+            adev->ip_block_status[i].valid = false;
         } else {
             if (adev->ip_blocks[i].funcs->early_init) {
                 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
                 if (r == -ENOENT)
-                    adev->ip_block_enabled[i] = false;
+                    adev->ip_block_status[i].valid = false;
                 else if (r)
                     return r;
                 else
-                    adev->ip_block_enabled[i] = true;
+                    adev->ip_block_status[i].valid = true;
             } else {
-                adev->ip_block_enabled[i] = true;
+                adev->ip_block_status[i].valid = true;
             }
         }
     }

@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
     int i, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].valid)
             continue;
         r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
         if (r)
             return r;
+        adev->ip_block_status[i].sw = true;
         /* need to do gmc hw init early so we can allocate gpu mem */
         if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
             r = amdgpu_vram_scratch_init(adev);

@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
             r = amdgpu_wb_init(adev);
             if (r)
                 return r;
+            adev->ip_block_status[i].hw = true;
         }
     }
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].sw)
             continue;
         /* gmc hw init is done early */
         if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)

@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
         r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
         if (r)
             return r;
+        adev->ip_block_status[i].hw = true;
     }
 
     return 0;

@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
     int i = 0, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].valid)
             continue;
         /* enable clockgating to save power */
         r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,

@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
     int i, r;
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].hw)
             continue;
         if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
             amdgpu_wb_fini(adev);

@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
             return r;
         r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
         /* XXX handle errors */
+        adev->ip_block_status[i].hw = false;
     }
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].sw)
             continue;
         r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
         /* XXX handle errors */
-        adev->ip_block_enabled[i] = false;
+        adev->ip_block_status[i].sw = false;
+        adev->ip_block_status[i].valid = false;
     }
 
     return 0;

@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
     int i, r;
 
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].valid)
             continue;
         /* ungate blocks so that suspend can properly shut them down */
         r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,

@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
     int i, r;
 
     for (i = 0; i < adev->num_ip_blocks; i++) {
-        if (!adev->ip_block_enabled[i])
+        if (!adev->ip_block_status[i].valid)
             continue;
         r = adev->ip_blocks[i].funcs->resume(adev);
         if (r)

@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
     amdgpu_fence_driver_fini(adev);
     amdgpu_fbdev_fini(adev);
     r = amdgpu_fini(adev);
-    kfree(adev->ip_block_enabled);
-    adev->ip_block_enabled = NULL;
+    kfree(adev->ip_block_status);
+    adev->ip_block_status = NULL;
     adev->accel_working = false;
     /* free i2c buses */
     amdgpu_i2c_fini(adev);
 
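The init-sequence cleanup above replaces the single ip_block_enabled flag with per-stage valid/sw/hw bits, so teardown after a mid-init failure only undoes the stages that actually completed. A minimal standalone sketch of the same pattern (illustrative names, not the amdgpu API):

#include <stdbool.h>
#include <stdio.h>

struct block_status {
    bool valid; /* block passed early init and may be used */
    bool sw;    /* software-side state was allocated */
    bool hw;    /* hardware was actually programmed */
};

/* Tear down in reverse order, undoing only the stages that completed. */
static void fini_blocks(struct block_status *st, int n)
{
    for (int i = n - 1; i >= 0; i--) {
        if (st[i].hw) {
            printf("block %d: hw_fini\n", i);
            st[i].hw = false;
        }
    }
    for (int i = n - 1; i >= 0; i--) {
        if (st[i].sw) {
            printf("block %d: sw_fini\n", i);
            st[i].sw = false;
            st[i].valid = false;
        }
    }
}

int main(void)
{
    /* block 1 failed after sw_init, before hw_init */
    struct block_status st[3] = {
        { .valid = true, .sw = true, .hw = true },
        { .valid = true, .sw = true, .hw = false },
        { .valid = true, .sw = true, .hw = true },
    };
    fini_blocks(st, 3); /* hw_fini correctly skips block 1 */
    return 0;
}
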
@@ -449,7 +449,7 @@ out:
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-                                    struct amdgpu_bo_va *bo_va)
+                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
 {
     struct ttm_validate_buffer tv, *entry;
     struct amdgpu_bo_list_entry *vm_bos;

@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
     if (r)
         goto error_unlock;
 
-    r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+    if (operation == AMDGPU_VA_OP_MAP)
+        r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
 error_unlock:
     mutex_unlock(&bo_va->vm->mutex);

@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     }
 
     if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
-        amdgpu_gem_va_update_vm(adev, bo_va);
+        amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 
     drm_gem_object_unreference_unlocked(gobj);
     return r;
 
@@ -180,17 +180,17 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
     if (vm) {
         /* do context switch */
         amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-
-        if (ring->funcs->emit_gds_switch)
-            amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
-                                        ib->gds_base, ib->gds_size,
-                                        ib->gws_base, ib->gws_size,
-                                        ib->oa_base, ib->oa_size);
-
-        if (ring->funcs->emit_hdp_flush)
-            amdgpu_ring_emit_hdp_flush(ring);
     }
 
+    if (vm && ring->funcs->emit_gds_switch)
+        amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+                                    ib->gds_base, ib->gds_size,
+                                    ib->gws_base, ib->gws_size,
+                                    ib->oa_base, ib->oa_size);
+
+    if (ring->funcs->emit_hdp_flush)
+        amdgpu_ring_emit_hdp_flush(ring);
+
     old_ctx = ring->current_ctx;
     for (i = 0; i < num_ibs; ++i) {
         ib = &ibs[i];
 
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
         for (i = 0; i < adev->num_ip_blocks; i++) {
             if (adev->ip_blocks[i].type == type &&
-                adev->ip_block_enabled[i]) {
+                adev->ip_block_status[i].valid) {
                 ip.hw_ip_version_major = adev->ip_blocks[i].major;
                 ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
                 ip.capabilities_flags = 0;

@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
         for (i = 0; i < adev->num_ip_blocks; i++)
             if (adev->ip_blocks[i].type == type &&
-                adev->ip_block_enabled[i] &&
+                adev->ip_block_status[i].valid &&
                 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                 count++;
 

@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         return n ? -EFAULT : 0;
     }
     case AMDGPU_INFO_DEV_INFO: {
-        struct drm_amdgpu_info_device dev_info;
+        struct drm_amdgpu_info_device dev_info = {};
         struct amdgpu_cu_info cu_info;
 
         dev_info.device_id = dev->pdev->device;
 
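The dev_info = {} hunk above closes an information leak: a stack struct copied back to userspace carries whatever bytes were left in uninitialized fields and padding unless it is zeroed first. A hedged sketch of the rule, with a hypothetical reply struct rather than the real ioctl:

#include <string.h>

struct query_reply {
    unsigned int device_id;
    unsigned int family;
    /* the compiler may add padding here; later fields may stay unset */
    unsigned long long reserved[4];
};

/* Bad: stale stack bytes leak through unset fields and padding, e.g.
 *     struct query_reply r; r.device_id = id; copy_to_user(out, &r, sizeof(r));
 * Good: zero everything first, then fill in what you know. */
void fill_reply(struct query_reply *r, unsigned int id)
{
    memset(r, 0, sizeof(*r));  /* equivalent of "= {}" on a local */
    r->device_id = id;
}
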
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  * sheduling on the ring. This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
 {
     bool need_ctx_switch = ring->current_ctx != ib->ctx;

@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
     u32 next_rptr = ring->wptr + 5;
 
     /* drop the CE preamble IB for the same context */
-    if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-        (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-        !need_ctx_switch)
+    if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
         return;
 
-    if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-        control |= INDIRECT_BUFFER_VALID;
-
-    if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+    if (need_ctx_switch)
         next_rptr += 2;
 
     next_rptr += 4;

@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, next_rptr);
 
     /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-    if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+    if (need_ctx_switch) {
         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
         amdgpu_ring_write(ring, 0);
     }

@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_ib *ib)
+{
+    u32 header, control = 0;
+    u32 next_rptr = ring->wptr + 5;
+
+    control |= INDIRECT_BUFFER_VALID;
+    next_rptr += 4;
+    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+    amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+    amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+    amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+    amdgpu_ring_write(ring, next_rptr);
+
+    header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+    control |= ib->length_dw |
+               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+    amdgpu_ring_write(ring, header);
+    amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                      (2 << 0) |
+#endif
+                      (ib->gpu_addr & 0xFFFFFFFC));
+    amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+    amdgpu_ring_write(ring, control);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *

@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
     .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
     .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
     .parse_cs = NULL,
-    .emit_ib = gfx_v7_0_ring_emit_ib,
+    .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
     .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
     .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
     .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,

@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
     .get_wptr = gfx_v7_0_ring_get_wptr_compute,
     .set_wptr = gfx_v7_0_ring_set_wptr_compute,
     .parse_cs = NULL,
-    .emit_ib = gfx_v7_0_ring_emit_ib,
+    .emit_ib = gfx_v7_0_ring_emit_ib_compute,
     .emit_fence = gfx_v7_0_ring_emit_fence_compute,
     .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
     .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 
@@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
     amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
 {
     bool need_ctx_switch = ring->current_ctx != ib->ctx;

@@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
     u32 next_rptr = ring->wptr + 5;
 
     /* drop the CE preamble IB for the same context */
-    if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-        (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-        !need_ctx_switch)
+    if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
         return;
 
-    if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-        control |= INDIRECT_BUFFER_VALID;
-
-    if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+    if (need_ctx_switch)
         next_rptr += 2;
 
     next_rptr += 4;

@@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, next_rptr);
 
     /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-    if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+    if (need_ctx_switch) {
         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
         amdgpu_ring_write(ring, 0);
     }

@@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                          struct amdgpu_ib *ib)
+{
+    u32 header, control = 0;
+    u32 next_rptr = ring->wptr + 5;
+
+    control |= INDIRECT_BUFFER_VALID;
+
+    next_rptr += 4;
+    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+    amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+    amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+    amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+    amdgpu_ring_write(ring, next_rptr);
+
+    header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+    control |= ib->length_dw |
+               (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+    amdgpu_ring_write(ring, header);
+    amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                      (2 << 0) |
+#endif
+                      (ib->gpu_addr & 0xFFFFFFFC));
+    amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+    amdgpu_ring_write(ring, control);
+}
+
 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
                                          u64 seq, unsigned flags)
 {

@@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
     .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
     .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
     .parse_cs = NULL,
-    .emit_ib = gfx_v8_0_ring_emit_ib,
+    .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
     .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
     .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
     .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,

@@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
     .get_wptr = gfx_v8_0_ring_get_wptr_compute,
     .set_wptr = gfx_v8_0_ring_set_wptr_compute,
     .parse_cs = NULL,
-    .emit_ib = gfx_v8_0_ring_emit_ib,
+    .emit_ib = gfx_v8_0_ring_emit_ib_compute,
     .emit_fence = gfx_v8_0_ring_emit_fence_compute,
     .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
     .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
     uint32_t op_mode = 0;
     uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
     uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-    enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+    enum mdp4_frame_format frame_type;
 
     if (!(crtc && fb)) {
         DBG("%s: disabled!", mdp4_plane->name);
         return 0;
     }
 
+    frame_type = mdp4_get_frame_format(fb);
+
     /* src values are in Q16 fixed point, convert to integer: */
     src_x = src_x >> 16;
     src_y = src_y >> 16;
 
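The mdp4_plane hunk above is a dereference-before-check fix: the old declaration initialized frame_type from fb before the !(crtc && fb) guard could reject a NULL fb. Reduced to its essence with a hypothetical helper, same shape as the fix:

#include <stddef.h>

struct fb { int format; };

static int get_frame_format(const struct fb *fb)
{
    return fb->format;  /* unconditional dereference */
}

int mode_set(const struct fb *fb)
{
    int frame_type;     /* declare only; do not initialize from fb */

    if (!fb)
        return 0;       /* disabled case bails out first */

    frame_type = get_frame_format(fb);  /* safe: fb checked above */
    return frame_type;
}
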
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
+    int i;
     struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+    int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+    for (i = 0; i < nplanes; i++) {
+        struct drm_plane *plane = state->planes[i];
+        struct drm_plane_state *plane_state = state->plane_states[i];
+
+        if (!plane)
+            continue;
+
+        mdp5_plane_complete_commit(plane, plane_state);
+    }
+
     mdp5_disable(mdp5_kms);
 }
 
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
         struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+    struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
         enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
 
@@ -31,8 +31,6 @@ struct mdp5_plane {
 
     uint32_t nformats;
     uint32_t formats[32];
-
-    bool enabled;
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 

@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
     return state->fb && state->crtc;
 }
 
-static int mdp5_plane_disable(struct drm_plane *plane)
-{
-    struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-    struct mdp5_kms *mdp5_kms = get_kms(plane);
-    enum mdp5_pipe pipe = mdp5_plane->pipe;
-
-    DBG("%s: disable", mdp5_plane->name);
-
-    if (mdp5_kms) {
-        /* Release the memory we requested earlier from the SMP: */
-        mdp5_smp_release(mdp5_kms->smp, pipe);
-    }
-
-    return 0;
-}
-
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
     struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 
     if (!plane_enabled(state)) {
         to_mdp5_plane_state(state)->pending = true;
-        mdp5_plane_disable(plane);
     } else if (to_mdp5_plane_state(state)->mode_changed) {
         int ret;
         to_mdp5_plane_state(state)->pending = true;

@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
     return mdp5_plane->flush_mask;
 }
 
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+    struct drm_plane_state *state)
+{
+    struct mdp5_kms *mdp5_kms = get_kms(plane);
+    struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+    enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+    if (!plane_enabled(plane->state)) {
+        DBG("%s: free SMP", mdp5_plane->name);
+        mdp5_smp_release(mdp5_kms->smp, pipe);
+    }
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
         enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
 
@@ -34,22 +34,44 @@
  * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
  *
  * For each block that can be dynamically allocated, it can be either
- *     free, or pending/in-use by a client. The updates happen in three steps:
+ *     free:
+ *         The block is free.
+ *
+ *     pending:
+ *         The block is allocated to some client and not free.
+ *
+ *     configured:
+ *         The block is allocated to some client, and assigned to that
+ *         client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ *     inuse:
+ *         The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
  *
  * 1) mdp5_smp_request():
  *    When plane scanout is setup, calculate required number of
- *    blocks needed per client, and request. Blocks not inuse or
- *    pending by any other client are added to client's pending
- *    set.
+ *    blocks needed per client, and request. Blocks neither inuse nor
+ *    configured nor pending by any other client are added to client's
+ *    pending set.
+ *    For shrinking, blocks in pending but not in configured can be freed
+ *    directly, but those already in configured will be freed later by
+ *    mdp5_smp_commit.
  *
  * 2) mdp5_smp_configure():
  *    As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *    are configured for the union(pending, inuse)
+ *    Current pending is copied to configured.
+ *    It is assumed that mdp5_smp_request and mdp5_smp_configure not run
+ *    concurrently for the same pipe.
  *
  * 3) mdp5_smp_commit():
- *    After next vblank, copy pending -> inuse. Optionally update
+ *    After next vblank, copy configured -> inuse. Optionally update
  *    MDP5_SMP_ALLOC registers if there are newly unused blocks
  *
+ * 4) mdp5_smp_release():
+ *    Must be called after the pipe is disabled and no longer uses any SMB
+ *
  * On the next vblank after changes have been committed to hw, the
  * client's pending blocks become it's in-use blocks (and no-longer
  * in-use blocks become available to other clients).

@@ -77,6 +99,9 @@ struct mdp5_smp {
     struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };
 
+static void update_smp_state(struct mdp5_smp *smp,
+        u32 cid, mdp5_smp_state_t *assigned);
+
 static inline
 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 {

@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
         for (i = cur_nblks; i > nblks; i--) {
             int blk = find_first_bit(ps->pending, cnt);
             clear_bit(blk, ps->pending);
-            /* don't clear in global smp_state until _commit() */
+
+            /* clear in global smp_state if not in configured
+             * otherwise until _commit()
+             */
+            if (!test_bit(blk, ps->configured))
+                clear_bit(blk, smp->state);
         }
     }
 

@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
 /* Release SMP blocks for all clients of the pipe */
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-    int i, nblks;
+    int i;
+    unsigned long flags;
+    int cnt = smp->blk_cnt;
+
+    for (i = 0; i < pipe2nclients(pipe); i++) {
+        mdp5_smp_state_t assigned;
+        u32 cid = pipe2client(pipe, i);
+        struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+        spin_lock_irqsave(&smp->state_lock, flags);
+
+        /* clear hw assignment */
+        bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+        update_smp_state(smp, CID_UNUSED, &assigned);
+
+        /* free to global pool */
+        bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+        bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+        /* clear client's infor */
+        bitmap_zero(ps->pending, cnt);
+        bitmap_zero(ps->configured, cnt);
+        bitmap_zero(ps->inuse, cnt);
+
+        spin_unlock_irqrestore(&smp->state_lock, flags);
+    }
 
-    for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
-        smp_request_block(smp, pipe2client(pipe, i), 0);
     set_fifo_thresholds(smp, pipe, 0);
 }
 

@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
         u32 cid = pipe2client(pipe, i);
         struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
-        bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+        /*
+         * if vblank has not happened since last smp_configure
+         * skip the configure for now
+         */
+        if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+            continue;
+
+        bitmap_copy(ps->configured, ps->pending, cnt);
+        bitmap_or(assigned, ps->inuse, ps->configured, cnt);
         update_smp_state(smp, cid, &assigned);
     }
 }
 
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
     int cnt = smp->blk_cnt;

@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
          * using, which can be released and made available to other
          * clients:
          */
-        if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+        if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
             unsigned long flags;
 
             spin_lock_irqsave(&smp->state_lock, flags);

@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
             update_smp_state(smp, CID_UNUSED, &released);
         }
 
-        bitmap_copy(ps->inuse, ps->pending, cnt);
+        bitmap_copy(ps->inuse, ps->configured, cnt);
     }
 }
 
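The reworked comment above defines a per-block life cycle (free, pending, configured, inuse) advanced at request, configure, and vblank-commit time. A toy model of those transitions, with plain unsigned masks standing in for the kernel's mdp5_smp_state_t bitmaps:

#include <stdio.h>

/* Toy SMB client: each bit is one shared memory block. */
struct client {
    unsigned pending;    /* requested, not yet written to ALLOC regs */
    unsigned configured; /* written to ALLOC regs, waiting for vblank */
    unsigned inuse;      /* live after vblank */
};

static void request(struct client *c, unsigned blocks) { c->pending = blocks; }

static void configure(struct client *c)
{
    /* step #2: only advance if the previous configure was committed */
    if (c->inuse != c->configured)
        return;
    c->configured = c->pending;
}

static void commit(struct client *c)
{
    /* step #3, after vblank: configured -> inuse */
    c->inuse = c->configured;
}

static void release(struct client *c)
{
    /* step #4: pipe disabled, drop everything */
    c->pending = c->configured = c->inuse = 0;
}

int main(void)
{
    struct client c = { 0 };
    request(&c, 0x0f);
    configure(&c);
    commit(&c);
    printf("inuse = 0x%x\n", c.inuse);  /* prints 0xf */
    release(&c);
    return 0;
}
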
@@ -23,6 +23,7 @@
 
 struct mdp5_client_smp_state {
     mdp5_smp_state_t inuse;
+    mdp5_smp_state_t configured;
     mdp5_smp_state_t pending;
 };
 
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
     timeout = ktime_add_ms(ktime_get(), 1000);
 
-    ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
-    if (ret) {
-        WARN_ON(ret); // TODO unswap state back? or??
-        commit_destroy(c);
-        return ret;
-    }
+    /* uninterruptible wait */
+    msm_wait_fence(dev, c->fence, &timeout, false);
 
     complete_commit(c);
 
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
  * Fences:
  */
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-        ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+        ktime_t *timeout , bool interruptible)
 {
     struct msm_drm_private *priv = dev->dev_private;
     int ret;

@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
         remaining_jiffies = timespec_to_jiffies(&ts);
     }
 
-    ret = wait_event_interruptible_timeout(priv->fence_event,
+    if (interruptible)
+        ret = wait_event_interruptible_timeout(priv->fence_event,
                 fence_completed(dev, fence),
                 remaining_jiffies);
+    else
+        ret = wait_event_timeout(priv->fence_event,
+                fence_completed(dev, fence),
+                remaining_jiffies);
 

@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
         return -EINVAL;
     }
 
-    return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+    return msm_wait_fence(dev, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
 
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-        ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+        ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
         struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
         if (op & MSM_PREP_NOSYNC)
             timeout = NULL;
 
-        ret = msm_wait_fence_interruptable(dev, fence, timeout);
+        ret = msm_wait_fence(dev, fence, timeout, true);
     }
 
     /* TODO cache maintenance */
 
@@ -23,8 +23,12 @@
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
     struct msm_gem_object *msm_obj = to_msm_bo(obj);
-    BUG_ON(!msm_obj->sgt); /* should have already pinned! */
-    return msm_obj->sgt;
+    int npages = obj->size >> PAGE_SHIFT;
+
+    if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
+        return NULL;
+
+    return drm_prime_pages_to_sg(msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
 
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
     nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
     nvif_client_fini(&cli->base);
+    usif_client_fini(cli);
     kfree(cli);
 }
 
 static void

@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
     pm_runtime_get_sync(dev->dev);
 
+    mutex_lock(&cli->mutex);
     if (cli->abi16)
         nouveau_abi16_fini(cli->abi16);
+    mutex_unlock(&cli->mutex);
 
     mutex_lock(&drm->client.mutex);
     list_del(&cli->head);
 
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
     return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
 static void nouveau_platform_probe_iommu(struct device *dev,
                                          struct nouveau_platform_gpu *gpu)
 {

@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
     }
 }
 
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+                                         struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+                                          struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
     struct nouveau_platform_gpu *gpu;
 
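The nouveau_platform hunks above use the standard compiled-out-stub pattern: when CONFIG_IOMMU_API is off, empty probe/remove bodies are supplied so callers stay free of #ifdefs. The shape of the pattern, with hypothetical names in place of the nouveau ones:

struct device;  /* opaque here; the real code gets it from linux/device.h */

#if defined(CONFIG_MY_FEATURE)  /* stand-in for IS_ENABLED(CONFIG_IOMMU_API) */
void my_feature_probe(struct device *dev);
void my_feature_remove(struct device *dev);
#else
/* empty fallbacks: callers may call these unconditionally */
static inline void my_feature_probe(struct device *dev) { }
static inline void my_feature_remove(struct device *dev) { }
#endif
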
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
     node->page_shift = 12;
 
     switch (drm->device.info.family) {
+    case NV_DEVICE_INFO_V0_TNT:
+    case NV_DEVICE_INFO_V0_CELSIUS:
+    case NV_DEVICE_INFO_V0_KELVIN:
+    case NV_DEVICE_INFO_V0_RANKINE:
+    case NV_DEVICE_INFO_V0_CURIE:
+        break;
     case NV_DEVICE_INFO_V0_TESLA:
         if (drm->device.info.chipset != 0x50)
             node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
         break;
     case NV_DEVICE_INFO_V0_FERMI:
     case NV_DEVICE_INFO_V0_KEPLER:
+    case NV_DEVICE_INFO_V0_MAXWELL:
         node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
         break;
     default:
+        NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+                drm->device.info.family);
         break;
     }
 
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
     if (ret)
         return ret;
 
-    if (RING_SPACE(chan, 49)) {
+    if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
         nouveau_fbcon_gpu_lockup(info);
         return 0;
     }
 
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
     struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
 
-    if (show && nv_crtc->cursor.nvbo)
+    if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
         nv50_crtc_cursor_show(nv_crtc);
     else
         nv50_crtc_cursor_hide(nv_crtc);
 
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
     if (ret)
         return ret;
 
-    ret = RING_SPACE(chan, 59);
+    ret = RING_SPACE(chan, 58);
     if (ret) {
         nouveau_fbcon_gpu_lockup(info);
         return ret;

@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
     OUT_RING(chan, info->var.yres_virtual);
     OUT_RING(chan, upper_32_bits(fb->vma.offset));
     OUT_RING(chan, lower_32_bits(fb->vma.offset));
+    FIRE_RING(chan);
 
     return 0;
 }
 
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
         return -EINVAL;
     }
 
-    ret = RING_SPACE(chan, 60);
+    ret = RING_SPACE(chan, 58);
     if (ret) {
         WARN_ON(1);
         nouveau_fbcon_gpu_lockup(info);
 
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
         case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
         default:
             nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-            return 0x0000;
+            return NULL;
         }
     }
 
@@ -165,15 +165,31 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
     return 0;
 }
 
+static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+    struct nvkm_object *obj = (void *)chan;
+    struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+    nv_wr32(priv, 0x002634, chan->base.chid);
+    if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+        nv_error(priv, "channel %d [%s] kick timeout\n",
+                 chan->base.chid, nvkm_client_name(chan));
+        return -EBUSY;
+    }
+
+    return 0;
+}
+
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                           struct nvkm_object *object)
 {
     struct nvkm_bar *bar = nvkm_bar(parent);
     struct gk104_fifo_priv *priv = (void *)parent->engine;
     struct gk104_fifo_base *base = (void *)parent->parent;
     struct gk104_fifo_chan *chan = (void *)parent;
     u32 addr;
+    int ret;
 
     switch (nv_engidx(object->engine)) {
     case NVDEV_ENGINE_SW : return 0;

@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         return -EINVAL;
     }
 
-    nv_wr32(priv, 0x002634, chan->base.chid);
-    if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-        nv_error(priv, "channel %d [%s] kick timeout\n",
-                 chan->base.chid, nvkm_client_name(chan));
-        if (suspend)
-            return -EBUSY;
-    }
+    ret = gk104_fifo_chan_kick(chan);
+    if (ret && suspend)
+        return ret;
 
     if (addr) {
         nv_wo32(base, addr + 0x00, 0x00000000);

@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
         gk104_fifo_runlist_update(priv, chan->engine);
     }
 
+    gk104_fifo_chan_kick(chan);
     nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
     return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
 
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
         gf100_gr_zbc_clear_depth(priv, index);
 }
 
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+    unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+    bool gr_enabled, ctxsw_active, gr_busy;
+
+    do {
+        /*
+         * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+         * up-to-date
+         */
+        nv_rd32(priv, 0x400700);
+
+        gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+        ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+        gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+        if (!gr_enabled || (!gr_busy && !ctxsw_active))
+            return 0;
+    } while (time_before(jiffies, end_jiffies));
+
+    nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+             gr_enabled, ctxsw_active, gr_busy);
+    return -EAGAIN;
+}
+
 void
 gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 {

@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 
         while (addr < next) {
             nv_wr32(priv, 0x400200, addr);
-            nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+            /**
+             * Wait for GR to go idle after submitting a
+             * GO_IDLE bundle
+             */
+            if ((addr & 0xffff) == 0xe100)
+                gf100_gr_wait_idle(priv);
+            nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
             addr += init->pitch;
         }
     }
 
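gf100_gr_wait_idle above is the bounded-poll idiom: re-read status until the idle condition holds or a deadline passes, then report a timeout. The same shape in portable C, with a monotonic-clock deadline standing in for jiffies/time_before and a stub status read standing in for nv_rd32:

#include <stdbool.h>
#include <time.h>

/* Hypothetical status read standing in for the nv_rd32() calls. */
static bool engine_idle(void)
{
    return true;  /* pretend the hardware reports idle */
}

/* Poll until idle or until ~2s elapse; 0 on success, -1 on timeout. */
static int wait_idle(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    time_t deadline = ts.tv_sec + 2;

    do {
        if (engine_idle())
            return 0;
        clock_gettime(CLOCK_MONOTONIC, &ts);
    } while (ts.tv_sec < deadline);

    return -1;  /* analogous to the -EAGAIN + nv_error() path above */
}
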
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
     int ppc_nr;
 };
 
+int gf100_gr_wait_idle(struct gf100_gr_priv *);
 void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 
@@ -332,9 +332,12 @@ static void
 nvkm_perfctx_dtor(struct nvkm_object *object)
 {
     struct nvkm_pm *ppm = (void *)object->engine;
+    struct nvkm_perfctx *ctx = (void *)object;
 
     mutex_lock(&nv_subdev(ppm)->mutex);
-    nvkm_engctx_destroy(&ppm->context->base);
-    ppm->context = NULL;
+    nvkm_engctx_destroy(&ctx->base);
+    if (ppm->context == ctx)
+        ppm->context = NULL;
     mutex_unlock(&nv_subdev(ppm)->mutex);
 }
 

@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     mutex_lock(&nv_subdev(ppm)->mutex);
     if (ppm->context == NULL)
         ppm->context = ctx;
+    if (ctx != ppm->context)
+        ret = -EBUSY;
     mutex_unlock(&nv_subdev(ppm)->mutex);
 
-    if (ctx != ppm->context)
-        return -EBUSY;
-
-    return 0;
+    return ret;
 }
 
 struct nvkm_oclass
 
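The pm/base.c hunks above close a race on the single ppm->context slot: the ctor claims it only if free and decides on -EBUSY while still under the mutex, and the dtor clears it only if it still points at the context being destroyed. The same claim/release discipline in miniature, with a pthread mutex standing in for the nvkm subdev mutex:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *owner;  /* plays the role of ppm->context */

/* Claim the single slot; -1 (like -EBUSY) if someone else holds it. */
int ctx_ctor(void *ctx)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    if (owner == NULL)
        owner = ctx;
    if (owner != ctx)
        ret = -1;
    pthread_mutex_unlock(&lock);
    return ret;
}

/* Release only if we are still the owner; never free another's claim. */
void ctx_dtor(void *ctx)
{
    pthread_mutex_lock(&lock);
    if (owner == ctx)
        owner = NULL;
    pthread_mutex_unlock(&lock);
}
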
@@ -1284,6 +1284,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
     }
 }
 
+/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+    struct nvkm_bios *bios = init->bios;
+    u32 reg = nv_ro32(bios, init->offset + 1);
+    u16 addr = nv_ro16(bios, init->offset + 5);
+    u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+    trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+          reg, addr, freq);
+    init->offset += 7;
+
+    init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+    struct nvkm_bios *bios = init->bios;
+    u32 reg = nv_ro32(bios, init->offset + 1);
+    u16 addr = nv_ro16(bios, init->offset + 5);
+    u32 data = nv_ro32(bios, addr);
+
+    trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+          reg, addr, data);
+    init->offset += 7;
+
+    init_wr32(init, addr, data);
+}
+
 /**
  * INIT_SUB_DIRECT - opcode 0x5b
  *

@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
     [0x56] = { init_condition_time },
     [0x57] = { init_ltime },
     [0x58] = { init_zm_reg_sequence },
+    [0x59] = { init_pll_indirect },
+    [0x5a] = { init_zm_reg_indirect },
     [0x5b] = { init_sub_direct },
     [0x5c] = { init_jump },
     [0x5e] = { init_i2c_if },
 
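From the new init_pll_indirect handler one can read off the 7-byte layout of opcode 0x59: a u32 register address at offset 1 and a u16 pointer at offset 5 to a frequency entry elsewhere in the VBIOS image, which the handler scales by 1000 into kHz. A little-endian parsing sketch over a fake image, with the layout inferred from the handler above:

#include <stdint.h>
#include <stdio.h>

static uint16_t ro16(const uint8_t *img, uint32_t off)
{
    return (uint16_t)(img[off] | (img[off + 1] << 8));
}

static uint32_t ro32(const uint8_t *img, uint32_t off)
{
    return (uint32_t)(img[off] | (img[off + 1] << 8) |
                      (img[off + 2] << 16) | ((uint32_t)img[off + 3] << 24));
}

int main(void)
{
    /* tiny fake VBIOS image: opcode 0x59 at offset 0, freq entry at 0x10 */
    uint8_t img[0x20] = { 0x59, 0x08, 0x40, 0x00, 0x00, 0x10, 0x00 };
    img[0x10] = 0x2c; img[0x11] = 0x01;  /* 300 -> 300000 kHz */

    uint32_t reg  = ro32(img, 1);        /* register address */
    uint16_t addr = ro16(img, 5);        /* where the frequency lives */
    uint32_t freq = (uint32_t)ro16(img, addr) * 1000;

    printf("R[0x%06x] =PLL= VBIOS[%04x] = %ukHz\n", reg, addr, freq);
    return 0;
}
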
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
                struct gt215_clk_info *info)
 {
     struct gt215_clk_priv *priv = (void *)clock;
-    u32 oclk, sclk, sdiv, diff;
+    u32 oclk, sclk, sdiv;
+    s32 diff;
 
     info->clk = 0;
 
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
     nv_wr32(priv, 0x12004c, 0x4);
     nv_wr32(priv, 0x122204, 0x2);
     nv_rd32(priv, 0x122204);
+
+    /*
+     * Bug: increase clock timeout to avoid operation failure at high
+     * gpcclk rate.
+     */
+    nv_wr32(priv, 0x122354, 0x800);
+    nv_wr32(priv, 0x128328, 0x800);
+    nv_wr32(priv, 0x124320, 0x800);
 }
 
 static void
 
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
     struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
     struct nv04_instobj_priv *node = (void *)object;
+    struct nvkm_subdev *subdev = (void *)priv;
 
+    mutex_lock(&subdev->mutex);
     nvkm_mm_free(&priv->heap, &node->mem);
+    mutex_unlock(&subdev->mutex);
+
     nvkm_instobj_destroy(&node->base);
 }
 

@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
     struct nv04_instobj_priv *node;
     struct nvkm_instobj_args *args = data;
+    struct nvkm_subdev *subdev = (void *)priv;
     int ret;
 
     if (!args->align)

@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
     if (ret)
         return ret;
 
+    mutex_lock(&subdev->mutex);
     ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
                        args->align, &node->mem);
+    mutex_unlock(&subdev->mutex);
     if (ret)
         return ret;
 
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
     encoder_mode = atombios_get_encoder_mode(encoder);
     if (connector && (radeon_audio != 0) &&
         ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
-         (ENCODER_MODE_IS_DP(encoder_mode) &&
-          drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+         ENCODER_MODE_IS_DP(encoder_mode)))
         radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
     struct radeon_device *rdev = encoder->dev->dev_private;
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-    u32 offset;
 
-    if (!dig || !dig->afmt || !dig->afmt->pin)
+    if (!dig || !dig->afmt || !dig->pin)
         return;
 
-    offset = dig->afmt->offset;
-
-    WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-           AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+    WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+           AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-    struct drm_connector *connector, struct drm_display_mode *mode)
+                                    struct drm_connector *connector,
+                                    struct drm_display_mode *mode)
 {
     struct radeon_device *rdev = encoder->dev->dev_private;
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-    u32 tmp = 0, offset;
+    u32 tmp = 0;
 
-    if (!dig || !dig->afmt || !dig->afmt->pin)
+    if (!dig || !dig->afmt || !dig->pin)
         return;
 
-    offset = dig->afmt->pin->offset;
-
     if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
         if (connector->latency_present[1])
             tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |

@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
         else
             tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
     }
-    WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+    WREG32_ENDPOINT(dig->pin->offset,
+                    AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
-    u8 *sadb, int sad_count)
+                                             u8 *sadb, int sad_count)
 {
     struct radeon_device *rdev = encoder->dev->dev_private;
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-    u32 offset, tmp;
+    u32 tmp;
 
-    if (!dig || !dig->afmt || !dig->afmt->pin)
+    if (!dig || !dig->afmt || !dig->pin)
         return;
 
-    offset = dig->afmt->pin->offset;
-
     /* program the speaker allocation */
-    tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+    tmp = RREG32_ENDPOINT(dig->pin->offset,
+                          AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
     tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
     /* set HDMI mode */
     tmp |= HDMI_CONNECTION;

@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
         tmp |= SPEAKER_ALLOCATION(sadb[0]);
     else
         tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-    WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+    WREG32_ENDPOINT(dig->pin->offset,
+                    AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
-    u8 *sadb, int sad_count)
+                                           u8 *sadb, int sad_count)
 {
     struct radeon_device *rdev = encoder->dev->dev_private;
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-    u32 offset, tmp;
+    u32 tmp;
 
-    if (!dig || !dig->afmt || !dig->afmt->pin)
+    if (!dig || !dig->afmt || !dig->pin)
         return;
 
-    offset = dig->afmt->pin->offset;
-
     /* program the speaker allocation */
-    tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+    tmp = RREG32_ENDPOINT(dig->pin->offset,
+                          AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
     tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
     /* set DP mode */
     tmp |= DP_CONNECTION;

@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
         tmp |= SPEAKER_ALLOCATION(sadb[0]);
     else
         tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-    WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+    WREG32_ENDPOINT(dig->pin->offset,
+                    AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
-    struct cea_sad *sads, int sad_count)
+                              struct cea_sad *sads, int sad_count)
 {
-    u32 offset;
     int i;
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
         { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
     };
 
-    if (!dig || !dig->afmt || !dig->afmt->pin)
+    if (!dig || !dig->afmt || !dig->pin)
         return;
 
-    offset = dig->afmt->pin->offset;
-
     for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
         u32 value = 0;
         u8 stereo_freqs = 0;

@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
         value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-        WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+        WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
     }
 }
 

@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-    struct radeon_crtc *crtc, unsigned int clock)
+                             struct radeon_crtc *crtc, unsigned int clock)
 {
     /* Two dtos; generally use dto0 for HDMI */
     u32 value = 0;
 

@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-    struct radeon_crtc *crtc, unsigned int clock)
+                           struct radeon_crtc *crtc, unsigned int clock)
 {
     /* Two dtos; generally use dto1 for DP */
     u32 value = 0;
 
@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
|
|||
static void radeon_audio_enable(struct radeon_device *rdev,
|
||||
struct r600_audio_pin *pin, u8 enable_mask)
|
||||
{
|
||||
struct drm_encoder *encoder;
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
struct radeon_encoder_atom_dig *dig;
|
||||
int pin_count = 0;
|
||||
|
||||
if (!pin)
|
||||
return;
|
||||
|
||||
if (rdev->mode_info.mode_config_initialized) {
|
||||
list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
|
||||
if (radeon_encoder_is_digital(encoder)) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
dig = radeon_encoder->enc_priv;
|
||||
if (dig->pin == pin)
|
||||
pin_count++;
|
||||
}
|
||||
}
|
||||
|
||||
if ((pin_count > 1) && (enable_mask == 0))
|
||||
return;
|
||||
}
|
||||
|
||||
if (rdev->audio.funcs->enable)
|
||||
rdev->audio.funcs->enable(rdev, pin, enable_mask);
|
||||
}
|
||||
|
@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
|
|||
|
||||
static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
|
||||
{
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector = NULL;
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct cea_sad *sads;
|
||||
int sad_count;
|
||||
|
||||
list_for_each_entry(connector,
|
||||
&encoder->dev->mode_config.connector_list, head) {
|
||||
if (connector->encoder == encoder) {
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!radeon_connector) {
|
||||
DRM_ERROR("Couldn't find encoder's connector\n");
|
||||
if (!connector)
|
||||
return;
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
|
||||
if (sad_count <= 0) {
|
||||
|
@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
|
|||
}
|
||||
BUG_ON(!sads);
|
||||
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
|
||||
radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
|
||||
|
||||
|
@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
|
|||
|
||||
static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector = NULL;
|
||||
u8 *sadb = NULL;
|
||||
int sad_count;
|
||||
|
||||
list_for_each_entry(connector,
|
||||
&encoder->dev->mode_config.connector_list, head) {
|
||||
if (connector->encoder == encoder) {
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!radeon_connector) {
|
||||
DRM_ERROR("Couldn't find encoder's connector\n");
|
||||
if (!connector)
|
||||
return;
|
||||
}
|
||||
|
||||
sad_count = drm_edid_to_speaker_allocation(
|
||||
radeon_connector_edid(connector), &sadb);
|
||||
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
|
||||
&sadb);
|
||||
if (sad_count < 0) {
|
||||
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
|
||||
sad_count);
|
||||
|
@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
|
|||
}
|
||||
|
||||
static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode)
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector = 0;
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
list_for_each_entry(connector,
|
||||
&encoder->dev->mode_config.connector_list, head) {
|
||||
if (connector->encoder == encoder) {
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!radeon_connector) {
|
||||
DRM_ERROR("Couldn't find encoder's connector\n");
|
||||
if (!connector)
|
||||
return;
|
||||
}
|
||||
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
|
||||
radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
|
||||
|
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
+			 struct drm_encoder *encoder,
 			 enum drm_connector_status status)
 {
-	struct radeon_device *rdev;
-	struct radeon_encoder *radeon_encoder;
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
 
-	if (!connector || !connector->encoder)
-		return;
-
-	rdev = connector->encoder->dev->dev_private;
-
 	if (!radeon_audio_chipset_supported(rdev))
 		return;
 
-	radeon_encoder = to_radeon_encoder(connector->encoder);
+	if (!radeon_encoder_is_digital(encoder))
+		return;
+
 	dig = radeon_encoder->enc_priv;
 
 	if (status == connector_status_connected) {
-		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-			radeon_encoder->audio = NULL;
-			return;
-		}
-
 		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
 		}
 
-		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+		if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+			if (!dig->pin)
+				dig->pin = radeon_audio_get_pin(encoder);
+			radeon_audio_enable(rdev, dig->pin, 0xf);
+		} else {
+			radeon_audio_enable(rdev, dig->pin, 0);
+			dig->pin = NULL;
+		}
 	} else {
-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
-		dig->afmt->pin = NULL;
+		radeon_audio_enable(rdev, dig->pin, 0);
+		dig->pin = NULL;
 	}
 }
 
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
-	struct drm_display_mode *mode)
+				       struct drm_display_mode *mode)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector = NULL;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
 	struct hdmi_avi_infoframe frame;
 	int err;
 
-	list_for_each_entry(connector,
-			    &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder) {
-			radeon_connector = to_radeon_connector(connector);
-			break;
-		}
-	}
-
-	if (!radeon_connector) {
-		DRM_ERROR("Couldn't find encoder's connector\n");
-		return -ENOENT;
-	}
+	if (!connector)
+		return -EINVAL;
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 	if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
 		return err;
 	}
 
-	if (dig && dig->afmt &&
-	    radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+	if (dig && dig->afmt && radeon_encoder->audio &&
+	    radeon_encoder->audio->set_avi_packet)
 		radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
 			buffer, sizeof(buffer));
 
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
 	if (!dig || !dig->afmt)
 		return;
 
-	radeon_audio_set_mute(encoder, true);
-
-	radeon_audio_write_speaker_allocation(encoder);
-	radeon_audio_write_sad_regs(encoder);
-	radeon_audio_write_latency_fields(encoder, mode);
-	radeon_audio_set_dto(encoder, mode->clock);
-	radeon_audio_set_vbi_packet(encoder);
-	radeon_hdmi_set_color_depth(encoder);
-	radeon_audio_update_acr(encoder, mode->clock);
-	radeon_audio_set_audio_packet(encoder);
-	radeon_audio_select_pin(encoder);
-
-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+	if (!connector)
 		return;
 
-	radeon_audio_set_mute(encoder, false);
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_set_mute(encoder, true);
+
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		radeon_audio_set_dto(encoder, mode->clock);
+		radeon_audio_set_vbi_packet(encoder);
+		radeon_hdmi_set_color_depth(encoder);
+		radeon_audio_update_acr(encoder, mode->clock);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+
+		radeon_audio_set_mute(encoder, false);
+	} else {
+		radeon_hdmi_set_color_depth(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
-	struct drm_display_mode *mode)
+				     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 	if (!dig || !dig->afmt)
 		return;
 
-	radeon_audio_write_speaker_allocation(encoder);
-	radeon_audio_write_sad_regs(encoder);
-	radeon_audio_write_latency_fields(encoder, mode);
-	if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-		radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-	else
-		radeon_audio_set_dto(encoder, dig_connector->dp_clock);
-	radeon_audio_set_audio_packet(encoder);
-	radeon_audio_select_pin(encoder);
-
-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+	if (!connector)
 		return;
+
+	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+		radeon_audio_write_speaker_allocation(encoder);
+		radeon_audio_write_sad_regs(encoder);
+		radeon_audio_write_latency_fields(encoder, mode);
+		if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+			radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+		else
+			radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+		radeon_audio_set_audio_packet(encoder);
+		radeon_audio_select_pin(encoder);
+
+		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+			return;
+	}
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
-	struct drm_display_mode *mode)
+			   struct drm_display_mode *mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-	enum drm_connector_status status);
+			 struct drm_encoder *encoder,
+			 enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
 	u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
 		if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
 		    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+			u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+			if (hss > lvds->native_mode.hdisplay)
+				hss = (10 - 1) * 8;
+
 			lvds->native_mode.htotal = lvds->native_mode.hdisplay +
 				(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
 			lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-				(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				hss;
 			lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
 				(RBIOS8(tmp + 23) * 8);
 
@@ -1379,8 +1379,16 @@ out:
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
 
-	if (radeon_audio != 0)
-		radeon_audio_detect(connector, ret);
+	if ((radeon_audio != 0) && radeon_connector->use_digital) {
+		const struct drm_connector_helper_funcs *connector_funcs =
+			connector->helper_private;
+
+		encoder = connector_funcs->best_encoder(connector);
+		if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+			radeon_connector_get_edid(connector);
+			radeon_audio_detect(connector, encoder, ret);
+		}
+	}
 
 exit:
 	pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
 	radeon_connector_update_scratch_regs(connector, ret);
 
-	if (radeon_audio != 0)
-		radeon_audio_detect(connector, ret);
+	if ((radeon_audio != 0) && encoder) {
+		radeon_connector_get_edid(connector);
+		radeon_audio_detect(connector, encoder, ret);
+	}
 
 out:
 	pm_runtime_mark_last_busy(connector->dev->dev);
@@ -237,7 +237,6 @@ struct radeon_afmt {
 	int offset;
 	bool last_buffer_filled_status;
 	int id;
-	struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {

@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
 	uint8_t backlight_level;
 	int panel_mode;
 	struct radeon_afmt *afmt;
+	struct r600_audio_pin *pin;
 	int active_mst_links;
 };
 
@@ -32,7 +32,7 @@
 #ifndef __AMDGPU_DRM_H__
 #define __AMDGPU_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 #define DRM_AMDGPU_GEM_CREATE		0x00
 #define DRM_AMDGPU_GEM_MMAP		0x01

@@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)