Merge branch 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux into drm-next
Initial pull request for radeon drm-next 3.13. Highlights:
- Enable DPM on a number of asics by default
- Enable audio by default
- Dynamically power down dGPUs on PowerXpress systems
- Lots of bug fixes

* 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux: (36 commits)
  drm/radeon: don't share PPLLs on DCE4.1
  drm/radeon/dpm: fix typo in setting smc flag
  drm/radeon: fixup locking inversion between mmap_sem and reservations
  drm/radeon: clear the page directory using the DMA
  drm/radeon: initially clear page tables
  drm/radeon: drop CP page table updates & cleanup v2
  drm/radeon: add vm_set_page tracepoint
  drm/radeon: rework and fix reset detection v2
  drm/radeon: don't use PACKET2 on CIK
  drm/radeon: fix UVD destroy IB size
  drm/radeon: activate UVD clocks before sending the destroy msg
  drm/radeon/si: fix define for MC_SEQ_TRAIN_WAKEUP_CNTL
  drm/radeon: fix endian handling in rlc buffer setup
  drm/radeon/dpm: retain user selected performance level across state changes
  drm/radeon: disable force performance state when thermal state is active
  drm/radeon: enable DPM by default on r7xx asics
  drm/radeon: enable DPM by default on evergreen asics
  drm/radeon: enable DPM by default on BTC asics
  drm/radeon: enable DPM by default on SI asics
  drm/radeon: enable DPM by default on SUMO/PALM APUs
  ...
Commit bbf1f8bfef
@@ -1753,7 +1753,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                 if (pll != ATOM_PPLL_INVALID)
                     return pll;
             }
-        } else {
+        } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
             /* use the same PPLL for all monitors with the same clock */
             pll = radeon_get_shared_nondp_ppll(crtc);
             if (pll != ATOM_PPLL_INVALID)
@@ -1644,19 +1644,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
             atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
             /* enable the transmitter */
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
         } else {
             /* setup and enable the encoder and transmitter */
             atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-            /* some dce3.x boards have a bug in their transmitter control table.
-             * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
-             * does the same thing and more.
-             */
-            if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-                (rdev->family != CHIP_RS880))
-                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
         }
         if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
             if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1674,16 +1666,11 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
     case DRM_MODE_DPMS_STANDBY:
     case DRM_MODE_DPMS_SUSPEND:
     case DRM_MODE_DPMS_OFF:
-        if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+        if (ASIC_IS_DCE4(rdev)) {
             /* disable the transmitter */
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-        } else if (ASIC_IS_DCE4(rdev)) {
-            /* disable the transmitter */
-            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
-            atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
         } else {
             /* disable the encoder and transmitter */
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
             atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
         }
@@ -2392,6 +2379,15 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 
     /* this is needed for the pll/ss setup to work correctly in some cases */
     atombios_set_encoder_crtc_source(encoder);
+    /* set up the FMT blocks */
+    if (ASIC_IS_DCE8(rdev))
+        dce8_program_fmt(encoder);
+    else if (ASIC_IS_DCE4(rdev))
+        dce4_program_fmt(encoder);
+    else if (ASIC_IS_DCE3(rdev))
+        dce3_program_fmt(encoder);
+    else if (ASIC_IS_AVIVO(rdev))
+        avivo_program_fmt(encoder);
 }
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -67,11 +67,6 @@ extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                 struct radeon_ib *ib,
-                 uint64_t pe,
-                 uint64_t addr, unsigned count,
-                 uint32_t incr, uint32_t flags);
 static void cik_rlc_stop(struct radeon_device *rdev);
 static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
@@ -3094,6 +3089,85 @@ void cik_semaphore_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 }
 
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_cpdma(struct radeon_device *rdev,
+           uint64_t src_offset, uint64_t dst_offset,
+           unsigned num_gpu_pages,
+           struct radeon_fence **fence)
+{
+    struct radeon_semaphore *sem = NULL;
+    int ring_index = rdev->asic->copy.blit_ring_index;
+    struct radeon_ring *ring = &rdev->ring[ring_index];
+    u32 size_in_bytes, cur_size_in_bytes, control;
+    int i, num_loops;
+    int r = 0;
+
+    r = radeon_semaphore_create(rdev, &sem);
+    if (r) {
+        DRM_ERROR("radeon: moving bo (%d).\n", r);
+        return r;
+    }
+
+    size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+    num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+    r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+    if (r) {
+        DRM_ERROR("radeon: moving bo (%d).\n", r);
+        radeon_semaphore_free(rdev, &sem, NULL);
+        return r;
+    }
+
+    if (radeon_fence_need_sync(*fence, ring->idx)) {
+        radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                        ring->idx);
+        radeon_fence_note_sync(*fence, ring->idx);
+    } else {
+        radeon_semaphore_free(rdev, &sem, NULL);
+    }
+
+    for (i = 0; i < num_loops; i++) {
+        cur_size_in_bytes = size_in_bytes;
+        if (cur_size_in_bytes > 0x1fffff)
+            cur_size_in_bytes = 0x1fffff;
+        size_in_bytes -= cur_size_in_bytes;
+        control = 0;
+        if (size_in_bytes == 0)
+            control |= PACKET3_DMA_DATA_CP_SYNC;
+        radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+        radeon_ring_write(ring, control);
+        radeon_ring_write(ring, lower_32_bits(src_offset));
+        radeon_ring_write(ring, upper_32_bits(src_offset));
+        radeon_ring_write(ring, lower_32_bits(dst_offset));
+        radeon_ring_write(ring, upper_32_bits(dst_offset));
+        radeon_ring_write(ring, cur_size_in_bytes);
+        src_offset += cur_size_in_bytes;
+        dst_offset += cur_size_in_bytes;
+    }
+
+    r = radeon_fence_emit(rdev, fence, ring->idx);
+    if (r) {
+        radeon_ring_unlock_undo(rdev, ring);
+        return r;
+    }
+
+    radeon_ring_unlock_commit(rdev, ring);
+    radeon_semaphore_free(rdev, &sem, *fence);
+
+    return r;
+}
+
 /*
  * IB stuff
  */
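A note on the copy loop above: the BYTE_COUNT field of a DMA_DATA packet is 21 bits wide, so the transfer is split into chunks of at most 0x1fffff bytes, with CP_SYNC set only on the final packet. A minimal user-space sketch of the same chunking arithmetic, assuming nothing beyond the constants visible in the hunk (emit_chunk is a hypothetical stand-in for the radeon_ring_write() sequence):

#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK 0x1fffffu                      /* 21-bit BYTE_COUNT limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* hypothetical stand-in for the ring writes in cik_copy_cpdma() */
static void emit_chunk(uint64_t src, uint64_t dst, uint32_t bytes, int last)
{
    printf("copy %u bytes%s\n", bytes, last ? " (CP_SYNC set)" : "");
}

static void copy_chunked(uint64_t src, uint64_t dst, uint32_t size_in_bytes)
{
    unsigned num_loops = DIV_ROUND_UP(size_in_bytes, MAX_CHUNK);

    for (unsigned i = 0; i < num_loops; i++) {
        uint32_t cur = size_in_bytes > MAX_CHUNK ? MAX_CHUNK : size_in_bytes;
        size_in_bytes -= cur;
        /* sync flag only on the last packet, as in the kernel loop */
        emit_chunk(src, dst, cur, size_in_bytes == 0);
        src += cur;
        dst += cur;
    }
}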
@@ -4824,62 +4898,6 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     }
 }
 
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
-             struct radeon_ib *ib,
-             uint64_t pe,
-             uint64_t addr, unsigned count,
-             uint32_t incr, uint32_t flags)
-{
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-    uint64_t value;
-    unsigned ndw;
-
-    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-        /* CP */
-        while (count) {
-            ndw = 2 + count * 2;
-            if (ndw > 0x3FFE)
-                ndw = 0x3FFE;
-
-            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-            ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-                            WRITE_DATA_DST_SEL(1));
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= r600_flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        /* DMA */
-        cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-    }
-}
-
 /*
  * RLC
  * The RLC is a multi-purpose microengine that handles a
@@ -5546,7 +5564,7 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
         }
 
         for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
-            dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+            dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
         }
         bo_offset += CP_ME_TABLE_SIZE;
     }
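The one-liner above is an endian fix: the firmware words are big-endian and the destination buffer is consumed by the GPU as little-endian. be32_to_cpu() alone is only correct on little-endian hosts; composing it with cpu_to_le32() yields a BE-to-LE byte swap on any host. A user-space sketch of that identity, with local stand-ins for the kernel helpers:

#include <stdint.h>

/* user-space stand-in for the kernel's swab32() */
static uint32_t bswap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xff00) |
           ((v << 8) & 0xff0000) | (v << 24);
}

/* On a little-endian host: be32_to_cpu() swaps, cpu_to_le32() is a no-op.
 * On a big-endian host: be32_to_cpu() is a no-op, cpu_to_le32() swaps.
 * Composed, both hosts produce the same little-endian result. */
static uint32_t be32_to_le32(uint32_t be_word, int host_is_big_endian)
{
    uint32_t cpu = host_is_big_endian ? be_word : bswap32(be_word);
    return host_is_big_endian ? bswap32(cpu) : cpu;
}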
@@ -5768,52 +5786,53 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
     if (buffer == NULL)
         return;
 
-    buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-    buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+    buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+    buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
-    buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
-    buffer[count++] = 0x80000000;
-    buffer[count++] = 0x80000000;
+    buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+    buffer[count++] = cpu_to_le32(0x80000000);
+    buffer[count++] = cpu_to_le32(0x80000000);
 
     for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
         for (ext = sect->section; ext->extent != NULL; ++ext) {
             if (sect->id == SECT_CONTEXT) {
-                buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
-                buffer[count++] = ext->reg_index - 0xa000;
+                buffer[count++] =
+                    cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+                buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
                 for (i = 0; i < ext->reg_count; i++)
-                    buffer[count++] = ext->extent[i];
+                    buffer[count++] = cpu_to_le32(ext->extent[i]);
             } else {
                 return;
             }
         }
     }
 
-    buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
-    buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+    buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+    buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
     switch (rdev->family) {
     case CHIP_BONAIRE:
-        buffer[count++] = 0x16000012;
-        buffer[count++] = 0x00000000;
+        buffer[count++] = cpu_to_le32(0x16000012);
+        buffer[count++] = cpu_to_le32(0x00000000);
         break;
     case CHIP_KAVERI:
-        buffer[count++] = 0x00000000; /* XXX */
-        buffer[count++] = 0x00000000;
+        buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+        buffer[count++] = cpu_to_le32(0x00000000);
         break;
     case CHIP_KABINI:
-        buffer[count++] = 0x00000000; /* XXX */
-        buffer[count++] = 0x00000000;
+        buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+        buffer[count++] = cpu_to_le32(0x00000000);
         break;
     default:
-        buffer[count++] = 0x00000000;
-        buffer[count++] = 0x00000000;
+        buffer[count++] = cpu_to_le32(0x00000000);
+        buffer[count++] = cpu_to_le32(0x00000000);
         break;
     }
 
-    buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-    buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+    buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+    buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
-    buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
-    buffer[count++] = 0;
+    buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+    buffer[count++] = cpu_to_le32(0);
 }
 
 static void cik_init_pg(struct radeon_device *rdev)
@@ -7108,7 +7127,7 @@ static int cik_startup(struct radeon_device *rdev)
     ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
     r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                  CP_RB0_RPTR, CP_RB0_WPTR,
-                 RADEON_CP_PACKET2);
+                 PACKET3(PACKET3_NOP, 0x3FFF));
     if (r)
         return r;
@@ -7418,6 +7437,70 @@ void cik_fini(struct radeon_device *rdev)
     rdev->bios = NULL;
 }
 
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+    struct drm_device *dev = encoder->dev;
+    struct radeon_device *rdev = dev->dev_private;
+    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+    struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+    int bpc = 0;
+    u32 tmp = 0;
+    enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+    if (connector) {
+        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+        bpc = radeon_get_monitor_bpc(connector);
+        dither = radeon_connector->dither;
+    }
+
+    /* LVDS/eDP FMT is set up by atom */
+    if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+        return;
+
+    /* not needed for analog */
+    if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+        (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+        return;
+
+    if (bpc == 0)
+        return;
+
+    switch (bpc) {
+    case 6:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+        else
+            tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+        break;
+    case 8:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                FMT_RGB_RANDOM_ENABLE |
+                FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+        else
+            tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+        break;
+    case 10:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                FMT_RGB_RANDOM_ENABLE |
+                FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+        else
+            tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+        break;
+    default:
+        /* not needed */
+        break;
+    }
+
+    WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* display watermark setup */
 /**
  * dce8_line_buffer_adjust - Set up the line buffer
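For reference, the switch in dce8_program_fmt() above maps panel bit depth to a dither/truncate depth field: 6 bpc -> depth 0 (18bpp), 8 bpc -> depth 1 (24bpp), 10 bpc -> depth 2 (30bpp). A small sketch of that mapping in isolation, assuming only the macros shown in the cikd.h hunk later in this diff (the random-enable bits are omitted for brevity):

#include <stdint.h>

#define FMT_TRUNCATE_EN              (1 << 0)
#define FMT_TRUNCATE_DEPTH(x)        ((x) << 4)
#define FMT_SPATIAL_DITHER_EN        (1 << 8)
#define FMT_SPATIAL_DITHER_DEPTH(x)  ((x) << 11)

/* depth field: 0 = 18bpp (6 bpc), 1 = 24bpp (8 bpc), 2 = 30bpp (10 bpc) */
static uint32_t fmt_depth_bits(int bpc, int dither)
{
    int depth = (bpc == 6) ? 0 : (bpc == 8) ? 1 : 2;

    if (dither)
        return FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(depth);
    return FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(depth);
}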
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "cikd.h"
 
 /* sdma */
@@ -653,11 +654,12 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
               uint64_t addr, unsigned count,
               uint32_t incr, uint32_t flags)
 {
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
     uint64_t value;
     unsigned ndw;
 
-    if (flags & RADEON_VM_PAGE_SYSTEM) {
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+    if (flags & R600_PTE_SYSTEM) {
         while (count) {
             ndw = count * 2;
             if (ndw > 0xFFFFE)
@@ -669,16 +671,10 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = upper_32_bits(pe);
             ib->ptr[ib->length_dw++] = ndw;
             for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
+                value = radeon_vm_map_gart(rdev, addr);
+                value &= 0xFFFFFFFFFFFFF000ULL;
                 addr += incr;
-                value |= r600_flags;
+                value |= flags;
                 ib->ptr[ib->length_dw++] = value;
                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
             }
@@ -689,7 +685,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
             if (ndw > 0x7FFFF)
                 ndw = 0x7FFFF;
 
-            if (flags & RADEON_VM_PAGE_VALID)
+            if (flags & R600_PTE_VALID)
                 value = addr;
             else
                 value = 0;
@@ -697,7 +693,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
             ib->ptr[ib->length_dw++] = pe; /* dst addr */
             ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-            ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+            ib->ptr[ib->length_dw++] = flags; /* mask */
             ib->ptr[ib->length_dw++] = 0;
             ib->ptr[ib->length_dw++] = value; /* value */
             ib->ptr[ib->length_dw++] = upper_32_bits(value);
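The flag rework in the hunks above removes the per-call RADEON_VM_PAGE_* to R600_PTE_* translation: callers now pass hardware PTE flags directly, so "value |= flags" replaces "value |= r600_flags". A sketch of how one 64-bit page table entry is packed, using the flag values from the radeon.h hunk later in this diff:

#include <stdint.h>

#define R600_PTE_VALID      (1 << 0)
#define R600_PTE_SYSTEM     (1 << 1)
#define R600_PTE_SNOOPED    (1 << 2)
#define R600_PTE_READABLE   (1 << 5)
#define R600_PTE_WRITEABLE  (1 << 6)

/* each PTE is 8 bytes (pe += 8 per entry in the loops above):
 * page-aligned address in the high bits, access flags in the low bits */
static uint64_t pack_pte(uint64_t addr, uint32_t flags)
{
    return (addr & 0xFFFFFFFFFFFFF000ULL) | flags;
}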
@@ -906,6 +906,39 @@
 #define DPG_PIPE_STUTTER_CONTROL 0x6cd4
 # define STUTTER_ENABLE (1 << 0)
 
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+  /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+  /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_MODE (1 << 1)
+# define FMT_TRUNCATE_DEPTH(x) ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH(x) ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH(x) ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
 #define GRBM_CNTL 0x8000
 #define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1714,6 +1747,68 @@
 # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
 # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_DMA_DATA 0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
+  /* 0 - ME
+   * 1 - PFP
+   */
+# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+  /* 0 - LRU
+   * 1 - Stream
+   * 2 - Bypass
+   */
+# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
+  /* 0 - DST_ADDR using DAS
+   * 1 - GDS
+   * 3 - DST_ADDR using L2
+   */
+# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+  /* 0 - LRU
+   * 1 - Stream
+   * 2 - Bypass
+   */
+# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
+  /* 0 - SRC_ADDR using SAS
+   * 1 - GDS
+   * 2 - DATA
+   * 3 - SRC_ADDR using L2
+   */
+# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
+# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+  /* 0 - none
+   * 1 - 8 in 16
+   * 2 - 8 in 32
+   * 3 - 8 in 64
+   */
+# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+  /* 0 - none
+   * 1 - 8 in 16
+   * 2 - 8 in 32
+   * 3 - 8 in 64
+   */
+# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
+  /* 0 - memory
+   * 1 - register
+   */
+# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
+  /* 0 - memory
+   * 1 - register
+   */
+# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
+# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
+# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
 #define PACKET3_AQUIRE_MEM 0x58
 #define PACKET3_REWIND 0x59
 #define PACKET3_LOAD_UCONFIG_REG 0x5E
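To make the bit layout above concrete, here is how the CONTROL dword of a DMA_DATA packet might be assembled for a plain memory-to-memory copy. This is a sketch using only the macros from the hunk; it mirrors what cik_copy_cpdma() effectively emits, where all fields are zero except CP_SYNC on the last packet:

#include <stdint.h>

#define PACKET3_DMA_DATA_ENGINE(x)   ((x) << 0)   /* 0 = ME */
#define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)  /* 0 = DST_ADDR using DAS */
#define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)  /* 0 = SRC_ADDR using SAS */
#define PACKET3_DMA_DATA_CP_SYNC     (1u << 31)

static uint32_t dma_data_control(int last_packet)
{
    uint32_t control = PACKET3_DMA_DATA_ENGINE(0) |
                       PACKET3_DMA_DATA_DST_SEL(0) |
                       PACKET3_DMA_DATA_SRC_SEL(0);

    if (last_packet)    /* stall the CP until the copy completes */
        control |= PACKET3_DMA_DATA_CP_SYNC;
    return control;
}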
@@ -102,6 +102,49 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
            AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+                    struct drm_display_mode *mode)
+{
+    struct radeon_device *rdev = encoder->dev->dev_private;
+    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+    struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+    struct drm_connector *connector;
+    struct radeon_connector *radeon_connector = NULL;
+    u32 tmp = 0, offset;
+
+    if (!dig->afmt->pin)
+        return;
+
+    offset = dig->afmt->pin->offset;
+
+    list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+        if (connector->encoder == encoder) {
+            radeon_connector = to_radeon_connector(connector);
+            break;
+        }
+    }
+
+    if (!radeon_connector) {
+        DRM_ERROR("Couldn't find encoder's connector\n");
+        return;
+    }
+
+    if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+        if (connector->latency_present[1])
+            tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+                AUDIO_LIPSYNC(connector->audio_latency[1]);
+        else
+            tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+    } else {
+        if (connector->latency_present[0])
+            tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+                AUDIO_LIPSYNC(connector->audio_latency[0]);
+        else
+            tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+    }
+    WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
     struct radeon_device *rdev = encoder->dev->dev_private;
@@ -1193,6 +1193,62 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
     }
 }
 
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+    struct drm_device *dev = encoder->dev;
+    struct radeon_device *rdev = dev->dev_private;
+    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+    struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+    int bpc = 0;
+    u32 tmp = 0;
+    enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+    if (connector) {
+        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+        bpc = radeon_get_monitor_bpc(connector);
+        dither = radeon_connector->dither;
+    }
+
+    /* LVDS/eDP FMT is set up by atom */
+    if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+        return;
+
+    /* not needed for analog */
+    if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+        (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+        return;
+
+    if (bpc == 0)
+        return;
+
+    switch (bpc) {
+    case 6:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                FMT_SPATIAL_DITHER_EN);
+        else
+            tmp |= FMT_TRUNCATE_EN;
+        break;
+    case 8:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                FMT_RGB_RANDOM_ENABLE |
+                FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+        else
+            tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+        break;
+    case 10:
+    default:
+        /* not needed */
+        break;
+    }
+
+    WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
 {
     if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
@@ -3963,7 +4019,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
         if (rdev->family >= CHIP_TAHITI) {
             /* SI */
             for (i = 0; i < rdev->rlc.reg_list_size; i++)
-                dst_ptr[i] = src_ptr[i];
+                dst_ptr[i] = cpu_to_le32(src_ptr[i]);
         } else {
             /* ON/LN/TN */
             /* format:
@@ -3977,10 +4033,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
                 if (i < dws)
                     data |= (src_ptr[i] >> 2) << 16;
                 j = (((i - 1) * 3) / 2);
-                dst_ptr[j] = data;
+                dst_ptr[j] = cpu_to_le32(data);
             }
             j = ((i * 3) / 2);
-            dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+            dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
         }
         radeon_bo_kunmap(rdev->rlc.save_restore_obj);
         radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4098,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
             cik_get_csb_buffer(rdev, dst_ptr);
         } else if (rdev->family >= CHIP_TAHITI) {
             reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
-            dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
-            dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
-            dst_ptr[2] = rdev->rlc.clear_state_size;
+            dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+            dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+            dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
             si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
         } else {
             reg_list_hdr_blk_index = 0;
             reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
             data = upper_32_bits(reg_list_mc_addr);
-            dst_ptr[reg_list_hdr_blk_index] = data;
+            dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
             reg_list_hdr_blk_index++;
             for (i = 0; cs_data[i].section != NULL; i++) {
                 for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
                     reg_num = cs_data[i].section[j].reg_count;
                     data = reg_list_mc_addr & 0xffffffff;
-                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
                     reg_list_hdr_blk_index++;
 
                     data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
                     reg_list_hdr_blk_index++;
 
                     data = 0x08000000 | (reg_num * 4);
-                    dst_ptr[reg_list_hdr_blk_index] = data;
+                    dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
                     reg_list_hdr_blk_index++;
 
                     for (k = 0; k < reg_num; k++) {
                         data = cs_data[i].section[j].extent[k];
-                        dst_ptr[reg_list_blk_index + k] = data;
+                        dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
                     }
                     reg_list_mc_addr += reg_num * 4;
                     reg_list_blk_index += reg_num;
                 }
             }
-            dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+            dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
         }
         radeon_bo_kunmap(rdev->rlc.clear_state_obj);
         radeon_bo_unreserve(rdev->rlc.clear_state_obj);
@@ -35,6 +35,8 @@
 extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
 extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
 extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+                       struct drm_display_mode *mode);
 
 /*
  * update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
     WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+                       struct drm_display_mode *mode)
+{
+    struct radeon_device *rdev = encoder->dev->dev_private;
+    struct drm_connector *connector;
+    struct radeon_connector *radeon_connector = NULL;
+    u32 tmp = 0;
+
+    list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+        if (connector->encoder == encoder) {
+            radeon_connector = to_radeon_connector(connector);
+            break;
+        }
+    }
+
+    if (!radeon_connector) {
+        DRM_ERROR("Couldn't find encoder's connector\n");
+        return;
+    }
+
+    if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+        if (connector->latency_present[1])
+            tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+                AUDIO_LIPSYNC(connector->audio_latency[1]);
+        else
+            tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+    } else {
+        if (connector->latency_present[0])
+            tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+                AUDIO_LIPSYNC(connector->audio_latency[0]);
+        else
+            tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+    }
+    WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
     struct radeon_device *rdev = encoder->dev->dev_private;
@@ -68,8 +106,10 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
     int sad_count;
 
     list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-        if (connector->encoder == encoder)
+        if (connector->encoder == encoder) {
             radeon_connector = to_radeon_connector(connector);
+            break;
+        }
     }
 
     if (!radeon_connector) {
@@ -121,8 +161,10 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
     };
 
     list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-        if (connector->encoder == encoder)
+        if (connector->encoder == encoder) {
             radeon_connector = to_radeon_connector(connector);
+            break;
+        }
     }
 
     if (!radeon_connector) {
@@ -321,8 +363,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
     if (ASIC_IS_DCE6(rdev)) {
         dce6_afmt_select_pin(encoder);
         dce6_afmt_write_sad_regs(encoder);
+        dce6_afmt_write_latency_fields(encoder, mode);
     } else {
         evergreen_hdmi_write_sad_regs(encoder);
+        dce4_afmt_write_latency_fields(encoder, mode);
     }
 
     err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
@@ -750,6 +750,44 @@
  * bit6 = 192 kHz
  */
 
+#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
+# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
+# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0 = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY_TYPE_NONE 0
+# define DISPLAY_TYPE_HDMI 1
+# define DISPLAY_TYPE_DP 2
+# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
+# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
+# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
+# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
+# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
+# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
+# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
+# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
+# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
+
 #define AZ_HOT_PLUG_CONTROL 0x5e78
 # define AZ_FORCE_CODEC_WAKE (1 << 0)
 # define PIN0_JACK_DETECTION_ENABLE (1 << 4)
@@ -1312,6 +1350,38 @@
 # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
 # define DC_HPDx_EN (1 << 28)
 
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
+# define FMT_DYNAMIC_EXP_EN (1 << 0)
+# define FMT_DYNAMIC_EXP_MODE (1 << 4)
+  /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL 0x6fb8
+# define FMT_PIXEL_ENCODING (1 << 16)
+  /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6fc8
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x6fe4
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
 /* ASYNC DMA */
 #define DMA_RB_RPTR 0xd008
 #define DMA_RB_WPTR 0xd00c
@@ -174,11 +174,6 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                   struct radeon_ib *ib,
-                   uint64_t pe,
-                   uint64_t addr, unsigned count,
-                   uint32_t incr, uint32_t flags);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2399,77 +2394,6 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
            block, mc_id);
 }
 
-#define R600_ENTRY_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
-    uint32_t r600_flags = 0;
-    r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
-    r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-    r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-    if (flags & RADEON_VM_PAGE_SYSTEM) {
-        r600_flags |= R600_PTE_SYSTEM;
-        r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-    }
-    return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
-            struct radeon_ib *ib,
-            uint64_t pe,
-            uint64_t addr, unsigned count,
-            uint32_t incr, uint32_t flags)
-{
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-    uint64_t value;
-    unsigned ndw;
-
-    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-        while (count) {
-            ndw = 1 + count * 2;
-            if (ndw > 0x3FFF)
-                ndw = 0x3FFF;
-
-            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
-            ib->ptr[ib->length_dw++] = pe;
-            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
-                    value = radeon_vm_map_gart(rdev, addr);
-                    value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
-                    value = addr;
-                } else {
-                    value = 0;
-                }
-                addr += incr;
-                value |= r600_flags;
-                ib->ptr[ib->length_dw++] = value;
-                ib->ptr[ib->length_dw++] = upper_32_bits(value);
-            }
-        }
-    } else {
-        cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-    }
-}
-
 /**
  * cayman_vm_flush - vm flush using the CP
  *
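The deleted cayman_vm_page_flags() above was a per-entry translation from the driver's RADEON_VM_PAGE_* flags to the hardware R600_PTE_* bits; the series moves that conversion out of the hot set_page paths, which now receive hardware flags directly. A sketch of the removed translation for reference — the driver-side flag values here are assumptions for illustration only, while the R600_PTE_* values match the radeon.h hunk later in this diff:

#include <stdint.h>

#define R600_PTE_VALID      (1 << 0)
#define R600_PTE_SYSTEM     (1 << 1)
#define R600_PTE_SNOOPED    (1 << 2)
#define R600_PTE_READABLE   (1 << 5)
#define R600_PTE_WRITEABLE  (1 << 6)

/* hypothetical driver-side flag values, for illustration only */
#define VM_PAGE_VALID       (1 << 0)
#define VM_PAGE_READABLE    (1 << 1)
#define VM_PAGE_WRITEABLE   (1 << 2)
#define VM_PAGE_SYSTEM      (1 << 3)
#define VM_PAGE_SNOOPED     (1 << 4)

static uint32_t vm_page_flags_to_pte(uint32_t flags)
{
    uint32_t pte = 0;

    pte |= (flags & VM_PAGE_VALID) ? R600_PTE_VALID : 0;
    pte |= (flags & VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
    pte |= (flags & VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
    if (flags & VM_PAGE_SYSTEM) {
        pte |= R600_PTE_SYSTEM;
        pte |= (flags & VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
    }
    return pte;
}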
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "nid.h"
 
 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags
+ * @flags: hw access flags
  *
  * Update the page tables using the DMA (cayman/TN).
  */
@@ -256,11 +256,12 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
                 uint64_t addr, unsigned count,
                 uint32_t incr, uint32_t flags)
 {
-    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
     uint64_t value;
     unsigned ndw;
 
-    if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+    if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
         while (count) {
             ndw = count * 2;
             if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = pe;
             ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
             for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                if (flags & RADEON_VM_PAGE_SYSTEM) {
+                if (flags & R600_PTE_SYSTEM) {
                     value = radeon_vm_map_gart(rdev, addr);
                     value &= 0xFFFFFFFFFFFFF000ULL;
-                } else if (flags & RADEON_VM_PAGE_VALID) {
+                } else if (flags & R600_PTE_VALID) {
                     value = addr;
                 } else {
                     value = 0;
                 }
                 addr += incr;
-                value |= r600_flags;
+                value |= flags;
                 ib->ptr[ib->length_dw++] = value;
                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
             }
@@ -291,7 +292,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
             if (ndw > 0xFFFFE)
                 ndw = 0xFFFFE;
 
-            if (flags & RADEON_VM_PAGE_VALID)
+            if (flags & R600_PTE_VALID)
                 value = addr;
             else
                 value = 0;
@@ -299,7 +300,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
             ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
             ib->ptr[ib->length_dw++] = pe; /* dst addr */
             ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-            ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+            ib->ptr[ib->length_dw++] = flags; /* mask */
             ib->ptr[ib->length_dw++] = 0;
             ib->ptr[ib->length_dw++] = value; /* value */
             ib->ptr[ib->length_dw++] = upper_32_bits(value);
@@ -124,6 +124,59 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
     return 0;
 }
 
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+    struct drm_device *dev = encoder->dev;
+    struct radeon_device *rdev = dev->dev_private;
+    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+    struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+    struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+    int bpc = 0;
+    u32 tmp = 0;
+    enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+    if (connector) {
+        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+        bpc = radeon_get_monitor_bpc(connector);
+        dither = radeon_connector->dither;
+    }
+
+    /* LVDS FMT is set up by atom */
+    if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+        return;
+
+    /* not needed for analog */
+    if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+        (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+        return;
+
+    if (bpc == 0)
+        return;
+
+    switch (bpc) {
+    case 6:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= FMT_SPATIAL_DITHER_EN;
+        else
+            tmp |= FMT_TRUNCATE_EN;
+        break;
+    case 8:
+        if (dither == RADEON_FMT_DITHER_ENABLE)
+            /* XXX sort out optimal dither settings */
+            tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+        else
+            tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+        break;
+    case 10:
+    default:
+        /* not needed */
+        break;
+    }
+
+    WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
@@ -2328,13 +2328,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
     unsigned i;
 
     kfree(parser->relocs);
-    for (i = 0; i < parser->nchunks; i++) {
-        kfree(parser->chunks[i].kdata);
-        if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
-            kfree(parser->chunks[i].kpage[0]);
-            kfree(parser->chunks[i].kpage[1]);
-        }
-    }
+    for (i = 0; i < parser->nchunks; i++)
+        drm_free_large(parser->chunks[i].kdata);
     kfree(parser->chunks);
     kfree(parser->chunks_array);
 }
@@ -2391,13 +2386,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
     ib_chunk = &parser.chunks[parser.chunk_ib_idx];
     parser.ib.length_dw = ib_chunk->length_dw;
     *l = parser.ib.length_dw;
-    r = r600_cs_parse(&parser);
-    if (r) {
-        DRM_ERROR("Invalid command stream !\n");
+    if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+        r = -EFAULT;
         r600_cs_parser_fini(&parser, r);
         return r;
     }
-    r = radeon_cs_finish_pages(&parser);
+    r = r600_cs_parse(&parser);
     if (r) {
         DRM_ERROR("Invalid command stream !\n");
         r600_cs_parser_fini(&parser, r);
@@ -303,8 +303,10 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
     int sad_count;
 
     list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-        if (connector->encoder == encoder)
+        if (connector->encoder == encoder) {
             radeon_connector = to_radeon_connector(connector);
+            break;
+        }
     }
 
     if (!radeon_connector) {
@@ -356,8 +358,10 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
     };
 
     list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-        if (connector->encoder == encoder)
+        if (connector->encoder == encoder) {
             radeon_connector = to_radeon_connector(connector);
+            break;
+        }
     }
 
     if (!radeon_connector) {
@@ -1199,6 +1199,34 @@
 # define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
 # define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
 
+/* DCE3 FMT blocks */
+#define FMT_CONTROL 0x6700
+# define FMT_PIXEL_ENCODING (1 << 16)
+  /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL 0x6710
+# define FMT_TRUNCATE_EN (1 << 0)
+# define FMT_TRUNCATE_DEPTH (1 << 4)
+# define FMT_SPATIAL_DITHER_EN (1 << 8)
+# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+# define FMT_RGB_RANDOM_ENABLE (1 << 14)
+# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+# define FMT_TEMPORAL_DITHER_EN (1 << 16)
+# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+# define FMT_TEMPORAL_LEVEL (1 << 24)
+# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+# define FMT_25FRC_SEL(x) ((x) << 26)
+# define FMT_50FRC_SEL(x) ((x) << 28)
+# define FMT_75FRC_SEL(x) ((x) << 30)
+#define FMT_CLAMP_CONTROL 0x672c
+# define FMT_CLAMP_DATA_EN (1 << 0)
+# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
+# define FMT_CLAMP_6BPC 0
+# define FMT_CLAMP_8BPC 1
+# define FMT_CLAMP_10BPC 2
+
 /* Power management */
 #define CG_SPLL_FUNC_CNTL 0x600
 # define SPLL_RESET (1 << 0)
@@ -98,6 +98,7 @@ extern int radeon_lockup_timeout;
 extern int radeon_fastfb;
 extern int radeon_dpm;
 extern int radeon_aspm;
+extern int radeon_runtime_pm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@ struct radeon_fence_driver {
     /* sync_seq is protected by ring emission lock */
     uint64_t sync_seq[RADEON_NUM_RINGS];
     atomic64_t last_seq;
-    unsigned long last_activity;
     bool initialized;
 };
@@ -832,6 +832,12 @@ struct radeon_mec {
 #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
 #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
 
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
 struct radeon_vm {
     struct list_head list;
     struct list_head va;
@@ -967,12 +973,8 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
     uint32_t chunk_id;
     uint32_t length_dw;
-    int kpage_idx[2];
-    uint32_t *kpage[2];
     uint32_t *kdata;
     void __user *user_ptr;
-    int last_copied_page;
-    int last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@ struct radeon_cs_parser {
     struct ww_acquire_ctx ticket;
 };
 
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+    struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+    if (ibc->kdata)
+        return ibc->kdata[idx];
+    return p->ib.ptr[idx];
+}
+
 
 struct radeon_cs_packet {
     unsigned idx;
@@ -1675,8 +1684,6 @@ struct radeon_asic {
     struct {
         int (*init)(struct radeon_device *rdev);
         void (*fini)(struct radeon_device *rdev);
-
-        u32 pt_ring_index;
         void (*set_page)(struct radeon_device *rdev,
                  struct radeon_ib *ib,
                  uint64_t pe,
@@ -2170,6 +2177,7 @@ struct radeon_device {
     bool need_dma32;
     bool accel_working;
     bool fastfb_working; /* IGP feature*/
+    bool needs_reset;
     struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
     const struct firmware *me_fw; /* all family ME firmware */
     const struct firmware *pfp_fw; /* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@ struct radeon_device {
     /* clock, powergating flags */
     u32 cg_flags;
     u32 pg_flags;
+
+    struct dev_pm_domain vga_pm_domain;
+    bool have_disp_power_ref;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@ extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
                          const u32 *registers,
@@ -1622,8 +1622,7 @@ static struct radeon_asic cayman_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cayman_vm_set_page,
+        .set_page = &cayman_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@ static struct radeon_asic trinity_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cayman_vm_set_page,
+        .set_page = &cayman_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@ static struct radeon_asic si_asic = {
     .vm = {
         .init = &si_vm_init,
         .fini = &si_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &si_vm_set_page,
+        .set_page = &si_dma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@ static struct radeon_asic si_asic = {
         .hdmi_setmode = &evergreen_hdmi_setmode,
     },
     .copy = {
-        .blit = NULL,
+        .blit = &r600_copy_cpdma,
         .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .dma = &si_copy_dma,
         .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@ static struct radeon_asic ci_asic = {
     .vm = {
         .init = &cik_vm_init,
        .fini = &cik_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cik_vm_set_page,
+        .set_page = &cik_sdma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@ static struct radeon_asic kv_asic = {
     .vm = {
         .init = &cik_vm_init,
         .fini = &cik_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .set_page = &cik_vm_set_page,
+        .set_page = &cik_sdma_vm_set_page,
     },
     .ring = {
         [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -581,17 +581,18 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-            struct radeon_ib *ib,
-            uint64_t pe,
-            uint64_t addr, unsigned count,
-            uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                 struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+                struct radeon_ib *ib,
+                uint64_t pe,
+                uint64_t addr, unsigned count,
+                uint32_t incr, uint32_t flags);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-            struct radeon_ib *ib,
-            uint64_t pe,
-            uint64_t addr, unsigned count,
-            uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
         uint64_t src_offset, uint64_t dst_offset,
         unsigned num_gpu_pages,
         struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+            struct radeon_ib *ib,
+            uint64_t pe,
+            uint64_t addr, unsigned count,
+            uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@ -705,6 +706,10 @@ int cik_copy_dma(struct radeon_device *rdev,
|
|||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence);
|
||||
int cik_copy_cpdma(struct radeon_device *rdev,
|
||||
uint64_t src_offset, uint64_t dst_offset,
|
||||
unsigned num_gpu_pages,
|
||||
struct radeon_fence **fence);
|
||||
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
|
||||
|
@ -731,11 +736,11 @@ int cik_irq_process(struct radeon_device *rdev);
|
|||
int cik_vm_init(struct radeon_device *rdev);
|
||||
void cik_vm_fini(struct radeon_device *rdev);
|
||||
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||
void cik_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags);
|
||||
void cik_sdma_vm_set_page(struct radeon_device *rdev,
|
||||
struct radeon_ib *ib,
|
||||
uint64_t pe,
|
||||
uint64_t addr, unsigned count,
|
||||
uint32_t incr, uint32_t flags);
|
||||
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
|
||||
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||
u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
|
||||
|
|
|
@@ -59,6 +59,10 @@ struct atpx_mux {
	u16 mux;
} __packed;

bool radeon_is_px(void) {
	return radeon_atpx_priv.atpx_detected;
}

/**
 * radeon_atpx_call - call an ATPX method
 *
@@ -31,6 +31,8 @@
#include "radeon.h"
#include "atom.h"

#include <linux/pm_runtime.h>

extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
				      struct drm_encoder *encoder,

@@ -411,6 +413,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
		}
	}

	if (property == rdev->mode_info.dither_property) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		/* need to find digital encoder on connector */
		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
		if (!encoder)
			return 0;

		radeon_encoder = to_radeon_encoder(encoder);

		if (radeon_connector->dither != val) {
			radeon_connector->dither = val;
			radeon_property_change_mode(&radeon_encoder->base);
		}
	}

	if (property == rdev->mode_info.underscan_property) {
		/* need to find digital encoder on connector */
		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);

@@ -626,6 +643,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	if (encoder) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

@@ -651,6 +673,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
	/* check acpi lid status ??? */

	radeon_connector_update_scratch_regs(connector, ret);
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

@@ -750,6 +774,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
	struct drm_encoder_helper_funcs *encoder_funcs;
	bool dret = false;
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)

@@ -790,9 +819,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
		 * detected a monitor via load.
		 */
		if (radeon_connector->detected_by_load)
			return connector->status;
		else
			return ret;
			ret = connector->status;
		goto out;
	}

	if (radeon_connector->dac_load_detect && encoder) {

@@ -817,6 +845,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
	}

	radeon_connector_update_scratch_regs(connector, ret);

out:
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);

	return ret;
}

@@ -873,10 +906,15 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	if (!radeon_connector->dac_load_detect)
		return ret;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		ret = connector_status_disconnected;

@@ -887,6 +925,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
	if (ret == connector_status_connected)
		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
	radeon_connector_update_scratch_regs(connector, ret);
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

@@ -954,12 +994,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
	struct drm_encoder *encoder = NULL;
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct drm_mode_object *obj;
	int i;
	int i, r;
	enum drm_connector_status ret = connector_status_disconnected;
	bool dret = false, broken_edid = false;

	if (!force && radeon_check_hpd_status_unchanged(connector))
		return connector->status;
	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	if (!force && radeon_check_hpd_status_unchanged(connector)) {
		ret = connector->status;
		goto exit;
	}

	if (radeon_connector->ddc_bus)
		dret = radeon_ddc_probe(radeon_connector, false);

@@ -1110,6 +1156,11 @@ out:

	/* updated in get modes as well since we need to know if it's analog or digital */
	radeon_connector_update_scratch_regs(connector, ret);

exit:
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);

	return ret;
}

@@ -1377,9 +1428,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
	enum drm_connector_status ret = connector_status_disconnected;
	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
	int r;

	if (!force && radeon_check_hpd_status_unchanged(connector))
		return connector->status;
	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	if (!force && radeon_check_hpd_status_unchanged(connector)) {
		ret = connector->status;
		goto out;
	}

	if (radeon_connector->edid) {
		kfree(radeon_connector->edid);

@@ -1443,6 +1501,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
	}

	radeon_connector_update_scratch_regs(connector, ret);
out:
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);

	return ret;
}

@@ -1660,7 +1722,10 @@ radeon_add_atom_connector(struct drm_device *dev,
					      0);
		drm_object_attach_property(&radeon_connector->base.base,
					   rdev->mode_info.audio_property,
					   RADEON_AUDIO_DISABLE);
					   RADEON_AUDIO_AUTO);
		drm_object_attach_property(&radeon_connector->base.base,
					   rdev->mode_info.dither_property,
					   RADEON_FMT_DITHER_DISABLE);
		subpixel_order = SubPixelHorizontalRGB;
		connector->interlace_allowed = true;
		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)

@@ -1757,7 +1822,12 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (ASIC_IS_DCE2(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
						   RADEON_AUDIO_DISABLE);
						   RADEON_AUDIO_AUTO);
		}
		if (ASIC_IS_AVIVO(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.dither_property,
						   RADEON_FMT_DITHER_DISABLE);
		}
		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
			radeon_connector->dac_load_detect = true;

@@ -1802,7 +1872,12 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (ASIC_IS_DCE2(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
						   RADEON_AUDIO_DISABLE);
						   RADEON_AUDIO_AUTO);
		}
		if (ASIC_IS_AVIVO(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.dither_property,
						   RADEON_FMT_DITHER_DISABLE);
		}
		subpixel_order = SubPixelHorizontalRGB;
		connector->interlace_allowed = true;

@@ -1846,7 +1921,12 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (ASIC_IS_DCE2(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.audio_property,
						   RADEON_AUDIO_DISABLE);
						   RADEON_AUDIO_AUTO);
		}
		if (ASIC_IS_AVIVO(rdev)) {
			drm_object_attach_property(&radeon_connector->base.base,
						   rdev->mode_info.dither_property,
						   RADEON_FMT_DITHER_DISABLE);
		}
		connector->interlace_allowed = true;
		/* in theory with a DP to VGA converter... */
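Every connector detect path above now brackets its hardware access with pm_runtime_get_sync() and pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend(), so probing a connector wakes a runtime-suspended dGPU and then lets it drift back to sleep. A user-space sketch of that acquire/mark-busy/release discipline, assuming nothing beyond the C standard library; all names here are invented:

#include <stdio.h>

/* Sketch of a runtime-PM style reference: get may power the device up,
 * put_autosuspend only powers it down after the last user is gone. */
static int usage_count;
static int powered;

static int rpm_get_sync(void)
{
    if (usage_count++ == 0 && !powered) {
        powered = 1;
        printf("device powered up\n");
    }
    return 0;
}

static void rpm_put_autosuspend(void)
{
    if (--usage_count == 0) {
        powered = 0;            /* a real PM core would arm a timer first */
        printf("device autosuspended\n");
    }
}

static int connector_detect(void)
{
    int status;

    if (rpm_get_sync() < 0)
        return -1;              /* mirror the driver: report disconnected */

    status = 1;                 /* pretend we probed DDC successfully */

    rpm_put_autosuspend();
    return status;
}

int main(void)
{
    printf("status=%d\n", connector_detect());
    return 0;
}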
@@ -213,9 +213,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}

@@ -238,25 +236,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
			return -EINVAL;
		}

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

@@ -279,34 +283,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
		}
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}
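The parser-init rework above copies every chunk it will need into a kernel buffer once (drm_malloc_ab plus a single DRM_COPY_FROM_USER), instead of keeping user pointers around and special-casing the RELOCS/FLAGS chunks; teardown correspondingly shrinks to one drm_free_large() loop. A hedged sketch of that "snapshot user data up front" flow, with a stub standing in for the user-copy primitive:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct chunk {
    uint32_t length_dw;
    uint32_t *kdata;   /* private copy; no user pointer kept past init */
};

/* Stand-in for DRM_COPY_FROM_USER: returns 0 on success. */
static int copy_from_user_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static int chunk_init(struct chunk *c, const uint32_t *udata, uint32_t ndw)
{
    c->length_dw = ndw;
    c->kdata = malloc((size_t)ndw * sizeof(uint32_t));
    if (!c->kdata)
        return -1;
    /* one bulk copy at init; later parsing never touches user memory */
    if (copy_from_user_stub(c->kdata, udata, (size_t)ndw * sizeof(uint32_t))) {
        free(c->kdata);
        return -1;
    }
    return 0;
}

int main(void)
{
    uint32_t user[3] = { 0x5, 1, 2 };  /* flags chunk: cs_flags, ring, prio */
    struct chunk c;

    if (chunk_init(&c, user, 3) == 0) {
        printf("cs_flags=%#x ring=%u prio=%d\n",
               c.kdata[0], c.kdata[1], (int32_t)c.kdata[2]);
        free(c.kdata);
    }
    return 0;
}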
@@ -340,13 +316,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);

@@ -356,7 +327,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)

@@ -365,28 +335,11 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

@@ -424,7 +377,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

@@ -434,49 +386,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		if (parser->const_ib.length_dw) {
			r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
			if (r) {
				return r;
			}
		}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;

@@ -528,6 +444,62 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
	return r;
}

static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
					       ib_chunk->user_ptr,
					       ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;

@@ -553,10 +525,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
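The new radeon_cs_ib_fill() above folds the old VM and non-VM IB setup into one helper: size-check the chunk, grab an IB of length_dw * 4 bytes, then either memcpy from the chunk's kernel-side snapshot or fall back to a user copy. A compact sketch of that decision under invented types, not the driver's actual structures:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define IB_MAX_DW (64 * 1024)   /* assumed stand-in for RADEON_IB_VM_MAX_SIZE */

struct ib { uint32_t *ptr; unsigned length_dw; };

/* Fill an indirect buffer from a chunk: prefer the kernel-side snapshot,
 * otherwise copy from the (here simulated) user pointer. */
static int ib_fill(struct ib *ib, const uint32_t *kdata,
                   const uint32_t *user_ptr, unsigned length_dw)
{
    if (length_dw > IB_MAX_DW)
        return -1;                       /* "cs IB too big" */

    ib->ptr = malloc((size_t)length_dw * 4);
    if (!ib->ptr)
        return -1;
    ib->length_dw = length_dw;

    if (kdata)
        memcpy(ib->ptr, kdata, (size_t)length_dw * 4);
    else
        memcpy(ib->ptr, user_ptr, (size_t)length_dw * 4); /* user-copy path */
    return 0;
}

int main(void)
{
    uint32_t cmds[4] = { 1, 2, 3, 4 };
    struct ib ib;
    int r = ib_fill(&ib, cmds, NULL, 4);

    if (r == 0)
        free(ib.ptr);
    return r ? 1 : 0;
}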
@@ -580,97 +557,6 @@ out:
	return r;
}

int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}

static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}

u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
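The block deleted above was the old demand-paging scheme: radeon_get_ib_value() kept a two-slot page cache (kpage[0]/kpage[1]) and faulted IB pages in from user space on a miss, evicting whichever slot held the older page. With chunks copied up front, that machinery is dead weight. For reference, its eviction rule in isolation, as my reading of the removed code:

#include <stdio.h>

/* Two-slot cache: on a miss, reuse whichever slot holds the older page.
 * kpage_idx[] mirrors the removed radeon_cs_chunk fields (-1 = empty). */
static int kpage_idx[2] = { -1, -1 };

static int cache_lookup(int pg_idx)
{
    int victim;

    if (kpage_idx[0] == pg_idx)
        return 0;
    if (kpage_idx[1] == pg_idx)
        return 1;
    /* miss: victim is the slot with the smaller (older) page index */
    victim = (kpage_idx[0] < kpage_idx[1]) ? 0 : 1;
    kpage_idx[victim] = pg_idx;      /* a real cache would copy data too */
    return victim;
}

int main(void)
{
    printf("%d %d %d\n", cache_lookup(0), cache_lookup(1), cache_lookup(2));
    return 0;
}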
@@ -101,6 +101,12 @@ static const char radeon_family_name[][16] = {
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_is_px(void);
#else
static inline bool radeon_is_px(void) { return false; }
#endif

/**
 * radeon_program_register_sequence - program an array of registers.
 *

@@ -1076,7 +1082,10 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

@@ -1087,7 +1096,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);
		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

@@ -1097,7 +1106,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

@@ -1147,6 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;

@@ -1293,7 +1303,14 @@ int radeon_device_init(struct radeon_device *rdev,
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);

	if (radeon_runtime_pm == 1)
		runtime = true;
	if ((radeon_runtime_pm == -1) && radeon_is_px())
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
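radeon_device_init() now decides at probe time whether to register as a runtime-managed vga_switcheroo client: forced on with runpm=1, or automatic when runpm=-1 and ATPX reports a PowerXpress (PX) laptop. The tri-state decision in isolation, as a small self-checking sketch:

#include <stdbool.h>
#include <stdio.h>

/* runpm: 1 = force enable, 0 = disable, -1 = enable only on PX (default) */
static bool runtime_pm_wanted(int runpm, bool is_px)
{
    if (runpm == 1)
        return true;
    if (runpm == -1 && is_px)
        return true;
    return false;
}

int main(void)
{
    printf("%d %d %d\n",
           runtime_pm_wanted(1, false),    /* forced on a desktop: 1 */
           runtime_pm_wanted(-1, true),    /* PX laptop default:   1 */
           runtime_pm_wanted(-1, false));  /* non-PX default:      0 */
    return 0;
}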
@@ -1383,7 +1400,7 @@ void radeon_device_fini(struct radeon_device *rdev)
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;

@@ -1394,9 +1411,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)

@@ -1455,14 +1470,17 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

@@ -1475,7 +1493,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

@@ -1484,12 +1502,17 @@ int radeon_resume_kms(struct drm_device *dev)
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);

@@ -1502,8 +1525,10 @@ int radeon_resume_kms(struct drm_device *dev)
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();
	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {

@@ -1549,6 +1574,14 @@ int radeon_gpu_reset(struct radeon_device *rdev)
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
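radeon_suspend_kms()/radeon_resume_kms() swap the pm_message_t for two booleans: suspend/resume gates the PCI power-state transition (false for hibernate freeze/thaw and for the runtime path, which handles PCI itself), while fbcon gates the console locking around fbdev suspend (false when there is no fbcon to touch). A sketch of the gating with stubs standing in for the PCI and console calls:

#include <stdbool.h>
#include <stdio.h>

static void pci_power_down(void)     { printf("PCI -> D3hot\n"); }
static void fbdev_set_suspend(int s) { printf("fbdev suspend=%d\n", s); }

static int suspend_kms(bool suspend, bool fbcon)
{
    printf("saving state, idling rings\n"); /* common path, always runs */

    if (suspend)
        pci_power_down();     /* skipped for freeze/thaw and runtime PM */

    if (fbcon) {
        /* console_lock()/console_unlock() bracket this in the driver */
        fbdev_set_suspend(1);
    }
    return 0;
}

int main(void)
{
    suspend_kms(true, true);    /* ordinary system suspend */
    suspend_kms(false, true);   /* hibernate freeze */
    suspend_kms(false, false);  /* runtime suspend */
    return 0;
}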
@@ -30,6 +30,7 @@
#include "atom.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

@@ -494,11 +495,55 @@ unlock_free:
	return r;
}

static int
radeon_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	rdev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !rdev->have_disp_power_ref) {
		rdev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && rdev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		rdev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.set_config = radeon_crtc_set_config,
	.destroy = radeon_crtc_destroy,
	.page_flip = radeon_crtc_page_flip,
};

@@ -1178,6 +1223,12 @@ static struct drm_prop_enum_list radeon_audio_enum_list[] =
	{ RADEON_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list radeon_dither_enum_list[] =
{	{ RADEON_FMT_DITHER_DISABLE, "off" },
	{ RADEON_FMT_DITHER_ENABLE, "on" },
};

static int radeon_modeset_create_props(struct radeon_device *rdev)
{
	int sz;

@@ -1234,6 +1285,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
				 "audio",
				 radeon_audio_enum_list, sz);

	sz = ARRAY_SIZE(radeon_dither_enum_list);
	rdev->mode_info.dither_property =
		drm_property_create_enum(rdev->ddev, 0,
				 "dither",
				 radeon_dither_enum_list, sz);

	return 0;
}
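radeon_crtc_set_config() above keeps the dGPU awake while any CRTC is lit: after every modeset it recounts enabled CRTCs and holds exactly one extra runtime-PM reference while that count is nonzero, dropping it when the last display turns off. The reference juggling on its own, as a minimal sketch with a plain counter in place of the PM core:

#include <stdbool.h>
#include <stdio.h>

static int pm_refs;
static bool have_disp_power_ref;

static void pm_get(void) { pm_refs++; }
static void pm_put(void) { pm_refs--; }

/* Call after each modeset with "is any CRTC still enabled?". */
static void balance_display_ref(bool active)
{
    pm_get();                        /* ref taken on entering set_config */

    if (active && !have_disp_power_ref) {
        have_disp_power_ref = true;  /* keep the entry ref as the disp ref */
        return;
    }
    if (!active && have_disp_power_ref) {
        pm_put();                    /* drop the long-held display ref */
        have_disp_power_ref = false;
    }
    pm_put();                        /* drop the entry ref */
}

int main(void)
{
    balance_display_ref(true);   /* screen on:  refs stays at 1 */
    balance_display_ref(true);   /* still on:   refs stays at 1 */
    balance_display_ref(false);  /* all off:    refs drops to 0 */
    printf("refs=%d\n", pm_refs);
    return 0;
}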
@@ -36,8 +36,9 @@
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>

#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include "drm_crtc_helper.h"
/*
 * KMS wrapper.
 * - 2.0.0 - initial interface

@@ -87,8 +88,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
int radeon_resume_kms(struct drm_device *dev);
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);

@@ -136,9 +137,11 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
bool radeon_is_px(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
static inline bool radeon_is_px(void) { return false; }
#endif

int radeon_no_wb;

@@ -161,6 +164,7 @@ int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;

MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);

@@ -222,6 +226,9 @@ module_param_named(dpm, radeon_dpm, int, 0444);
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, radeon_aspm, int, 0444);

MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, radeon_runtime_pm, int, 0444);

static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};

@@ -258,6 +265,7 @@ static int radeon_resume(struct drm_device *dev)
	return 0;
}

static const struct file_operations radeon_driver_old_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
@@ -352,25 +360,144 @@ radeon_pci_remove(struct pci_dev *pdev)
	drm_put_dev(dev);
}

static int
radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int radeon_pmops_suspend(struct device *dev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(dev, state);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(drm_dev, true, true);
}

static int
radeon_pci_resume(struct pci_dev *pdev)
static int radeon_pmops_resume(struct device *dev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(dev);
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(drm_dev, true, true);
}

static int radeon_pmops_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(drm_dev, false, true);
}

static int radeon_pmops_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(drm_dev, false, true);
}

static int radeon_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (radeon_runtime_pm == 0)
		return -EINVAL;

	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
	drm_kms_helper_poll_disable(drm_dev);
	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);

	ret = radeon_suspend_kms(drm_dev, false, false);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;

	return 0;
}

static int radeon_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	int ret;

	if (radeon_runtime_pm == 0)
		return -EINVAL;

	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	ret = radeon_resume_kms(drm_dev, false, false);
	drm_kms_helper_poll_enable(drm_dev);
	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
	return 0;
}

static int radeon_pmops_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_crtc *crtc;

	if (radeon_runtime_pm == 0)
		return -EBUSY;

	/* are we PX enabled? */
	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
		DRM_DEBUG_DRIVER("failing to power off - not px\n");
		return -EBUSY;
	}

	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
		if (crtc->enabled) {
			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
			return -EBUSY;
		}
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	return 1;
}

long radeon_drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev;
	long ret;
	dev = file_priv->minor->dev;
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_ioctl(filp, cmd, arg);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct dev_pm_ops radeon_pm_ops = {
	.suspend = radeon_pmops_suspend,
	.resume = radeon_pmops_resume,
	.freeze = radeon_pmops_freeze,
	.thaw = radeon_pmops_thaw,
	.poweroff = radeon_pmops_freeze,
	.restore = radeon_pmops_resume,
	.runtime_suspend = radeon_pmops_runtime_suspend,
	.runtime_resume = radeon_pmops_runtime_resume,
	.runtime_idle = radeon_pmops_runtime_idle,
};

static const struct file_operations radeon_driver_kms_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.unlocked_ioctl = radeon_drm_ioctl,
	.mmap = radeon_mmap,
	.poll = drm_poll,
	.read = drm_read,
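Two pieces above implement "power down only when truly idle": radeon_pmops_runtime_idle() vetoes with -EBUSY while any CRTC is enabled (or when PX/runtime PM is off) and otherwise returns 1 after arming autosuspend, while radeon_drm_ioctl() wraps every ioctl in a get/put pair so userspace activity both wakes the GPU and resets the idle timer. The shape of the idle veto, as a standalone sketch with invented types:

#include <stdio.h>

#define EBUSY 16

struct crtc { int enabled; };

/* Return -EBUSY to veto, or 1 to let the PM core autosuspend instead of
 * calling the suspend hook directly (mirrors the pmops contract). */
static int runtime_idle(const struct crtc *crtcs, int n, int runpm, int is_px)
{
    int i;

    if (runpm == 0)
        return -EBUSY;              /* runtime PM disabled outright */
    if (runpm == -1 && !is_px)
        return -EBUSY;              /* auto mode applies only on PX systems */

    for (i = 0; i < n; i++)
        if (crtcs[i].enabled)
            return -EBUSY;          /* a display is up, stay awake */

    /* here: pm_runtime_mark_last_busy(); pm_runtime_autosuspend(); */
    return 1;
}

int main(void)
{
    struct crtc c[2] = { { 1 }, { 0 } };

    printf("%d\n", runtime_idle(c, 2, -1, 1)); /* -16: CRTC still active */
    c[0].enabled = 0;
    printf("%d\n", runtime_idle(c, 2, -1, 1)); /* 1: may autosuspend */
    return 0;
}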
@@ -379,6 +506,15 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};

static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	radeon_driver_unload_kms(dev);
}

static struct drm_driver kms_driver = {
	.driver_features =
	    DRIVER_USE_AGP |

@@ -391,8 +527,6 @@ static struct drm_driver kms_driver = {
	.postclose = radeon_driver_postclose_kms,
	.lastclose = radeon_driver_lastclose_kms,
	.unload = radeon_driver_unload_kms,
	.suspend = radeon_suspend_kms,
	.resume = radeon_resume_kms,
	.get_vblank_counter = radeon_get_vblank_counter_kms,
	.enable_vblank = radeon_enable_vblank_kms,
	.disable_vblank = radeon_disable_vblank_kms,

@@ -449,8 +583,8 @@ static struct pci_driver radeon_kms_pci_driver = {
	.id_table = pciidlist,
	.probe = radeon_pci_probe,
	.remove = radeon_pci_remove,
	.suspend = radeon_pci_suspend,
	.resume = radeon_pci_resume,
	.driver.pm = &radeon_pm_ops,
	.shutdown = radeon_pci_shutdown,
};

static int __init radeon_init(void)
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0

long radeon_drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg);

/* The rest of the file is DEPRECATED! */
#ifdef CONFIG_DRM_RADEON_UMS
@@ -190,10 +190,8 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
	if (wake)
		wake_up_all(&rdev->fence_queue);
	}
}

/**

@@ -212,13 +210,13 @@ static void radeon_fence_destroy(struct kref *kref)
}

/**
 * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last singled fence sequnce number is >= the requested
 * Check if the last signaled fence sequnce number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequnce number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptable sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequnce number array is indexed by ring id.
 * @intr selects whether to use interruptable (true) or non-interruptable
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait(), et al.
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int r;
	int i, r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity valuee, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
			if (r)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
			if (i != RADEON_NUM_RINGS)
				continue;
			}

			if (lock_ring) {
			if (lock_ring)
				mutex_lock(&rdev->ring_lock);

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
			if (i < RADEON_NUM_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* change last activity so nobody else think there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
				/* remember that we need an reset */
				rdev->needs_reset = true;
				if (lock_ring)
					mutex_unlock(&rdev->ring_lock);
				}
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}

			if (lock_ring) {
			if (lock_ring)
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
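The reworked radeon_fence_wait_seq() above waits on one array of per-ring targets: a zero entry means "not waiting on this ring", and the wakeup predicate is radeon_fence_any_seq_signaled(), true as soon as any armed ring passes its target. That predicate and its polling shape, reduced to plain C (the driver sleeps on a wait queue instead of busy-polling):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 8

static uint64_t last_seq[NUM_RINGS];   /* last value signaled per ring */

/* true if any armed ring (target != 0) has reached its target */
static bool any_seq_signaled(const uint64_t *target)
{
    int i;

    for (i = 0; i < NUM_RINGS; i++)
        if (target[i] && last_seq[i] >= target[i])
            return true;
    return false;
}

int main(void)
{
    uint64_t target[NUM_RINGS] = { 0 };

    target[0] = 5;        /* wait on the GFX ring reaching seq 5 ... */
    target[3] = 2;        /* ... or the DMA ring reaching seq 2, first wins */

    while (!any_seq_signaled(target))
        last_seq[3]++;    /* simulate DMA fences signaling */

    printf("woke: dma seq=%llu\n", (unsigned long long)last_seq[3]);
    return 0;
}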
@ -388,6 +404,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
|
|||
*/
|
||||
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
|
||||
{
|
||||
uint64_t seq[RADEON_NUM_RINGS] = {};
|
||||
int r;
|
||||
|
||||
if (fence == NULL) {
|
||||
|
@ -395,150 +412,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
r = radeon_fence_wait_seq(fence->rdev, fence->seq,
|
||||
fence->ring, intr, true);
|
||||
if (r) {
|
||||
seq[fence->ring] = fence->seq;
|
||||
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
|
||||
return 0;
|
||||
|
||||
r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
|
||||
if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* radeon_fence_wait_any_seq - wait for a sequence number on any ring
|
||||
*
|
||||
* @rdev: radeon device pointer
|
||||
* @target_seq: sequence number(s) we want to wait for
|
||||
* @intr: use interruptable sleep
|
||||
*
|
||||
* Wait for the requested sequence number(s) to be written by any ring
|
||||
* (all asics). Sequnce number array is indexed by ring id.
|
||||
* @intr selects whether to use interruptable (true) or non-interruptable
|
||||
* (false) sleep when waiting for the sequence number. Helper function
|
||||
* for radeon_fence_wait_any(), et al.
|
||||
* Returns 0 if the sequence number has passed, error for all other cases.
|
||||
*/
|
||||
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
|
||||
u64 *target_seq, bool intr)
|
||||
{
|
||||
unsigned long timeout, last_activity, tmp;
|
||||
unsigned i, ring = RADEON_NUM_RINGS;
|
||||
bool signaled;
|
||||
int r;
|
||||
|
||||
for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
|
||||
if (!target_seq[i]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* use the most recent one as indicator */
|
||||
if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
|
||||
last_activity = rdev->fence_drv[i].last_activity;
|
||||
}
|
||||
|
||||
		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * either way, just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *

@@ -557,7 +442,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {

@@ -567,15 +452,19 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq(rdev, seq, intr, true);
	if (r) {
		return r;
	}

@@ -594,15 +483,15 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
	return radeon_fence_wait_seq(rdev, seq, false, false);
}

/**

@@ -617,14 +506,15 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	r = radeon_fence_wait_seq(rdev, seq, false, false);
	if (r) {
		if (r == -EDEADLK) {
		if (r == -EDEADLK)
			return -EDEADLK;
		}

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}

@@ -826,7 +716,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}
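The per-ring wait variants above now funnel into a single radeon_fence_wait_seq() that takes a whole per-ring sequence array; slots left at zero are simply skipped. A minimal usage sketch, inferred from the call sites in this hunk rather than copied from the commit:

	/* wait for one ring to idle; slots left at 0 are ignored */
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	r = radeon_fence_wait_seq(rdev, seq, false, false); /* !intr; ring_lock assumed held */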
@@ -607,8 +607,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pts_size;
	u64 *pd_addr;
	unsigned pd_size, pd_entries, pts_size;
	struct radeon_ib ib;
	int r;

	if (vm == NULL) {

@@ -619,8 +619,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
		return 0;
	}

retry:
	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
			     &vm->page_directory, pd_size,
			     RADEON_VM_PTB_ALIGN_SIZE, false);

@@ -637,9 +639,31 @@ retry:
	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

	/* Initially clear the page directory */
	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
	memset(pd_addr, 0, pd_size);
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, pd_entries * 2 + 64);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
				0, pd_entries, 0, 0);

	radeon_ib_sync_to(&ib, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	/* allocate page table array */
	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

@@ -913,6 +937,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
	return result;
}

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;
	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *

@@ -974,7 +1018,11 @@ retry:
		if (count) {
			radeon_asic_vm_set_page(rdev, ib, last_pde,
						last_pt, count, incr,
						RADEON_VM_PAGE_VALID);
						R600_PTE_VALID);

			count *= RADEON_VM_PTE_COUNT;
			radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
						count, 0, 0);
		}

		count = 1;

@@ -987,8 +1035,11 @@ retry:
	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
					incr, RADEON_VM_PAGE_VALID);
					incr, R600_PTE_VALID);

		count *= RADEON_VM_PTE_COUNT;
		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
					count, 0, 0);
	}

	return 0;

@@ -1082,7 +1133,6 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	unsigned ridx = rdev->asic->vm.pt_ring_index;
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, npdes, ndw;

@@ -1151,11 +1201,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
	/* reserve space for pde addresses */
	ndw += npdes * 2;

	/* reserve space for clearing new page tables */
	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	ib.length_dw = 0;

	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);

@@ -1165,7 +1218,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
	}

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, bo_va->flags);
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_ib_sync_to(&ib, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
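Since radeon_vm_page_flags() above is pure bit translation, it is easy to check by hand; a worked example (mine, for illustration) of what the backends now receive:

	/* userspace asks for a valid, readable, snooped system page ... */
	uint32_t hw_flags = radeon_vm_page_flags(RADEON_VM_PAGE_VALID |
						 RADEON_VM_PAGE_READABLE |
						 RADEON_VM_PAGE_SYSTEM |
						 RADEON_VM_PAGE_SNOOPED);
	/* ... and the hardware sees R600_PTE_VALID | R600_PTE_READABLE |
	 * R600_PTE_SYSTEM | R600_PTE_SNOOPED */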
@@ -418,7 +418,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	ret = drm_ioctl(filp, cmd, arg);
	ret = radeon_drm_ioctl(filp, cmd, arg);

	return ret;
}
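Swapping drm_ioctl() for radeon_drm_ioctl() makes the 32-bit compat path wake a runtime-suspended dGPU too. The wrapper itself is outside this hunk; a plausible sketch, assuming it follows the same get/put pattern used in radeon_driver_open_kms() further down:

	long radeon_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	{
		struct drm_file *file_priv = filp->private_data;
		struct drm_device *dev = file_priv->minor->dev;
		long ret;

		ret = pm_runtime_get_sync(dev->dev);	/* wake the GPU if asleep */
		if (ret < 0)
			return ret;

		ret = drm_ioctl(filp, cmd, arg);

		pm_runtime_mark_last_busy(dev->dev);	/* restart the idle timer */
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}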
@@ -32,6 +32,8 @@
#include "radeon.h"
#include "atom.h"

#include <linux/pm_runtime.h>

#define RADEON_WAIT_IDLE_TIMEOUT 200

/**

@@ -47,8 +49,12 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;
	irqreturn_t ret;

	return radeon_irq_process(rdev);
	ret = radeon_irq_process(rdev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}

/*
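Marking the device busy only on IRQ_HANDLED (not IRQ_NONE) means spurious or shared-line interrupts cannot keep a dormant dGPU awake. Under standard pm_runtime autosuspend semantics the device suspends once the delay configured at load time elapses with no further activity; the companion pattern, for reference:

	pm_runtime_mark_last_busy(dev->dev);	/* push the autosuspend deadline out */
	pm_runtime_put_autosuspend(dev->dev);	/* drop ref; suspend ~5 s later if idle
						 * (delay set in radeon_driver_load_kms below) */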
@@ -32,7 +32,7 @@

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

#include <linux/pm_runtime.h>
/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *

@@ -50,9 +50,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)

	if (rdev == NULL)
		return 0;

	if (rdev->rmmio == NULL)
		goto done_free;

	pm_runtime_get_sync(dev->dev);

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

@@ -125,9 +130,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
			"Error during ACPI methods call\n");
	}

	if (radeon_runtime_pm != 0) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}

@@ -475,9 +491,14 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	/* new gpus have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;

@@ -506,6 +527,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)

		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;
}
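The radeon_runtime_pm variable gating the autosuspend setup above is a module parameter introduced elsewhere in this series; the sketch below is an assumption about its shape (parameter name and defaults included), not a quote of the hunk:

	/* assumed definition, in radeon_drv.c */
	int radeon_runtime_pm = -1;	/* -1 = driver decides (PX only), 0 = off, 1 = force on */
	module_param_named(runpm, radeon_runtime_pm, int, 0444);
	MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");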
@@ -249,6 +249,8 @@ struct radeon_mode_info {
	struct drm_property *underscan_vborder_property;
	/* audio */
	struct drm_property *audio_property;
	/* FMT dithering */
	struct drm_property *dither_property;
	/* hardcoded DFP edid from BIOS */
	struct edid *bios_hardcoded_edid;
	int bios_hardcoded_edid_size;

@@ -479,6 +481,11 @@ enum radeon_connector_audio {
	RADEON_AUDIO_AUTO = 2
};

enum radeon_connector_dither {
	RADEON_FMT_DITHER_DISABLE = 0,
	RADEON_FMT_DITHER_ENABLE = 1,
};

struct radeon_connector {
	struct drm_connector base;
	uint32_t connector_id;

@@ -498,6 +505,7 @@ struct radeon_connector {
	struct radeon_router router;
	struct radeon_i2c_chan *router_bus;
	enum radeon_connector_audio audio;
	enum radeon_connector_dither dither;
};

struct radeon_framebuffer {

@@ -850,6 +858,12 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode);

/* fmt blocks */
void avivo_program_fmt(struct drm_encoder *encoder);
void dce3_program_fmt(struct drm_encoder *encoder);
void dce4_program_fmt(struct drm_encoder *encoder);
void dce8_program_fmt(struct drm_encoder *encoder);

/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
@@ -508,17 +508,21 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = RADEON_DPM_FORCED_LEVEL_AUTO;
	} else {
		mutex_unlock(&rdev->pm.mutex);
		count = -EINVAL;
		goto fail;
	}
	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			count = -EINVAL;
			goto fail;
		}
		ret = radeon_dpm_force_performance_level(rdev, level);
		if (ret)
			count = -EINVAL;
	}
	mutex_unlock(&rdev->pm.mutex);
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

@@ -881,11 +885,12 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
		}
	}

	printk("switching from power state:\n");
	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
	printk("switching to power state:\n");
	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);

	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}
	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

@@ -918,12 +923,16 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active)
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
		else
			/* otherwise, enable auto */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:

@@ -1176,7 +1185,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_print_power_states(rdev);
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);

@@ -1238,6 +1248,23 @@ int radeon_pm_init(struct radeon_device *rdev)
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:

@@ -1253,16 +1280,11 @@ int radeon_pm_init(struct radeon_device *rdev)
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;

@@ -1270,10 +1292,10 @@ int radeon_pm_init(struct radeon_device *rdev)
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
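Untangling the interleaved old and new lines in the post-set-power-state hunk above, the resulting logic reads as one block (consolidated here for readability, no new behavior): a thermal event forces the LOW level but must not clobber what the user selected via sysfs.

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}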
@@ -47,6 +47,30 @@ TRACE_EVENT(radeon_cs,
		      __entry->fences)
);

TRACE_EVENT(radeon_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
	    TP_ARGS(pe, addr, count, incr, flags),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u32, flags)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
		      __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count)
);

DECLARE_EVENT_CLASS(radeon_fence_request,

	    TP_PROTO(struct drm_device *dev, u32 seqno),
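The new event fires from the vm_set_page backends; the call added in si_dma.c further down is simply:

	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

Once the module is loaded it should show up like any other radeon tracepoint, presumably under /sys/kernel/debug/tracing/events/radeon/radeon_vm_set_page/.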
@@ -240,6 +240,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {

@@ -619,7 +621,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
	if (r)
		goto err;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		goto err;
@@ -153,6 +153,70 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

void avivo_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
		else
			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
				AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
				AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
		break;
	default:
		break;
	}
}

void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
@@ -407,9 +407,9 @@ static void rv6xx_enable_engine_feedback_and_reference_sync(struct radeon_device
	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
}

static u64 rv6xx_clocks_per_unit(u32 unit)
static u32 rv6xx_clocks_per_unit(u32 unit)
{
	u64 tmp = 1 << (2 * unit);
	u32 tmp = 1 << (2 * unit);

	return tmp;
}

@@ -417,7 +417,7 @@ static u64 rv6xx_clocks_per_unit(u32 unit)
static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
					u32 unscaled_count, u32 unit)
{
	u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
	u32 count_per_unit = rv6xx_clocks_per_unit(unit);

	return (unscaled_count + count_per_unit - 1) / count_per_unit;
}
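The u64 here bought nothing: 1 << (2 * unit) is evaluated as a plain int anyway, so the u64 return type only forced a needless cast and a wider division in the scaler. A quick boundary check, assuming unit stays below 16:

	u32 count_per_unit = rv6xx_clocks_per_unit(15);	/* 1 << 30, fits a u32 */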
@@ -78,11 +78,6 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void si_dma_vm_set_page(struct radeon_device *rdev,
			       struct radeon_ib *ib,
			       uint64_t pe,
			       uint64_t addr, unsigned count,
			       uint32_t incr, uint32_t flags);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);

@@ -4662,61 +4657,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
	       block, mc_id);
}

/**
 * si_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (SI).
 */
void si_vm_set_page(struct radeon_device *rdev,
		    struct radeon_ib *ib,
		    uint64_t pe,
		    uint64_t addr, unsigned count,
		    uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		while (count) {
			ndw = 2 + count * 2;
			if (ndw > 0x3FFE)
				ndw = 0x3FFE;

			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
						    WRITE_DATA_DST_SEL(1));
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		/* DMA */
		si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
	}
}

void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

@@ -5361,52 +5301,53 @@ void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
	if (buffer == NULL)
		return;

	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
	buffer[count++] = 0x80000000;
	buffer[count++] = 0x80000000;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
				buffer[count++] = ext->reg_index - 0xa000;
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = ext->extent[i];
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		buffer[count++] = 0x2a00126a;
		buffer[count++] = cpu_to_le32(0x2a00126a);
		break;
	case CHIP_VERDE:
		buffer[count++] = 0x0000124a;
		buffer[count++] = cpu_to_le32(0x0000124a);
		break;
	case CHIP_OLAND:
		buffer[count++] = 0x00000082;
		buffer[count++] = cpu_to_le32(0x00000082);
		break;
	case CHIP_HAINAN:
		buffer[count++] = 0x00000000;
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	default:
		buffer[count++] = 0x00000000;
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
	buffer[count++] = 0;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void si_init_pg(struct radeon_device *rdev)
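The cpu_to_le32() wrapping only changes behavior on big-endian hosts: the clear-state buffer is presumably fetched by the GPU as little-endian dwords, so each value must be byte-swapped at write time, while on little-endian builds the macro compiles away entirely. The pattern in isolation:

	u32 dw = PACKET3(PACKET3_CLEAR_STATE, 0);
	buffer[count++] = cpu_to_le32(dw);	/* stored LSB-first on any host CPU */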
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

@@ -75,11 +76,12 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (flags & RADEON_VM_PAGE_SYSTEM) {
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if (flags & R600_PTE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)

@@ -90,16 +92,10 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
				addr += incr;
				value |= r600_flags;
				value |= flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}

@@ -110,7 +106,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & RADEON_VM_PAGE_VALID)
		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

@@ -118,7 +114,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = r600_flags; /* mask */
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
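With the translation centralized in radeon_vm_page_flags(), the DMA backend drops its cayman_vm_page_flags() hop and ORs the R600_PTE_* mask straight into each 64-bit entry. One iteration of the system-page loop above, spelled out with the kernel's 32-bit split helpers (illustrative only; the hunk itself relies on implicit truncation for the low dword):

	uint64_t value = radeon_vm_map_gart(rdev, addr) & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;				/* flags already carry R600_PTE_* bits */
	ib->ptr[ib->length_dw++] = lower_32_bits(value);
	ib->ptr[ib->length_dw++] = upper_32_bits(value);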
@@ -3589,7 +3589,12 @@ static void si_program_display_gap(struct radeon_device *rdev)
		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
	}

	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
	/* Setting this to false forces the performance state to low if the crtcs are disabled.
	 * This can be a problem on PowerXpress systems or if you want to use the card
	 * for offscreen rendering or compute if there are no crtcs enabled. Set it to
	 * true for now so that performance scales even if the displays are off.
	 */
	si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
}

static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)

@@ -4553,7 +4558,7 @@ static int si_init_smc_table(struct radeon_device *rdev)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
@@ -478,7 +478,7 @@
#define STATE3_MASK				(0x1f << 15)
#define STATE3_SHIFT				15

#define MC_SEQ_TRAIN_WAKEUP_CNTL		0x2808
#define MC_SEQ_TRAIN_WAKEUP_CNTL		0x28e8
#define TRAIN_DONE_D0				(1 << 30)
#define TRAIN_DONE_D1				(1 << 31)

@@ -683,6 +683,51 @@
 * bit5 = 176.4 kHz
 * bit6 = 192 kHz
 */

#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC	0x37
#	define VIDEO_LIPSYNC(x)			(((x) & 0xff) << 0)
#	define AUDIO_LIPSYNC(x)			(((x) & 0xff) << 8)
/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
 * 0   = invalid
 * x   = legal delay value
 * 255 = sync not supported
 */
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR	0x38
#	define HBR_CAPABLE			(1 << 0) /* enabled by default */

#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0	0x3a
#	define MANUFACTURER_ID(x)		(((x) & 0xffff) << 0)
#	define PRODUCT_ID(x)			(((x) & 0xffff) << 16)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1	0x3b
#	define SINK_DESCRIPTION_LEN(x)		(((x) & 0xff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2	0x3c
#	define PORT_ID0(x)			(((x) & 0xffffffff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3	0x3d
#	define PORT_ID1(x)			(((x) & 0xffffffff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4	0x3e
#	define DESCRIPTION0(x)			(((x) & 0xff) << 0)
#	define DESCRIPTION1(x)			(((x) & 0xff) << 8)
#	define DESCRIPTION2(x)			(((x) & 0xff) << 16)
#	define DESCRIPTION3(x)			(((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5	0x3f
#	define DESCRIPTION4(x)			(((x) & 0xff) << 0)
#	define DESCRIPTION5(x)			(((x) & 0xff) << 8)
#	define DESCRIPTION6(x)			(((x) & 0xff) << 16)
#	define DESCRIPTION7(x)			(((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6	0x40
#	define DESCRIPTION8(x)			(((x) & 0xff) << 0)
#	define DESCRIPTION9(x)			(((x) & 0xff) << 8)
#	define DESCRIPTION10(x)			(((x) & 0xff) << 16)
#	define DESCRIPTION11(x)			(((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7	0x41
#	define DESCRIPTION12(x)			(((x) & 0xff) << 0)
#	define DESCRIPTION13(x)			(((x) & 0xff) << 8)
#	define DESCRIPTION14(x)			(((x) & 0xff) << 16)
#	define DESCRIPTION15(x)			(((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8	0x42
#	define DESCRIPTION16(x)			(((x) & 0xff) << 0)
#	define DESCRIPTION17(x)			(((x) & 0xff) << 8)

#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL	0x54
#	define AUDIO_ENABLED			(1 << 31)