drm fixes for 5.9-rc1
core:
- Fix drm_dp_mst_port refcount leaks in drm_dp_mst_allocate_vcpi
- Remove null check for kfree in drm_dev_release.
- Fix DRM_FORMAT_MOD_AMLOGIC_FBC definition.
- re-added docs for drm_gem_flink_ioctl()
- add orientation quirk for ASUS T103HAF

ttm:
- ttm: fix page-offset calculation within TTM
- revert patch causing vmwgfx regressions

fbcon:
- Fix a fbcon OOB read in fbdev, found by syzbot.

vga:
- Mark vga_tryget static as it's not used elsewhere.

amdgpu:
- Re-add spelling typo fix
- Sienna Cichlid fixes
- Navy Flounder fixes
- DC fixes
- SMU i2c fix
- Power fixes

vmwgfx:
- regression fixes for modesetting crashes
- misc fixes

xlnx:
- Small fixes to xlnx.

omap:
- Fix mode initialization in omap_connector_mode_valid().
- force runtime PM suspend on system suspend

tidss:
- fix modeset init for DPI panels
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJfM3QrAAoJEAx081l5xIa+4H8P/0YLHHK52yuYqyYEtvfehlDC
i3ur47QdQS/6fkspVSbVndsToQS+D9ZNAZFUUSumVr1kR40yGC6oGZGmo+UWYH+T
AiZC1LHmz8hsGMni40uCb+ble1mtcsbqjAkb5hG/9RxPfBiunQ6OJJGU7X2FoB3e
TZNWwYKyF/Y5f/OlKdRbi9qfszWwWb/+ZW94q9ER1MjlcwrW3EqgRrkRGT3hH4UO
AvzYhgGlzz7FXwQic3mJw3Sirm6NnbuUv9aRPRBCGm9i/BDSRHFpIcXIuLuMsiOk
L/WtdYodMDbhB0xRKwDQzf9TIUxSwulrXO0Jfo8CVKknyHha62fz2bMGpf+4ron2
3WrZmV+5w0Q0qcfbh1EgHkVXmDry+mtFws1dvAtvyNp7GSI675tNI2qH3cgLfvQX
FgMrfbmOp+f+ohXL7fUw+J/7cUjrVYZxtkCAEctB/6B44INjFdRuwQfP98x5CcmX
CpZHLJKsMI+Y6r6Jno5foIQJIZJrDiUJLhRE2sN7cWv54+2gmHXbqsLAfMMpB6Mc
2gMZNIwB+qSVjULR+MHvzC3iZHHTkD3hijkYshb8+s5pICGo+eq2v4S/r31C4Wn1
DpToox1t/RPY+WPS5dbi95zxg1QDwr/BsExTCKTUB2k+ovoWE6byPvt2Vslh+F/K
i+LrkziEqKb/BhyGA7z9
=dr7v
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2020-08-12' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This has a few vmwgfx regression fixes we hit from the merge window
  (one in TTM), it also has a bunch of amdgpu fixes along with a
  scattering everywhere else.

  core:
   - Fix drm_dp_mst_port refcount leaks in drm_dp_mst_allocate_vcpi
   - Remove null check for kfree in drm_dev_release.
   - Fix DRM_FORMAT_MOD_AMLOGIC_FBC definition.
   - re-added docs for drm_gem_flink_ioctl()
   - add orientation quirk for ASUS T103HAF

  ttm:
   - ttm: fix page-offset calculation within TTM
   - revert patch causing vmwgfx regressions

  fbcon:
   - Fix a fbcon OOB read in fbdev, found by syzbot.

  vga:
   - Mark vga_tryget static as it's not used elsewhere.

  amdgpu:
   - Re-add spelling typo fix
   - Sienna Cichlid fixes
   - Navy Flounder fixes
   - DC fixes
   - SMU i2c fix
   - Power fixes

  vmwgfx:
   - regression fixes for modesetting crashes
   - misc fixes

  xlnx:
   - Small fixes to xlnx.

  omap:
   - Fix mode initialization in omap_connector_mode_valid().
   - force runtime PM suspend on system suspend

  tidss:
   - fix modeset init for DPI panels"

* tag 'drm-next-2020-08-12' of git://anongit.freedesktop.org/drm/drm: (70 commits)
  drm/ttm: revert "drm/ttm: make TT creation purely optional v3"
  drm/vmwgfx: fix spelling mistake "Cant" -> "Can't"
  drm/vmwgfx: fix spelling mistake "Cound" -> "Could"
  drm/vmwgfx/ldu: Use drm_mode_config_reset
  drm/vmwgfx/sou: Use drm_mode_config_reset
  drm/vmwgfx/stdu: Use drm_mode_config_reset
  drm/vmwgfx: Fix two list_for_each loop exit tests
  drm/vmwgfx: Use correct vmw_legacy_display_unit pointer
  drm/vmwgfx: Use struct_size() helper
  drm/amdgpu: Fix bug where DPM is not enabled after hibernate and resume
  drm/amd/powerplay: put VCN/JPEG into PG ungate state before dpm table setup(V3)
  drm/amd/powerplay: update swSMU VCN/JPEG PG logics
  drm/amdgpu: use mode1 reset by default for sienna_cichlid
  drm/amdgpu/smu: rework i2c adpater registration
  drm/amd/display: Display goes blank after inst
  drm/amd/display: Change null plane state swizzle mode to 4kb_s
  drm/amd/display: Use helper function to check for HDMI signal
  drm/amd/display: AMD OUI (DPCD 0x00300) skipped on some sink
  drm/amd/display: Fix logger context
  drm/amd/display: populate new dml variable
  ...
Commit ea6ec77437
@@ -2574,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};

for (i = 0; i < adev->num_ip_blocks; i++)
adev->ip_blocks[i].status.hw = false;

for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
int j;
struct amdgpu_ip_block *block;
@@ -2581,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
for (j = 0; j < adev->num_ip_blocks; j++) {
block = &adev->ip_blocks[j];

block->status.hw = false;
if (block->version->type != ip_order[i] ||
!block->status.valid)
continue;

@@ -3212,6 +3212,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;

/* Skip crit temp on APU */
if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;

/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||

@@ -193,12 +193,18 @@ static int psp_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

psp_memory_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
if (adev->psp.sos_fw) {
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
}
if (adev->psp.asd_fw) {
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
}
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}

if (adev->asic_type == CHIP_NAVI10)
psp_sysfs_fini(adev);
@@ -409,11 +415,28 @@ static int psp_clear_vf_fw(struct psp_context *psp)
return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
switch (psp->adev->asic_type) {
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
return true;
default:
return false;
}
}

static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;

/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
* Already set up by host driver.
*/
if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
return 0;

cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1987,7 +2010,7 @@ static int psp_suspend(void *handle)

ret = psp_tmr_terminate(psp);
if (ret) {
DRM_ERROR("Falied to terminate tmr\n");
DRM_ERROR("Failed to terminate tmr\n");
return ret;
}

@@ -1618,7 +1618,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
data = con->eh_data;
save_count = data->count - control->num_recs;
/* only new entries are saved */
if (save_count > 0)
if (save_count > 0) {
if (amdgpu_ras_eeprom_process_recods(control,
&data->bps[control->num_recs],
true,
@@ -1627,6 +1627,9 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
return -EIO;
}

dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
}

return 0;
}

@@ -3082,7 +3082,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3127,7 +3127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3158,7 +3158,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
@@ -7529,6 +7529,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:

@@ -49,12 +49,11 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
static int jpeg_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);

if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;

if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;
}
adev->jpeg.num_jpeg_inst = 1;

jpeg_v3_0_set_dec_ring_funcs(adev);

@ -97,6 +97,49 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
|
|||
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
|
||||
}
|
||||
|
||||
static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
|
||||
{
|
||||
unsigned long flags, address, data;
|
||||
u64 r;
|
||||
address = adev->nbio.funcs->get_pcie_index_offset(adev);
|
||||
data = adev->nbio.funcs->get_pcie_data_offset(adev);
|
||||
|
||||
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
|
||||
/* read low 32 bit */
|
||||
WREG32(address, reg);
|
||||
(void)RREG32(address);
|
||||
r = RREG32(data);
|
||||
|
||||
/* read high 32 bit*/
|
||||
WREG32(address, reg + 4);
|
||||
(void)RREG32(address);
|
||||
r |= ((u64)RREG32(data) << 32);
|
||||
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
|
||||
return r;
|
||||
}
|
||||
|
||||
static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
|
||||
{
|
||||
unsigned long flags, address, data;
|
||||
|
||||
address = adev->nbio.funcs->get_pcie_index_offset(adev);
|
||||
data = adev->nbio.funcs->get_pcie_data_offset(adev);
|
||||
|
||||
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
|
||||
/* write low 32 bit */
|
||||
WREG32(address, reg);
|
||||
(void)RREG32(address);
|
||||
WREG32(data, (u32)(v & 0xffffffffULL));
|
||||
(void)RREG32(data);
|
||||
|
||||
/* write high 32 bit */
|
||||
WREG32(address, reg + 4);
|
||||
(void)RREG32(address);
|
||||
WREG32(data, (u32)(v >> 32));
|
||||
(void)RREG32(data);
|
||||
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
|
||||
}
|
||||
|
||||
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
|
||||
{
|
||||
unsigned long flags, address, data;
|
||||
|
@ -319,10 +362,15 @@ nv_asic_reset_method(struct amdgpu_device *adev)
|
|||
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
|
||||
amdgpu_reset_method);
|
||||
|
||||
if (smu_baco_is_support(smu))
|
||||
return AMD_RESET_METHOD_BACO;
|
||||
else
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_SIENNA_CICHLID:
|
||||
return AMD_RESET_METHOD_MODE1;
|
||||
default:
|
||||
if (smu_baco_is_support(smu))
|
||||
return AMD_RESET_METHOD_BACO;
|
||||
else
|
||||
return AMD_RESET_METHOD_MODE1;
|
||||
}
|
||||
}
|
||||
|
||||
static int nv_asic_reset(struct amdgpu_device *adev)
|
||||
|
@ -673,6 +721,8 @@ static int nv_common_early_init(void *handle)
|
|||
adev->smc_wreg = NULL;
|
||||
adev->pcie_rreg = &nv_pcie_rreg;
|
||||
adev->pcie_wreg = &nv_pcie_wreg;
|
||||
adev->pcie_rreg64 = &nv_pcie_rreg64;
|
||||
adev->pcie_wreg64 = &nv_pcie_wreg64;
|
||||
|
||||
/* TODO: will add them during VCN v2 implementation */
|
||||
adev->uvd_ctx_rreg = NULL;
|
||||
|
|
|
@ -1659,7 +1659,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
|
|||
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
|
||||
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
|
||||
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
|
||||
.test_ring = amdgpu_vcn_dec_ring_test_ring,
|
||||
.test_ring = vcn_v2_0_dec_ring_test_ring,
|
||||
.test_ib = amdgpu_vcn_dec_ring_test_ib,
|
||||
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
|
||||
.insert_start = vcn_v2_0_dec_ring_insert_start,
|
||||
|
|
|
@ -97,6 +97,8 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
|
|||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
|
||||
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
|
||||
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
|
||||
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
|
||||
#endif
|
||||
|
||||
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
|
||||
|
@ -1185,10 +1187,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
|||
break;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
case CHIP_SIENNA_CICHLID:
|
||||
case CHIP_NAVY_FLOUNDER:
|
||||
dmub_asic = DMUB_ASIC_DCN30;
|
||||
fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
|
||||
break;
|
||||
case CHIP_NAVY_FLOUNDER:
|
||||
dmub_asic = DMUB_ASIC_DCN30;
|
||||
fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
|
||||
break;
|
||||
#endif
|
||||
|
||||
default:
|
||||
|
@ -8544,6 +8549,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/* Check connector changes */
|
||||
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
|
||||
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
|
||||
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
|
||||
|
||||
/* Skip connectors that are disabled or part of modeset already. */
|
||||
if (!old_con_state->crtc && !new_con_state->crtc)
|
||||
continue;
|
||||
|
||||
if (!new_con_state->crtc)
|
||||
continue;
|
||||
|
||||
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
|
||||
if (IS_ERR(new_crtc_state)) {
|
||||
ret = PTR_ERR(new_crtc_state);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (dm_old_con_state->abm_level !=
|
||||
dm_new_con_state->abm_level)
|
||||
new_crtc_state->connectors_changed = true;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (adev->asic_type >= CHIP_NAVI10) {
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include "dmub/dmub_srv.h"
|
||||
#include "resource.h"
|
||||
#include "dsc.h"
|
||||
#include "dc_link_dp.h"
|
||||
|
||||
struct dmub_debugfs_trace_header {
|
||||
uint32_t entry_count;
|
||||
|
@ -1150,7 +1151,7 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
|
|||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
|
||||
static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
|
@ -1186,7 +1187,7 @@ static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
|
|||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_bytes_per_pixel);
|
||||
dsc_state.dsc_bits_per_pixel);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
|
@ -1460,9 +1461,9 @@ static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
|
|||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
|
||||
static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_bytes_per_pixel_read,
|
||||
.read = dp_dsc_bits_per_pixel_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
|
@ -1552,7 +1553,7 @@ static const struct {
|
|||
{"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
|
||||
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
|
||||
{"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
|
||||
{"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
|
||||
{"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
|
||||
{"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
|
||||
{"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
|
||||
{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
|
||||
|
|
|
@ -2834,6 +2834,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
|
|||
.bios_parser_destroy = bios_parser_destroy,
|
||||
|
||||
.get_board_layout_info = bios_get_board_layout_info,
|
||||
|
||||
.get_atom_dc_golden_table = NULL
|
||||
};
|
||||
|
||||
static bool bios_parser_construct(
|
||||
|
|
|
@ -2079,6 +2079,85 @@ static uint16_t bios_parser_pack_data_tables(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct atom_dc_golden_table_v1 *bios_get_golden_table(
|
||||
struct bios_parser *bp,
|
||||
uint32_t rev_major,
|
||||
uint32_t rev_minor,
|
||||
uint16_t *dc_golden_table_ver)
|
||||
{
|
||||
struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL;
|
||||
uint32_t dc_golden_offset = 0;
|
||||
*dc_golden_table_ver = 0;
|
||||
|
||||
if (!DATA_TABLES(dce_info))
|
||||
return NULL;
|
||||
|
||||
/* ver.4.4 or higher */
|
||||
switch (rev_major) {
|
||||
case 4:
|
||||
switch (rev_minor) {
|
||||
case 4:
|
||||
disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
|
||||
DATA_TABLES(dce_info));
|
||||
if (!disp_cntl_tbl_4_4)
|
||||
return NULL;
|
||||
dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset;
|
||||
*dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (!dc_golden_offset)
|
||||
return NULL;
|
||||
|
||||
if (*dc_golden_table_ver != 1)
|
||||
return NULL;
|
||||
|
||||
return GET_IMAGE(struct atom_dc_golden_table_v1,
|
||||
dc_golden_offset);
|
||||
}
|
||||
|
||||
static enum bp_result bios_get_atom_dc_golden_table(
|
||||
struct dc_bios *dcb)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
enum bp_result result = BP_RESULT_OK;
|
||||
struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL;
|
||||
struct atom_common_table_header *header;
|
||||
struct atom_data_revision tbl_revision;
|
||||
uint16_t dc_golden_table_ver = 0;
|
||||
|
||||
header = GET_IMAGE(struct atom_common_table_header,
|
||||
DATA_TABLES(dce_info));
|
||||
if (!header)
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
get_atom_data_table_revision(header, &tbl_revision);
|
||||
|
||||
atom_dc_golden_table = bios_get_golden_table(bp,
|
||||
tbl_revision.major,
|
||||
tbl_revision.minor,
|
||||
&dc_golden_table_ver);
|
||||
|
||||
if (!atom_dc_golden_table)
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver;
|
||||
dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val;
|
||||
dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val;
|
||||
dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val;
|
||||
dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
static const struct dc_vbios_funcs vbios_funcs = {
|
||||
.get_connectors_number = bios_parser_get_connectors_number,
|
||||
|
||||
|
@ -2128,6 +2207,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
|
|||
|
||||
.get_board_layout_info = bios_get_board_layout_info,
|
||||
.pack_data_tables = bios_parser_pack_data_tables,
|
||||
|
||||
.get_atom_dc_golden_table = bios_get_atom_dc_golden_table
|
||||
};
|
||||
|
||||
static bool bios_parser2_construct(
|
||||
|
|
|
@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
|
|||
return disp_clk_threshold;
|
||||
}
|
||||
|
||||
static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
|
||||
static void ramp_up_dispclk_with_dpp(
|
||||
struct clk_mgr_internal *clk_mgr,
|
||||
struct dc *dc,
|
||||
struct dc_clocks *new_clocks,
|
||||
bool safe_to_lower)
|
||||
{
|
||||
int i;
|
||||
int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
|
||||
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
|
||||
|
||||
/* this function is to change dispclk, dppclk and dprefclk according to
|
||||
* bandwidth requirement. Its call stack is rv1_update_clocks -->
|
||||
* update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
|
||||
* --> prepare_bandwidth / optimize_bandwidth. before change dcn hw,
|
||||
* prepare_bandwidth will be called first to allow enough clock,
|
||||
* watermark for change, after end of dcn hw change, optimize_bandwidth
|
||||
* is executed to lower clock to save power for new dcn hw settings.
|
||||
*
|
||||
* below is sequence of commit_planes_for_stream:
|
||||
*
|
||||
* step 1: prepare_bandwidth - raise clock to have enough bandwidth
|
||||
* step 2: lock_doublebuffer_enable
|
||||
* step 3: pipe_control_lock(true) - make dchubp register change will
|
||||
* not take effect right way
|
||||
* step 4: apply_ctx_for_surface - program dchubp
|
||||
* step 5: pipe_control_lock(false) - dchubp register change take effect
|
||||
* step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
|
||||
* for full_date, optimize clock to save power
|
||||
*
|
||||
* at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be
|
||||
* changed for new dchubp configuration. but real dcn hub dchubps are
|
||||
* still running with old configuration until end of step 5. this need
|
||||
* clocks settings at step 1 should not less than that before step 1.
|
||||
* this is checked by two conditions: 1. if (should_set_clock(safe_to_lower
|
||||
* , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
|
||||
* new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
|
||||
* 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
|
||||
*
|
||||
* the second condition is based on new dchubp configuration. dppclk
|
||||
* for new dchubp may be different from dppclk before step 1.
|
||||
* for example, before step 1, dchubps are as below:
|
||||
* pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
|
||||
* pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
|
||||
* for dppclk for pipe0 need dppclk = dispclk
|
||||
*
|
||||
* new dchubp pipe split configuration:
|
||||
* pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
|
||||
* pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
|
||||
* dppclk only needs dppclk = dispclk /2.
|
||||
*
|
||||
* dispclk, dppclk are not lock by otg master lock. they take effect
|
||||
* after step 1. during this transition, dispclk are the same, but
|
||||
* dppclk is changed to half of previous clock for old dchubp
|
||||
* configuration between step 1 and step 6. This may cause p-state
|
||||
* warning intermittently.
|
||||
*
|
||||
* for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
|
||||
* need make sure dppclk are not changed to less between step 1 and 6.
|
||||
* for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz,
|
||||
* new display clock is raised, but we do not know ratio of
|
||||
* new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz,
|
||||
* new_clocks->dispclk_khz /2 does not guarantee equal or higher than
|
||||
* old dppclk. we could ignore power saving different between
|
||||
* dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6.
|
||||
* as long as safe_to_lower = false, set dpclk = dispclk to simplify
|
||||
* condition check.
|
||||
* todo: review this change for other asic.
|
||||
**/
|
||||
if (!safe_to_lower)
|
||||
request_dpp_div = false;
|
||||
|
||||
/* set disp clk to dpp clk threshold */
|
||||
|
||||
clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
|
||||
|
@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
/* program dispclk on = as a w/a for sleep resume clock ramping issues */
|
||||
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
|
||||
|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
|
||||
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
|
||||
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
|
||||
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
|
||||
send_request_to_lower = true;
|
||||
}
|
||||
|
|
|
@ -323,9 +323,10 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
/* if clock is being raised, increase refclk before lowering DTO */
|
||||
if (update_dppclk || update_dispclk)
|
||||
dcn20_update_clocks_update_dentist(clk_mgr);
|
||||
/* always update dtos unless clock is lowered and not safe to lower */
|
||||
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
|
||||
* that we do not lower dto when it is not safe to lower. We do not need to
|
||||
* compare the current and new dppclk before calling this function.*/
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1250,6 +1250,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
|
|||
int i, k, l;
|
||||
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
dc_allow_idle_optimizations(dc, false);
|
||||
#endif
|
||||
|
||||
for (i = 0; i < context->stream_count; i++)
|
||||
dc_streams[i] = context->streams[i];
|
||||
|
@ -1838,6 +1841,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
|
|||
int i;
|
||||
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
if (dc->idle_optimizations_allowed)
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
|
||||
#endif
|
||||
if (stream_status == NULL || stream_status->plane_count != surface_count)
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
|
||||
|
@ -2306,8 +2314,14 @@ static void commit_planes_for_stream(struct dc *dc,
|
|||
}
|
||||
}
|
||||
|
||||
if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
|
||||
dc->hwss.prepare_bandwidth(dc, context);
|
||||
if (update_type == UPDATE_TYPE_FULL) {
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
dc_allow_idle_optimizations(dc, false);
|
||||
|
||||
#endif
|
||||
if (dc->optimize_seamless_boot_streams == 0)
|
||||
dc->hwss.prepare_bandwidth(dc, context);
|
||||
|
||||
context_clock_trace(dc, context);
|
||||
}
|
||||
|
||||
|
|
|
@ -1540,6 +1540,9 @@ static bool dc_link_construct(struct dc_link *link,
|
|||
}
|
||||
}
|
||||
|
||||
if (bios->funcs->get_atom_dc_golden_table)
|
||||
bios->funcs->get_atom_dc_golden_table(bios);
|
||||
|
||||
/*
|
||||
* TODO check if GPIO programmed correctly
|
||||
*
|
||||
|
@ -3102,6 +3105,9 @@ void core_link_enable_stream(
|
|||
struct dc *dc = pipe_ctx->stream->ctx->dc;
|
||||
struct dc_stream_state *stream = pipe_ctx->stream;
|
||||
enum dc_status status;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
|
||||
#endif
|
||||
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
|
||||
|
||||
if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
|
||||
|
@ -3136,8 +3142,8 @@ void core_link_enable_stream(
|
|||
pipe_ctx->stream->link->link_state_valid = true;
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
|
||||
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
|
||||
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
|
||||
#endif
|
||||
|
||||
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
|
||||
|
@ -3276,7 +3282,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
dc_is_virtual_signal(pipe_ctx->stream->signal))
|
||||
return;
|
||||
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
|
||||
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
|
||||
core_link_set_avmute(pipe_ctx, true);
|
||||
}
|
||||
|
||||
|
|
|
@ -1133,6 +1133,44 @@ static inline enum link_training_result perform_link_training_int(
|
|||
return status;
|
||||
}
|
||||
|
||||
static enum link_training_result check_link_loss_status(
|
||||
struct dc_link *link,
|
||||
const struct link_training_settings *link_training_setting)
|
||||
{
|
||||
enum link_training_result status = LINK_TRAINING_SUCCESS;
|
||||
union lane_status lane_status;
|
||||
uint8_t dpcd_buf[6] = {0};
|
||||
uint32_t lane;
|
||||
|
||||
core_link_read_dpcd(
|
||||
link,
|
||||
DP_SINK_COUNT,
|
||||
(uint8_t *)(dpcd_buf),
|
||||
sizeof(dpcd_buf));
|
||||
|
||||
/*parse lane status*/
|
||||
for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
|
||||
/*
|
||||
* check lanes status
|
||||
*/
|
||||
lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);
|
||||
|
||||
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
|
||||
!lane_status.bits.CR_DONE_0 ||
|
||||
!lane_status.bits.SYMBOL_LOCKED_0) {
|
||||
/* if one of the channel equalization, clock
|
||||
* recovery or symbol lock is dropped
|
||||
* consider it as (link has been
|
||||
* dropped) dp sink status has changed
|
||||
*/
|
||||
status = LINK_TRAINING_LINK_LOSS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void initialize_training_settings(
|
||||
struct dc_link *link,
|
||||
const struct dc_link_settings *link_setting,
|
||||
|
@ -1372,6 +1410,9 @@ static void print_status_message(
|
|||
case LINK_TRAINING_LQA_FAIL:
|
||||
lt_result = "LQA failed";
|
||||
break;
|
||||
case LINK_TRAINING_LINK_LOSS:
|
||||
lt_result = "Link loss";
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1531,6 +1572,14 @@ enum link_training_result dc_link_dp_perform_link_training(
|
|||
status);
|
||||
}
|
||||
|
||||
/* delay 5ms after Main Link output idle pattern and then check
|
||||
* DPCD 0202h.
|
||||
*/
|
||||
if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
|
||||
msleep(5);
|
||||
status = check_link_loss_status(link, <_settings);
|
||||
}
|
||||
|
||||
/* 6. print status message*/
|
||||
print_status_message(link, <_settings, status);
|
||||
|
||||
|
@ -4290,22 +4339,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
|
|||
|
||||
void dpcd_set_source_specific_data(struct dc_link *link)
|
||||
{
|
||||
uint8_t dspc = 0;
|
||||
enum dc_status ret;
|
||||
|
||||
ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
|
||||
sizeof(dspc));
|
||||
|
||||
if (ret != DC_OK) {
|
||||
DC_LOG_ERROR("Error in DP aux read transaction,"
|
||||
" not writing source specific data\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Return if OUI unsupported */
|
||||
if (!(dspc & DP_OUI_SUPPORT))
|
||||
return;
|
||||
|
||||
if (!link->dc->vendor_signature.is_valid) {
|
||||
struct dpcd_amd_signature amd_signature;
|
||||
amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
|
||||
|
|
|
@ -246,20 +246,18 @@ struct dc_stream_status *dc_stream_get_status(
|
|||
|
||||
#ifndef TRIM_FSFT
|
||||
/**
|
||||
* dc_optimize_timing() - dc to optimize timing
|
||||
* dc_optimize_timing_for_fsft() - dc to optimize timing
|
||||
*/
|
||||
bool dc_optimize_timing(
|
||||
struct dc_crtc_timing *timing,
|
||||
bool dc_optimize_timing_for_fsft(
|
||||
struct dc_stream_state *pStream,
|
||||
unsigned int max_input_rate_in_khz)
|
||||
{
|
||||
//optimization is expected to assing a value to these:
|
||||
//timing->pix_clk_100hz
|
||||
//timing->v_front_porch
|
||||
//timing->v_total
|
||||
//timing->fast_transport_output_rate_100hz;
|
||||
timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
|
||||
struct dc *dc;
|
||||
|
||||
return true;
|
||||
dc = pStream->ctx->dc;
|
||||
|
||||
return (dc->hwss.optimize_timing_for_fsft &&
|
||||
dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -133,6 +133,9 @@ struct dc_vbios_funcs {
|
|||
uint16_t (*pack_data_tables)(
|
||||
struct dc_bios *dcb,
|
||||
void *dst);
|
||||
|
||||
enum bp_result (*get_atom_dc_golden_table)(
|
||||
struct dc_bios *dcb);
|
||||
};
|
||||
|
||||
struct bios_registers {
|
||||
|
@ -154,6 +157,7 @@ struct dc_bios {
|
|||
struct dc_firmware_info fw_info;
|
||||
bool fw_info_valid;
|
||||
struct dc_vram_info vram_info;
|
||||
struct dc_golden_table golden_table;
|
||||
};
|
||||
|
||||
#endif /* DC_BIOS_TYPES_H */
|
||||
|
|
|
@ -424,8 +424,8 @@ struct dc_stream_status *dc_stream_get_status(
|
|||
struct dc_stream_state *dc_stream);
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
bool dc_optimize_timing(
|
||||
struct dc_crtc_timing *timing,
|
||||
bool dc_optimize_timing_for_fsft(
|
||||
struct dc_stream_state *pStream,
|
||||
unsigned int max_input_rate_in_khz);
|
||||
#endif
|
||||
|
||||
|
|
|
@ -890,6 +890,20 @@ struct dsc_dec_dpcd_caps {
|
|||
uint32_t branch_max_line_width;
|
||||
};
|
||||
|
||||
struct dc_golden_table {
|
||||
uint16_t dc_golden_table_ver;
|
||||
uint32_t aux_dphy_rx_control0_val;
|
||||
uint32_t aux_dphy_tx_control_val;
|
||||
uint32_t aux_dphy_rx_control1_val;
|
||||
uint32_t dc_gpio_aux_ctrl_0_val;
|
||||
uint32_t dc_gpio_aux_ctrl_1_val;
|
||||
uint32_t dc_gpio_aux_ctrl_2_val;
|
||||
uint32_t dc_gpio_aux_ctrl_3_val;
|
||||
uint32_t dc_gpio_aux_ctrl_4_val;
|
||||
uint32_t dc_gpio_aux_ctrl_5_val;
|
||||
};
|
||||
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
|
||||
enum dc_gpu_mem_alloc_type {
|
||||
DC_MEM_ALLOC_TYPE_GART,
|
||||
|
|
|
@ -38,7 +38,8 @@
|
|||
|
||||
#define AUX_REG_LIST(id)\
|
||||
SRI(AUX_CONTROL, DP_AUX, id), \
|
||||
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
|
||||
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
|
||||
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
|
||||
|
||||
#define HPD_REG_LIST(id)\
|
||||
SRI(DC_HPD_CONTROL, HPD, id)
|
||||
|
@ -107,6 +108,7 @@
|
|||
struct dce110_link_enc_aux_registers {
|
||||
uint32_t AUX_CONTROL;
|
||||
uint32_t AUX_DPHY_RX_CONTROL0;
|
||||
uint32_t AUX_DPHY_RX_CONTROL1;
|
||||
};
|
||||
|
||||
struct dce110_link_enc_hpd_registers {
|
||||
|
|
|
@ -233,8 +233,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
|
|||
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
|
||||
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
|
||||
true : false;
|
||||
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
|
||||
copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
|
||||
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 0;
|
||||
|
||||
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
|
||||
dc_dmub_srv_cmd_execute(dc->dmub_srv);
|
||||
|
|
|
@ -390,6 +390,8 @@ void dcn10_log_hw_state(struct dc *dc,
|
|||
}
|
||||
DTN_INFO("\n");
|
||||
|
||||
// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
|
||||
// TODO: Update golden log header to reflect this name change
|
||||
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
|
||||
for (i = 0; i < pool->res_cap->num_dsc; i++) {
|
||||
struct display_stream_compressor *dsc = pool->dscs[i];
|
||||
|
@ -400,7 +402,7 @@ void dcn10_log_hw_state(struct dc *dc,
|
|||
dsc->inst,
|
||||
s.dsc_clock_en,
|
||||
s.dsc_slice_width,
|
||||
s.dsc_bytes_per_pixel);
|
||||
s.dsc_bits_per_pixel);
|
||||
DTN_INFO("\n");
|
||||
}
|
||||
DTN_INFO("\n");
|
||||
|
|
|
@ -31,10 +31,10 @@
|
|||
#define TO_DCN10_LINK_ENC(link_encoder)\
|
||||
container_of(link_encoder, struct dcn10_link_encoder, base)
|
||||
|
||||
|
||||
#define AUX_REG_LIST(id)\
|
||||
SRI(AUX_CONTROL, DP_AUX, id), \
|
||||
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
|
||||
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
|
||||
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
|
||||
|
||||
#define HPD_REG_LIST(id)\
|
||||
SRI(DC_HPD_CONTROL, HPD, id)
|
||||
|
@ -73,6 +73,7 @@ struct dcn10_link_enc_aux_registers {
|
|||
uint32_t AUX_CONTROL;
|
||||
uint32_t AUX_DPHY_RX_CONTROL0;
|
||||
uint32_t AUX_DPHY_TX_CONTROL;
|
||||
uint32_t AUX_DPHY_RX_CONTROL1;
|
||||
};
|
||||
|
||||
struct dcn10_link_enc_hpd_registers {
|
||||
|
@ -443,7 +444,10 @@ struct dcn10_link_enc_registers {
|
|||
type AUX_TX_PRECHARGE_LEN; \
|
||||
type AUX_TX_PRECHARGE_SYMBOLS; \
|
||||
type AUX_MODE_DET_CHECK_DELAY;\
|
||||
type DPCS_DBG_CBUS_DIS
|
||||
type DPCS_DBG_CBUS_DIS;\
|
||||
type AUX_RX_PRECHARGE_SKIP;\
|
||||
type AUX_RX_TIMEOUT_LEN;\
|
||||
type AUX_RX_TIMEOUT_LEN_MUL
|
||||
|
||||
struct dcn10_link_enc_shift {
|
||||
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
|
||||
|
|
|
@ -156,7 +156,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
|
|||
|
||||
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
|
||||
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
|
||||
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
|
||||
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
|
||||
REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
|
||||
REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
|
||||
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
|
||||
|
|
|
@ -2498,3 +2498,30 @@ void dcn20_fpga_init_hw(struct dc *dc)
|
|||
tg->funcs->tg_init(tg);
|
||||
}
|
||||
}
|
||||
#ifndef TRIM_FSFT
|
||||
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
|
||||
struct dc_crtc_timing *timing,
|
||||
unsigned int max_input_rate_in_khz)
|
||||
{
|
||||
unsigned int old_v_front_porch;
|
||||
unsigned int old_v_total;
|
||||
unsigned int max_input_rate_in_100hz;
|
||||
unsigned long long new_v_total;
|
||||
|
||||
max_input_rate_in_100hz = max_input_rate_in_khz * 10;
|
||||
if (max_input_rate_in_100hz < timing->pix_clk_100hz)
|
||||
return false;
|
||||
|
||||
old_v_total = timing->v_total;
|
||||
old_v_front_porch = timing->v_front_porch;
|
||||
|
||||
timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
|
||||
timing->pix_clk_100hz = max_input_rate_in_100hz;
|
||||
|
||||
new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);
|
||||
|
||||
timing->v_total = new_v_total;
|
||||
timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -132,5 +132,10 @@ int dcn20_init_sys_ctx(struct dce_hwseq *hws,
|
|||
struct dc *dc,
|
||||
struct dc_phy_addr_space_config *pa_config);
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
|
||||
struct dc_crtc_timing *timing,
|
||||
unsigned int max_input_rate_in_khz);
|
||||
#endif
|
||||
#endif /* __DC_HWSS_DCN20_H__ */
|
||||
|
||||
|
|
|
@ -88,6 +88,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
|
|||
.set_backlight_level = dce110_set_backlight_level,
|
||||
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
|
||||
.set_pipe = dce110_set_pipe,
|
||||
#ifndef TRIM_FSFT
|
||||
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct hwseq_private_funcs dcn20_private_funcs = {
|
||||
|
|
|
@ -309,7 +309,6 @@ bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
|
|||
void enc2_hw_init(struct link_encoder *enc)
|
||||
{
|
||||
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
|
||||
|
||||
/*
|
||||
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
|
||||
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
|
||||
|
@ -333,9 +332,18 @@ void enc2_hw_init(struct link_encoder *enc)
|
|||
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
|
||||
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
|
||||
*/
|
||||
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
|
||||
if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
|
||||
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
|
||||
|
||||
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
|
||||
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
|
||||
|
||||
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
|
||||
} else {
|
||||
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
|
||||
|
||||
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
|
||||
|
||||
}
|
||||
|
||||
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
|
||||
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
|
||||
|
|
|
@ -191,7 +191,10 @@
|
|||
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
|
||||
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
|
||||
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
|
||||
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh)
|
||||
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
|
||||
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
|
||||
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
|
||||
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
|
||||
|
||||
#define UNIPHY_DCN2_REG_LIST(id) \
|
||||
SRI(CLOCK_ENABLE, SYMCLK, id), \
|
||||
|
|
|
@ -2223,7 +2223,7 @@ int dcn20_populate_dml_pipes_from_context(
|
|||
if (!res_ctx->pipe_ctx[i].plane_state) {
|
||||
pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
|
||||
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
|
||||
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
|
||||
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s;
|
||||
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
|
||||
pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
|
||||
if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
|
||||
|
@ -2235,7 +2235,7 @@ int dcn20_populate_dml_pipes_from_context(
|
|||
pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
|
||||
pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
|
||||
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
|
||||
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
|
||||
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
|
||||
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
|
||||
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
|
||||
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
|
||||
|
@ -3069,8 +3069,7 @@ void dcn20_calculate_dlg_params(
|
|||
int pipe_cnt,
|
||||
int vlevel)
|
||||
{
|
||||
int i, j, pipe_idx, pipe_idx_unsplit;
|
||||
bool visited[MAX_PIPES] = { 0 };
|
||||
int i, pipe_idx;
|
||||
|
||||
/* Writeback MCIF_WB arbitration parameters */
|
||||
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
|
||||
|
@ -3089,55 +3088,17 @@ void dcn20_calculate_dlg_params(
|
|||
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
|
||||
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
|
||||
|
||||
/*
|
||||
* An artifact of dml pipe split/odm is that pipes get merged back together for
|
||||
* calculation. Therefore we need to only extract for first pipe in ascending index order
|
||||
* and copy into the other split half.
|
||||
*/
|
||||
for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!context->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
|
||||
if (!visited[pipe_idx]) {
|
||||
display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
|
||||
display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
|
||||
|
||||
dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
|
||||
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
|
||||
dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
|
||||
dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
|
||||
/*
|
||||
* j iterates inside pipes array, unlike i which iterates inside
|
||||
* pipe_ctx array
|
||||
*/
|
||||
if (src->is_hsplit)
|
||||
for (j = pipe_idx + 1; j < pipe_cnt; j++) {
|
||||
display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
|
||||
display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;
|
||||
|
||||
if (src_j->is_hsplit && !visited[j]
|
||||
&& src->hsplit_grp == src_j->hsplit_grp) {
|
||||
dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
|
||||
dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
|
||||
dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
|
||||
dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
|
||||
visited[j] = true;
|
||||
}
|
||||
}
|
||||
visited[pipe_idx] = true;
|
||||
pipe_idx_unsplit++;
|
||||
}
|
||||
pipe_idx++;
|
||||
}
|
||||
|
||||
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (!context->res_ctx.pipe_ctx[i].stream)
|
||||
continue;
|
||||
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
|
||||
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
|
||||
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
|
||||
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
|
||||
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
|
||||
ASSERT(visited[pipe_idx]);
|
||||
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
|
||||
pipe_idx++;
|
||||
}
|
||||
|
|
|
@ -92,6 +92,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
|
|||
.set_backlight_level = dcn21_set_backlight_level,
|
||||
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
|
||||
.set_pipe = dcn21_set_pipe,
|
||||
#ifndef TRIM_FSFT
|
||||
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct hwseq_private_funcs dcn21_private_funcs = {
|
||||
|
|
|
@ -62,7 +62,7 @@ static const struct link_encoder_funcs dcn30_link_enc_funcs = {
|
|||
.read_state = link_enc2_read_state,
|
||||
.validate_output_with_stream =
|
||||
dcn30_link_encoder_validate_output_with_stream,
|
||||
.hw_init = enc2_hw_init,
|
||||
.hw_init = enc3_hw_init,
|
||||
.setup = dcn10_link_encoder_setup,
|
||||
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
|
||||
.enable_dp_output = dcn20_link_encoder_enable_dp_output,
|
||||
|
@ -203,3 +203,54 @@ void dcn30_link_encoder_construct(
|
|||
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#define AUX_REG(reg)\
|
||||
(enc10->aux_regs->reg)
|
||||
|
||||
#define AUX_REG_READ(reg_name) \
|
||||
dm_read_reg(CTX, AUX_REG(reg_name))
|
||||
|
||||
#define AUX_REG_WRITE(reg_name, val) \
|
||||
dm_write_reg(CTX, AUX_REG(reg_name), val)
|
||||
void enc3_hw_init(struct link_encoder *enc)
|
||||
{
|
||||
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
|
||||
|
||||
/*
|
||||
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
|
||||
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
|
||||
02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
|
||||
03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
|
||||
04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
|
||||
05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
|
||||
06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
|
||||
07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
|
||||
*/
|
||||
|
||||
/*
|
||||
AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
|
||||
AUX_RX_START_WINDOW = 1 [6:4]
|
||||
AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
|
||||
AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
|
||||
AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
|
||||
AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
|
||||
AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
|
||||
AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
|
||||
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
|
||||
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
|
||||
*/
|
||||
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
|
||||
|
||||
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
|
||||
|
||||
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
|
||||
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
|
||||
// 27MHz -> 0xd
|
||||
// 100MHz -> 0x32
|
||||
// 48MHz -> 0x18
|
||||
|
||||
// Set TMDS_CTL0 to 1. This is a legacy setting.
|
||||
REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
|
||||
|
||||
dcn10_aux_initialize(enc10);
|
||||
}
|
||||
|
|
|
@ -73,4 +73,6 @@ void dcn30_link_encoder_construct(
|
|||
const struct dcn10_link_enc_shift *link_shift,
|
||||
const struct dcn10_link_enc_mask *link_mask);
|
||||
|
||||
void enc3_hw_init(struct link_encoder *enc);
|
||||
|
||||
#endif /* __DC_LINK_ENCODER__DCN30_H__ */
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include "dce110/dce110_hw_sequencer.h"
|
||||
#include "dcn10/dcn10_hw_sequencer.h"
|
||||
#include "dcn20/dcn20_hwseq.h"
|
||||
#include "dcn21/dcn21_hwseq.h"
|
||||
#include "dcn30_hwseq.h"
|
||||
|
||||
static const struct hw_sequencer_funcs dcn30_funcs = {
|
||||
|
@ -87,8 +88,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
|
|||
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
|
||||
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
|
||||
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
|
||||
.set_backlight_level = dce110_set_backlight_level,
|
||||
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
|
||||
.set_backlight_level = dcn21_set_backlight_level,
|
||||
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
|
||||
};
|
||||
|
||||
static const struct hwseq_private_funcs dcn30_private_funcs = {
|
||||
|
|
|
@ -154,23 +154,11 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimeP
|
|||
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
|
||||
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
|
||||
|
||||
dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
|
||||
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
|
||||
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
|
||||
dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
|
||||
|
||||
unsigned int get_vstartup_calculated(
|
||||
struct display_mode_lib *mode_lib,
|
||||
const display_e2e_pipe_params_st *pipes,
|
||||
unsigned int num_pipes,
|
||||
unsigned int which_pipe)
|
||||
{
|
||||
unsigned int which_plane;
|
||||
|
||||
recalculate_params(mode_lib, pipes, num_pipes);
|
||||
which_plane = mode_lib->vba.pipe_plane[which_pipe];
|
||||
return mode_lib->vba.VStartup[which_plane];
|
||||
}
|
||||
|
||||
double get_total_immediate_flip_bytes(
|
||||
struct display_mode_lib *mode_lib,
|
||||
const display_e2e_pipe_params_st *pipes,
|
||||
|
@ -479,7 +467,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
|
|||
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
|
||||
1;
|
||||
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
|
||||
mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
|
||||
mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;;
|
||||
mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
|
||||
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
|
||||
dout->dsc_slices;
|
||||
mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
|
||||
|
|
|
@ -98,16 +98,11 @@ dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us);
|
|||
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us);
|
||||
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us);
|
||||
|
||||
dml_get_pipe_attr_decl(vstartup);
|
||||
dml_get_pipe_attr_decl(vupdate_offset);
|
||||
dml_get_pipe_attr_decl(vupdate_width);
|
||||
dml_get_pipe_attr_decl(vready_offset);
|
||||
|
||||
unsigned int get_vstartup_calculated(
|
||||
struct display_mode_lib *mode_lib,
|
||||
const display_e2e_pipe_params_st *pipes,
|
||||
unsigned int num_pipes,
|
||||
unsigned int which_pipe);
|
||||
|
||||
double get_total_immediate_flip_bytes(
|
||||
struct display_mode_lib *mode_lib,
|
||||
const display_e2e_pipe_params_st *pipes,
|
||||
|
|
|
@ -71,8 +71,9 @@ enum dentist_divider_range {
|
|||
|
||||
#define CTX \
|
||||
clk_mgr->base.ctx
|
||||
|
||||
#define DC_LOGGER \
|
||||
clk_mgr->ctx->logger
|
||||
clk_mgr->base.ctx->logger
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ struct dsc_optc_config {
|
|||
struct dcn_dsc_state {
|
||||
uint32_t dsc_clock_en;
|
||||
uint32_t dsc_slice_width;
|
||||
uint32_t dsc_bytes_per_pixel;
|
||||
uint32_t dsc_bits_per_pixel;
|
||||
uint32_t dsc_slice_height;
|
||||
uint32_t dsc_pic_width;
|
||||
uint32_t dsc_pic_height;
|
||||
|
|
|
@ -116,6 +116,11 @@ struct hw_sequencer_funcs {
|
|||
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
|
||||
int num_pipes,
|
||||
const struct dc_static_screen_params *events);
|
||||
#ifndef TRIM_FSFT
|
||||
bool (*optimize_timing_for_fsft)(struct dc *dc,
|
||||
struct dc_crtc_timing *timing,
|
||||
unsigned int max_input_rate_in_khz);
|
||||
#endif
|
||||
|
||||
/* Stream Related */
|
||||
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
|
||||
|
|
|
@@ -66,6 +66,8 @@ enum link_training_result {
	/* other failure during EQ step */
	LINK_TRAINING_EQ_FAIL_EQ,
	LINK_TRAINING_LQA_FAIL,
	/* one of the CR,EQ or symbol lock is dropped */
	LINK_TRAINING_LINK_LOSS,
};

struct link_training_settings {
@@ -829,10 +829,13 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
	switch (packet_type) {
	case PACKET_TYPE_FS_V3:
#ifndef TRIM_FSFT
		// always populate with pixel rate.
		build_vrr_infopacket_v3(
				stream->signal, vrr,
				stream->timing.flags.FAST_TRANSPORT,
				stream->timing.fast_transport_output_rate_100hz,
				(stream->timing.flags.FAST_TRANSPORT) ?
					stream->timing.fast_transport_output_rate_100hz :
					stream->timing.pix_clk_100hz,
				app_tf, infopacket);
#else
		build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
@@ -941,7 +941,6 @@ struct atom_display_controller_info_v4_1
  uint8_t  reserved3[8];
};

struct atom_display_controller_info_v4_2
{
  struct  atom_common_table_header  table_header;

@@ -976,6 +975,59 @@ struct atom_display_controller_info_v4_2
  uint8_t  reserved3[8];
};

struct atom_display_controller_info_v4_4 {
  struct atom_common_table_header table_header;
  uint32_t display_caps;
  uint32_t bootup_dispclk_10khz;
  uint16_t dce_refclk_10khz;
  uint16_t i2c_engine_refclk_10khz;
  uint16_t dvi_ss_percentage;      // in unit of 0.001%
  uint16_t dvi_ss_rate_10hz;
  uint16_t hdmi_ss_percentage;     // in unit of 0.001%
  uint16_t hdmi_ss_rate_10hz;
  uint16_t dp_ss_percentage;       // in unit of 0.001%
  uint16_t dp_ss_rate_10hz;
  uint8_t  dvi_ss_mode;            // enum of atom_spread_spectrum_mode
  uint8_t  hdmi_ss_mode;           // enum of atom_spread_spectrum_mode
  uint8_t  dp_ss_mode;             // enum of atom_spread_spectrum_mode
  uint8_t  ss_reserved;
  uint8_t  dfp_hardcode_mode_num;  // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
  uint8_t  dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
  uint8_t  vga_hardcode_mode_num;  // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable
  uint8_t  vga_hardcode_refreshrate;// VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable
  uint16_t dpphy_refclk_10khz;
  uint16_t hw_chip_id;
  uint8_t  dcnip_min_ver;
  uint8_t  dcnip_max_ver;
  uint8_t  max_disp_pipe_num;
  uint8_t  max_vbios_active_disp_pipum;
  uint8_t  max_ppll_num;
  uint8_t  max_disp_phy_num;
  uint8_t  max_aux_pairs;
  uint8_t  remotedisplayconfig;
  uint32_t dispclk_pll_vco_freq;
  uint32_t dp_ref_clk_freq;
  uint32_t max_mclk_chg_lat;       // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of ns (0.1 us)
  uint32_t max_sr_exit_lat;        // Worst case memory self refresh exit time, units of 100ns of ns (0.1us)
  uint32_t max_sr_enter_exit_lat;  // Worst case memory self refresh entry followed by immediate exit time, units of 100ns of ns (0.1us)
  uint16_t dc_golden_table_offset; // point of struct of atom_dc_golden_table_vxx
  uint16_t dc_golden_table_ver;
  uint32_t reserved3[3];
};

struct atom_dc_golden_table_v1
{
  uint32_t aux_dphy_rx_control0_val;
  uint32_t aux_dphy_tx_control_val;
  uint32_t aux_dphy_rx_control1_val;
  uint32_t dc_gpio_aux_ctrl_0_val;
  uint32_t dc_gpio_aux_ctrl_1_val;
  uint32_t dc_gpio_aux_ctrl_2_val;
  uint32_t dc_gpio_aux_ctrl_3_val;
  uint32_t dc_gpio_aux_ctrl_4_val;
  uint32_t dc_gpio_aux_ctrl_5_val;
  uint32_t reserved[23];
};

enum dce_info_caps_def
{
@@ -133,6 +133,78 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
	return ret;
}

static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *

@@ -353,6 +425,45 @@ static int smu_early_init(void *handle)
	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -579,6 +690,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}

@@ -586,6 +701,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

@@ -643,6 +760,11 @@ static int smu_sw_init(void *handle)
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;

@@ -734,7 +856,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		return 0;
	}

@@ -844,10 +966,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
		return ret;
	}

	ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
	if (ret)
		return ret;

	ret = smu_disable_umc_cdr_12gbps_workaround(smu);
	if (ret) {
		dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");

@@ -1046,8 +1164,6 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu_i2c_fini(smu, &adev->pm.smu_i2c);

	cancel_work_sync(&smu->throttling_logging_work);

	ret = smu_disable_thermal_alert(smu);

@@ -1590,6 +1706,9 @@ int smu_set_mp1_state(struct smu_context *smu,
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

@@ -1944,6 +2063,10 @@ int smu_read_sensor(struct smu_context *smu,

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;

@@ -1966,7 +2089,7 @@ int smu_read_sensor(struct smu_context *smu,
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:

@@ -1974,11 +2097,12 @@ int smu_read_sensor(struct smu_context *smu,
		*size = 4;
		break;
	default:
		if (smu->ppt_funcs->read_sensor)
			ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
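The swSMU hunks above follow one pattern: VCN/JPEG gate state is tracked in an atomic under a mutex, a "_locked" helper skips the firmware message when the block is already in the requested state, and the default DPM table setup temporarily forces both blocks on and then restores the saved state. The following is an illustrative, self-contained userspace sketch of that pattern only, with hypothetical names; it is not the kernel implementation.

/* gcc -pthread gate.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct power_gate {
	atomic_int vcn_gated;          /* 1 = gated (off), 0 = ungated (on) */
	pthread_mutex_t vcn_gate_lock; /* serializes gate transitions */
};

/* Caller holds vcn_gate_lock; no-op when already in the requested state. */
static int set_vcn_enable_locked(struct power_gate *pg, int enable)
{
	if (atomic_load(&pg->vcn_gated) ^ enable)
		return 0;                       /* nothing to do */
	/* ... a real driver would send PowerUpVcn/PowerDownVcn here ... */
	atomic_store(&pg->vcn_gated, !enable);
	return 0;
}

/* Set up tables with VCN forced on, then put the gate back as it was. */
static int set_default_dpm_table(struct power_gate *pg)
{
	int saved, ret;

	pthread_mutex_lock(&pg->vcn_gate_lock);
	saved = atomic_load(&pg->vcn_gated);    /* remember current gate state */
	ret = set_vcn_enable_locked(pg, 1);     /* force ungate */
	if (!ret) {
		/* ... query the VCN clock tables here ... */
		set_vcn_enable_locked(pg, !saved);  /* restore saved state */
	}
	pthread_mutex_unlock(&pg->vcn_gate_lock);
	return ret;
}

int main(void)
{
	struct power_gate pg = { .vcn_gated = 1,
				 .vcn_gate_lock = PTHREAD_MUTEX_INITIALIZER };
	printf("setup: %d, gated afterwards: %d\n",
	       set_default_dpm_table(&pg), atomic_load(&pg.vcn_gated));
	return 0;
}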
@@ -1849,8 +1849,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)

static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -1861,7 +1859,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
				return ret;
			}
		}
		power_gate->vcn_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);

@@ -1870,7 +1867,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
				return ret;
			}
		}
		power_gate->vcn_gated = true;
	}

	return ret;

@@ -2080,22 +2076,11 @@ static const struct i2c_algorithm arcturus_i2c_algo = {
	.functionality = arcturus_i2c_func,
};

static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);

	return control->dev.parent == &adev->pdev->dev;
}

static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);
	int res;

	/* smu_i2c_eeprom_init may be called twice in sriov */
	if (arcturus_i2c_adapter_is_added(control))
		return 0;

	control->owner = THIS_MODULE;
	control->class = I2C_CLASS_SPD;
	control->dev.parent = &adev->pdev->dev;

@@ -2111,9 +2096,6 @@ static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter

static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	if (!arcturus_i2c_adapter_is_added(control))
		return;

	i2c_del_adapter(control);
}
|
@ -27,7 +27,7 @@
|
|||
// *** IMPORTANT ***
|
||||
// SMU TEAM: Always increment the interface version if
|
||||
// any structure is changed in this file
|
||||
#define SMU11_DRIVER_IF_VERSION 0x33
|
||||
#define SMU11_DRIVER_IF_VERSION 0x34
|
||||
|
||||
#define PPTABLE_Sienna_Cichlid_SMU_VERSION 5
|
||||
|
||||
|
@ -968,9 +968,15 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
uint32_t CurrClock[PPCLK_COUNT];
|
||||
uint16_t AverageGfxclkFrequency;
|
||||
uint16_t AverageFclkFrequency;
|
||||
uint16_t AverageUclkFrequency ;
|
||||
|
||||
uint16_t AverageGfxclkFrequencyPreDs;
|
||||
uint16_t AverageGfxclkFrequencyPostDs;
|
||||
uint16_t AverageFclkFrequencyPreDs;
|
||||
uint16_t AverageFclkFrequencyPostDs;
|
||||
uint16_t AverageUclkFrequencyPreDs ;
|
||||
uint16_t AverageUclkFrequencyPostDs ;
|
||||
|
||||
|
||||
uint16_t AverageGfxActivity ;
|
||||
uint16_t AverageUclkActivity ;
|
||||
uint8_t CurrSocVoltageOffset ;
|
||||
|
@ -988,6 +994,7 @@ typedef struct {
|
|||
uint16_t TemperatureLiquid0 ;
|
||||
uint16_t TemperatureLiquid1 ;
|
||||
uint16_t TemperaturePlx ;
|
||||
uint16_t Padding16 ;
|
||||
uint32_t ThrottlerStatus ;
|
||||
|
||||
uint8_t LinkDpmLevel;
|
||||
|
@ -1006,8 +1013,10 @@ typedef struct {
|
|||
uint16_t AverageDclk0Frequency ;
|
||||
uint16_t AverageVclk1Frequency ;
|
||||
uint16_t AverageDclk1Frequency ;
|
||||
uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
|
||||
uint16_t padding16_2;
|
||||
uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
|
||||
uint8_t PcieRate ;
|
||||
uint8_t PcieWidth ;
|
||||
|
||||
} SmuMetrics_t;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -30,8 +30,8 @@
|
|||
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
|
||||
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
|
||||
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
|
||||
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
|
||||
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
|
||||
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34
|
||||
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
|
||||
|
||||
/* MP Apertures */
|
||||
#define MP0_Public 0x03800000
|
||||
|
|
|
@@ -785,8 +785,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)

static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -796,14 +794,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = true;
	}

	return ret;

@@ -811,8 +807,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)

static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -821,14 +815,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = true;
	}

	return ret;

@@ -2457,22 +2449,11 @@ static const struct i2c_algorithm navi10_i2c_algo = {
	.functionality = navi10_i2c_func,
};

static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);

	return control->dev.parent == &adev->pdev->dev;
}

static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);
	int res;

	/* smu_i2c_eeprom_init may be called twice in sriov */
	if (navi10_i2c_adapter_is_added(control))
		return 0;

	control->owner = THIS_MODULE;
	control->class = I2C_CLASS_SPD;
	control->dev.parent = &adev->pdev->dev;

@@ -2488,9 +2469,6 @@ static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *

static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	if (!navi10_i2c_adapter_is_added(control))
		return;

	i2c_del_adapter(control);
}
@@ -459,8 +459,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context

static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -470,14 +468,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
			if (ret)
				return ret;
		}
		power_gate->vcn_gated = true;
	}

	return ret;

@@ -485,8 +481,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)

static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -495,14 +489,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = true;
	}

	return ret;
@@ -70,14 +70,16 @@
		FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
		FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))

#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15

static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	1),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	1),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		1),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),

@@ -85,42 +87,43 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			1),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		1),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		1),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	1),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		1),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			1),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode,		1),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh,	1),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow,		1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	1),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	1),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		1),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch,		1),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps,			1),
	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode,		0),
	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh,	0),
	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow,		0),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch,		0),
	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps,			0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			1),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		1),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			1),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			1),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			1),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		1),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME,		1),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			1),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(BacoAudioD3PME,			PPSMC_MSG_BacoAudioD3PME,		0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset,			0),
};

static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {

@@ -442,13 +445,16 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		*value = metrics->AverageFclkFrequency;
		*value = metrics->AverageFclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		*value = metrics->AverageUclkFrequencyPostDs;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;

@@ -760,10 +766,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)

static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	struct amdgpu_device *adev = smu->adev;

	int ret = 0;

	if (enable) {

@@ -779,7 +782,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
				return ret;
			}
		}
		power_gate->vcn_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);

@@ -792,7 +794,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
				return ret;
			}
		}
		power_gate->vcn_gated = true;
	}

	return ret;

@@ -800,8 +801,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl

static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (enable) {

@@ -810,14 +809,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = false;
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
			if (ret)
				return ret;
		}
		power_gate->jpeg_gated = true;
	}

	return ret;

@@ -2624,22 +2621,11 @@ static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
	.functionality = sienna_cichlid_i2c_func,
};

static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);

	return control->dev.parent == &adev->pdev->dev;
}

static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);
	int res;

	/* smu_i2c_eeprom_init may be called twice in sriov */
	if (sienna_cichlid_i2c_adapter_is_added(control))
		return 0;

	control->owner = THIS_MODULE;
	control->class = I2C_CLASS_SPD;
	control->dev.parent = &adev->pdev->dev;

@@ -2655,9 +2641,6 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_a

static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
{
	if (!sienna_cichlid_i2c_adapter_is_added(control))
		return;

	i2c_del_adapter(control);
}
@@ -166,7 +166,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index > SMU_MSG_MAX_COUNT ||
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

@@ -181,7 +181,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index > SMU_CLK_COUNT ||
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

@@ -192,7 +192,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index > SMU_FEATURE_COUNT ||
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

@@ -203,7 +203,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index > SMU_TABLE_COUNT ||
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

@@ -214,7 +214,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index > SMU_POWER_SOURCE_COUNT ||
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;
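The smu_cmn hunks above all make the same off-by-one correction: with a mapping table of COUNT entries the valid indices are 0..COUNT-1, so "index > COUNT" still lets index == COUNT through and reads one element past the end of the array. A minimal standalone illustration of why ">=" is the right bound (hypothetical names, not the kernel code):

#include <stdio.h>

#define MSG_MAX_COUNT 4
static const int message_map[MSG_MAX_COUNT] = { 10, 11, 12, 13 };

static int map_message(unsigned int index)
{
	if (index >= MSG_MAX_COUNT)  /* '>' here would accept index == 4 */
		return -22;          /* -EINVAL */
	return message_map[index];
}

int main(void)
{
	printf("%d %d\n", map_message(3), map_message(4)); /* prints: 13 -22 */
	return 0;
}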
@@ -60,7 +60,6 @@
#define smu_disable_all_features_with_exception(smu, mask)		smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu)						smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu)					smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_set_default_dpm_table(smu)					smu_ppt_funcs(set_default_dpm_table, 0, smu)
#define smu_populate_umd_state_clk(smu)					smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_set_default_od8_settings(smu)				smu_ppt_funcs(set_default_od8_settings, 0, smu)
#define smu_enable_thermal_alert(smu)					smu_ppt_funcs(enable_thermal_alert, 0, smu)

@@ -77,8 +76,6 @@
#define smu_get_dal_power_level(smu, clocks)				smu_ppt_funcs(get_dal_power_level, 0, smu, clocks)
#define smu_get_perf_level(smu, designation, level)			smu_ppt_funcs(get_perf_level, 0, smu, designation, level)
#define smu_get_current_shallow_sleep_clocks(smu, clocks)		smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
#define smu_dpm_set_vcn_enable(smu, enable)				smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
#define smu_dpm_set_jpeg_enable(smu, enable)				smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
#define smu_set_watermarks_table(smu, clock_ranges)			smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
#define smu_thermal_temperature_range_update(smu, range, rw)		smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
#define smu_register_irq_handler(smu)					smu_ppt_funcs(register_irq_handler, 0, smu)
@@ -1029,6 +1029,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)

@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)

static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	return ci_is_smc_ram_running(hwmgr);
	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
					     CGS_IND_REG__SMC, FEATURE_STATUS,
					     VOLTAGE_CONTROLLER_ON))
			? true : false;
}

static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -4308,11 +4308,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
	int ret;

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
	if (slots < 0)
		return false;

	if (slots < 0)
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {

@@ -4328,6 +4328,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		drm_dp_mst_topology_put_port(port);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
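The drm_dp_mst_allocate_vcpi hunks above fix a reference-count leak by reordering the checks: the cheap "slots < 0" argument check now happens before the validated-port reference is taken, and the error path after vcpi init drops the reference it holds. A tiny illustrative userspace model of that ordering rule (hypothetical names, not the DRM helpers themselves):

#include <stdio.h>

struct port { int refs; };

static struct port *port_get(struct port *p) { p->refs++; return p; }
static void port_put(struct port *p) { p->refs--; }

static int allocate_vcpi(struct port *p, int slots)
{
	if (slots < 0)           /* validate plain arguments first ... */
		return 0;

	p = port_get(p);         /* ... then take the reference */
	if (!p)
		return 0;
	/* ... set up the VCPI; every later error path must call port_put() ... */
	port_put(p);
	return 1;
}

int main(void)
{
	struct port p = { .refs = 0 };
	allocate_vcpi(&p, -1);
	printf("refs after rejected call: %d\n", p.refs);  /* 0, not a leaked 1 */
	return 0;
}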
@@ -815,8 +815,7 @@ static void drm_dev_release(struct kref *ref)

	drm_managed_release(dev);

	if (dev->managed.final_kfree)
		kfree(dev->managed.final_kfree);
	kfree(dev->managed.final_kfree);
}

/**

@@ -879,6 +879,9 @@ err:
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,

@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
		},
		.driver_data = (void *)&lcd800x1280_rightside_up,
	}, {	/* Asus T103HAF */
		.matches = {
		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
		},
		.driver_data = (void *)&lcd800x1280_rightside_up,
	}, {	/* GPD MicroPC (generic strings, also match on bios date) */
		.matches = {
		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -4915,6 +4915,7 @@ static int dispc_runtime_resume(struct device *dev)
static const struct dev_pm_ops dispc_pm_ops = {
	.runtime_suspend = dispc_runtime_suspend,
	.runtime_resume = dispc_runtime_resume,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver omap_dispchw_driver = {

@@ -5467,6 +5467,7 @@ static int dsi_runtime_resume(struct device *dev)
static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver omap_dsihw_driver = {

@@ -1614,6 +1614,7 @@ static int dss_runtime_resume(struct device *dev)
static const struct dev_pm_ops dss_pm_ops = {
	.runtime_suspend = dss_runtime_suspend,
	.runtime_resume = dss_runtime_resume,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver omap_dsshw_driver = {

@@ -903,6 +903,7 @@ static int venc_runtime_resume(struct device *dev)
static const struct dev_pm_ops venc_pm_ops = {
	.runtime_suspend = venc_runtime_suspend,
	.runtime_resume = venc_runtime_resume,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static const struct of_device_id venc_of_match[] = {

@@ -89,7 +89,7 @@ static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *conn
					 struct drm_display_mode *mode)
{
	struct omap_connector *omap_connector = to_omap_connector(connector);
	struct drm_display_mode new_mode = { { 0 } };
	struct drm_display_mode new_mode = {};
	enum drm_mode_status status;

	status = omap_connector_mode_fixup(omap_connector->output, mode,

@@ -154,7 +154,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
			break;
		case DISPC_VP_DPI:
			enc_type = DRM_MODE_ENCODER_DPI;
			conn_type = DRM_MODE_CONNECTOR_LVDS;
			conn_type = DRM_MODE_CONNECTOR_DPI;
			break;
		default:
			WARN_ON(1);
@@ -287,11 +287,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);

		ret = ttm_tt_create(bo, zero);
		if (ret)
			goto out_err;
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_tt_create(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)

@@ -652,8 +653,13 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);
	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_pipeline_gutting(bo);
		if (ret)
			return ret;

		return ttm_tt_create(bo, false);
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

@@ -1192,8 +1198,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);
	if (!placement->num_placement && !placement->num_busy_placement) {
		ret = ttm_bo_pipeline_gutting(bo);
		if (ret)
			return ret;

		return ttm_tt_create(bo, false);
	}

	/*
	 * Check whether we need to move buffer.

@@ -1210,6 +1221,14 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

@@ -531,15 +531,12 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	ret = ttm_tt_create(bo, true);
	if (ret)
		return ret;
	BUG_ON(!ttm);

	ttm = bo->ttm;
	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

@@ -351,11 +351,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,

		};

		if (ttm_tt_create(bo, true)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;

@@ -510,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)

@@ -50,9 +50,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

@@ -70,6 +67,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
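The TTM hunks above revert "make TT creation purely optional": callers once again request the backing TT object explicitly at the points where it is needed, and the create helper stays cheap to call by returning early when a TT already exists. A rough, self-contained userspace sketch of that idempotent-create-at-the-call-site idea, under assumed simplified types (not the TTM API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bo { void *ttm; size_t num_pages; };

/* Create the backing store if missing; harmless to call repeatedly. */
static int tt_create(struct bo *bo, int zero)
{
	if (bo->ttm)                  /* already present: nothing to do */
		return 0;
	bo->ttm = malloc(bo->num_pages * 4096);
	if (!bo->ttm)
		return -12;           /* -ENOMEM */
	if (zero)
		memset(bo->ttm, 0, bo->num_pages * 4096);
	return 0;
}

/* Callers decide when a TT is required and ask for it explicitly. */
static int bo_validate(struct bo *bo, int system_placement)
{
	if (system_placement)
		return tt_create(bo, 1);   /* ensure a TT exists before use */
	return 0;
}

int main(void)
{
	struct bo bo = { .ttm = NULL, .num_pages = 4 };
	printf("%d %d\n", bo_validate(&bo, 1), bo_validate(&bo, 1)); /* 0 0 */
	free(bo.ttm);
	return 0;
}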
@@ -3037,7 +3037,7 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Cound not find streamoutput to bind.\n");
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

@@ -186,7 +186,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
		/* TODO handle none page aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more then one copy */
		DRM_ERROR("Cant snoop dma request for cursor!\n");
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,

@@ -2575,7 +2575,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
		++i;
	}

	if (i != unit) {
	if (&con->head == &dev_priv->dev->mode_config.connector_list) {
		DRM_ERROR("Could not find initial display unit.\n");
		ret = -EINVAL;
		goto out_unlock;

@@ -2599,13 +2599,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
	if (&mode->head == &con->modes) {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	} else {
		*p_mode = mode;
	}

out_unlock:
@@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
	struct vmw_legacy_display_unit *entry;
	struct drm_framebuffer *fb = NULL;
	struct drm_crtc *crtc = NULL;
	int i = 0;
	int i;

	/* If there is no display topology the host just assumes
	 * that the guest will set the same layout as the host.

@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
		crtc = &entry->base.crtc;
		w = max(w, crtc->x + crtc->mode.hdisplay);
		h = max(h, crtc->y + crtc->mode.vdisplay);
		i++;
	}

	if (crtc == NULL)
		return 0;
	fb = entry->base.crtc.primary->state->fb;
	fb = crtc->primary->state->fb;

	return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
				  fb->format->cpp[0] * 8,

@@ -388,8 +387,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
	ldu->base.is_implicit = true;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &ldu->base.primary,
				       0, &vmw_ldu_plane_funcs,
				       vmw_primary_plane_formats,

@@ -403,8 +400,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
	drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &ldu->base.cursor,
			0, &vmw_ldu_cursor_funcs,
			vmw_cursor_plane_formats,

@@ -418,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)

	drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {

@@ -446,7 +440,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
		goto err_free_encoder;
	}

	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
					&ldu->base.cursor,
					&vmw_legacy_crtc_funcs, NULL);

@@ -521,6 +514,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)

	dev_priv->active_display_unit = vmw_du_legacy;

	drm_mode_config_reset(dev);

	DRM_INFO("Legacy Display Unit initialized\n");

	return 0;
@@ -859,8 +859,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,

@@ -875,8 +873,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
	drm_plane_enable_fb_damage_clips(primary);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
			0, &vmw_sou_cursor_funcs,
			vmw_cursor_plane_formats,

@@ -890,7 +886,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {

@@ -918,8 +913,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
		goto err_free_encoder;
	}


	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);

@@ -973,6 +966,8 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)

	dev_priv->active_display_unit = vmw_du_screen_object;

	drm_mode_config_reset(dev);

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
@@ -1738,8 +1738,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
	stdu->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, primary,
				       0, &vmw_stdu_plane_funcs,
				       vmw_primary_plane_formats,

@@ -1754,8 +1752,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
	drm_plane_enable_fb_damage_clips(primary);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, cursor,
			0, &vmw_stdu_cursor_funcs,
			vmw_cursor_plane_formats,

@@ -1769,8 +1765,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)

	drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);

	ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {

@@ -1798,7 +1792,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
		goto err_free_encoder;
	}

	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary,
					&stdu->base.cursor,
					&vmw_stdu_crtc_funcs, NULL);

@@ -1894,6 +1887,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
		}
	}

	drm_mode_config_reset(dev);

	DRM_INFO("Screen Target Display device initialized\n");

	return 0;

@@ -1969,7 +1969,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
	dirty_size = struct_size(dirty, boxes, num_subres);
	acc_size = ttm_round_pot(dirty_size);
	ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
				   acc_size, &ctx);
@@ -44,7 +44,7 @@ MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
 */
static uint zynqmp_dp_power_on_delay_ms = 4;
module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)");
MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");

/* Link configuration registers */
#define ZYNQMP_DP_LINK_BW_SET				0x0

@@ -567,34 +567,37 @@ static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
				    u8 current_bw)
{
	int max_rate = dp->link_config.max_rate;
	u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	u8 bw_code;
	u8 max_lanes = dp->link_config.max_lanes;
	u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
	u8 bpp = dp->config.bpp;
	u8 lane_cnt;
	s8 i;

	if (current_bw == DP_LINK_BW_1_62) {
	/* Downshift from current bandwidth */
	switch (current_bw) {
	case DP_LINK_BW_5_4:
		bw_code = DP_LINK_BW_2_7;
		break;
	case DP_LINK_BW_2_7:
		bw_code = DP_LINK_BW_1_62;
		break;
	case DP_LINK_BW_1_62:
		dev_err(dp->dev, "can't downshift. already lowest link rate\n");
		return -EINVAL;
	}

	for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
		if (current_bw && bws[i] >= current_bw)
			continue;

		if (bws[i] <= max_link_rate_code)
			break;
	default:
		/* If not given, start with max supported */
		bw_code = max_link_rate_code;
		break;
	}

	for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
		int bw;
		u32 rate;

		bw = drm_dp_bw_code_to_link_rate(bws[i]);
		bw = drm_dp_bw_code_to_link_rate(bw_code);
		rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
		if (pclock <= rate) {
			dp->mode.bw_code = bws[i];
			dp->mode.bw_code = bw_code;
			dp->mode.lane_cnt = lane_cnt;
			dp->mode.pclock = pclock;
			return dp->mode.bw_code;

@@ -1308,7 +1311,7 @@ zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
		ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
				       sizeof(dp->dpcd));
		if (ret < 0) {
			dev_dbg(dp->dev, "DPCD read failes");
			dev_dbg(dp->dev, "DPCD read failed");
			goto disconnected;
		}
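The zynqmp_dp_mode_configure hunk above replaces an index-based scan of a bandwidth array with an explicit switch that steps the link rate down one notch per retraining attempt, failing once the lowest rate is reached. A standalone sketch of just that downshift step, using the real DPCD bandwidth codes but otherwise hypothetical names:

#include <stdio.h>

enum { BW_1_62 = 0x06, BW_2_7 = 0x0a, BW_5_4 = 0x14 };

/* Return the next lower link-rate code, or -1 when already at the bottom. */
static int downshift(int current_bw)
{
	switch (current_bw) {
	case BW_5_4:
		return BW_2_7;
	case BW_2_7:
		return BW_1_62;
	case BW_1_62:
	default:
		return -1;   /* can't downshift any further */
	}
}

int main(void)
{
	printf("0x%x 0x%x %d\n", downshift(BW_5_4), downshift(BW_2_7),
	       downshift(BW_1_62));   /* prints: 0xa 0x6 -1 */
	return 0;
}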
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(vga_get);
 *
 * 0 on success, negative error code on failure.
 */
int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
{
	struct vga_device *vgadev;
	unsigned long flags;

@@ -554,7 +554,6 @@ bail:
	spin_unlock_irqrestore(&vga_lock, flags);
	return rc;
}
EXPORT_SYMBOL(vga_tryget);

/**
 * vga_put - release lock on legacy VGA resources
@@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
	int flags = info->flags;
	int ret = 0;
	u32 activate;
	struct fb_var_screeninfo old_var;

@@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
	event.data = &mode;
	fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);

	if (flags & FBINFO_MISC_USEREVENT)
		fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);

	return 0;
}
EXPORT_SYMBOL(fb_set_var);

@@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
			return -EFAULT;
		console_lock();
		lock_fb_info(info);
		info->flags |= FBINFO_MISC_USEREVENT;
		ret = fb_set_var(info, &var);
		info->flags &= ~FBINFO_MISC_USEREVENT;
		if (!ret)
			fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
		unlock_fb_info(info);
		console_unlock();
		if (!ret && copy_to_user(argp, &var, sizeof(var)))
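The fbdev hunks above (and the ones that follow) all drop the FBINFO_MISC_USEREVENT flag: instead of flagging the fb_info so the core notifies the console as a side effect of fb_set_var(), each caller now performs the mode set and then calls fbcon_update_vcs() itself while still holding console_lock. A minimal userspace sketch of that caller-driven pattern, with hypothetical names standing in for the fbdev API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_var(int *current_mode, int new_mode)
{
	*current_mode = new_mode;     /* the mode change itself */
	return 0;
}

static void console_update(int mode)
{
	printf("console now follows mode %d\n", mode);
}

static int ioctl_set_mode(int *current_mode, int new_mode)
{
	int ret;

	pthread_mutex_lock(&console_lock);
	ret = set_var(current_mode, new_mode);
	if (!ret)
		console_update(*current_mode);  /* explicit, no flag needed */
	pthread_mutex_unlock(&console_lock);
	return ret;
}

int main(void)
{
	int mode = 0;
	return ioctl_set_mode(&mode, 3);
}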
@@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)

	var->activate |= FB_ACTIVATE_FORCE;
	console_lock();
	fb_info->flags |= FBINFO_MISC_USEREVENT;
	err = fb_set_var(fb_info, var);
	fb_info->flags &= ~FBINFO_MISC_USEREVENT;
	if (!err)
		fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
	console_unlock();
	if (err)
		return err;

@@ -29,6 +29,7 @@
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
#include <linux/init.h>

#include <asm/cell-regs.h>

@@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
			var = info->var;
			fb_videomode_to_var(&var, vmode);
			console_lock();
			info->flags |= FBINFO_MISC_USEREVENT;
			/* Force, in case only special bits changed */
			var.activate |= FB_ACTIVATE_FORCE;
			par->new_mode_id = val;
			retval = fb_set_var(info, &var);
			info->flags &= ~FBINFO_MISC_USEREVENT;
			if (!retval)
				fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
			console_unlock();
		}
		break;
@@ -400,8 +400,6 @@ struct fb_tile_ops {
#define FBINFO_HWACCEL_YPAN		0x2000 /* optional */
#define FBINFO_HWACCEL_YWRAP		0x4000 /* optional */

#define FBINFO_MISC_USEREVENT          0x10000 /* event request
						  from userspace */
#define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */

/* A driver may set this flag to indicate that it does want a set_par to be
@@ -109,12 +109,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
	return vga_get(pdev, rsrc, 0);
}

#if defined(CONFIG_VGA_ARB)
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
#else
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
#endif

#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
@@ -1004,7 +1004,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
	fourcc_mod_code(AMLOGIC, \
			((__layout) & __fourcc_mod_amlogic_layout_mask) | \
			((__options) & __fourcc_mod_amlogic_options_mask \
			(((__options) & __fourcc_mod_amlogic_options_mask) \
			 << __fourcc_mod_amlogic_options_shift))

/* Amlogic FBC Layouts */