Merge tag 'drm-next-2023-09-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular rounds of rc1 fixes, a large bunch for amdgpu since it's three
  weeks in one go, one i915, one nouveau and one ivpu.

  I think there might be a few more fixes in misc that I haven't pulled
  in yet, but we should get them all for rc2.

  amdgpu:
   - Display replay fixes
   - Fixes for headless boards
   - Fix documentation breakage
   - RAS fixes
   - Handle newer IP discovery tables
   - SMU 13.0.6 fixes
   - SR-IOV fixes
   - Display vstartup fixes
   - NBIO 7.9 fixes
   - Display scaling mode fixes
   - Debugfs power reporting fix
   - GC 9.4.3 fixes
   - Dirty framebuffer fixes for fbcon
   - eDP fixes
   - DCN 3.1.5 fix
   - Display ODM fixes
   - GPU core dump fix
 - Re-enable zpos property now that IGT test is fixed
   - Fix possible UAF in CS code
   - Cursor degamma fix

  amdkfd:
   - HMM fixes
   - Interrupt masking fix
   - GFX11 MQD fixes

  i915:
   - Mark requests for GuC virtual engines to avoid use-after-free

  nouveau:
   - Fix fence state in nouveau_fence_emit()

  ivpu:
 - Replace strncpy"

* tag 'drm-next-2023-09-08' of git://anongit.freedesktop.org/drm/drm: (51 commits)
  drm/amdgpu: Restrict bootloader wait to SMUv13.0.6
  drm/amd/display: prevent potential division by zero errors
  drm/amd/display: enable cursor degamma for DCN3+ DRM legacy gamma
  drm/amd/display: limit the v_startup workaround to ASICs older than DCN3.1
  Revert "drm/amd/display: Remove v_startup workaround for dcn3+"
  drm/amdgpu: fix amdgpu_cs_p1_user_fence
  Revert "Revert "drm/amd/display: Implement zpos property""
  drm/amdkfd: Add missing gfx11 MQD manager callbacks
  drm/amdgpu: Free ras cmd input buffer properly
  drm/amdgpu: Hide xcp partition sysfs under SRIOV
  drm/amdgpu: use read-modify-write mode for gfx v9_4_3 SQ setting
  drm/amdkfd: use mask to get v9 interrupt sq data bits correctly
  drm/amdgpu: Allocate coredump memory in a nonblocking way
  drm/amdgpu: Support query ecc cap for aqua_vanjaram
  drm/amdgpu: Add umc_info v4_0 structure
  drm/amd/display: always switch off ODM before committing more streams
  drm/amd/display: Remove wait while locked
  drm/amd/display: update blank state on ODM changes
  drm/amd/display: Add smu write msg id fail retry process
  drm/amdgpu: Add SMU v13.0.6 default reset methods
  ...
Linus Torvalds 2023-09-07 19:47:04 -07:00
Parents: 73be7fb14e 43ffcd6fa1
Commit: a48fa7efaf
62 changed files: 684 additions and 227 deletions

View file

@ -11,19 +11,19 @@ via sysfs
product_name
------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: product_name
product_number
--------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
:doc: product_name
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: product_number
serial_number
-------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: serial_number
unique_id

View file

@ -118,8 +118,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
struct vpu_jsm_msg resp;
int ret;
if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
return -ENOMEM;
strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
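The check removed above was dead code: strncpy() returns its destination pointer, which is never NULL, so the -ENOMEM branch could not trigger, and strncpy() also leaves the buffer unterminated when the source does not fit. A small userspace model of the strscpy() contract the fix relies on (an illustration of the semantics, not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

/* Model of strscpy(): always NUL-terminates and returns the number of
 * bytes copied (excluding the NUL), or -7 (-E2BIG) on truncation. */
static long strscpy_model(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return -7;

	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';

	return src[i] ? -7 : (long)i;
}

int main(void)
{
	char buf[8];
	long ret = strscpy_model(buf, "toolongstring", sizeof(buf));

	/* Prints ret=-7 buf="toolong": truncation is reported, and the
	 * buffer is still a valid C string. */
	printf("ret=%ld buf=\"%s\"\n", ret, buf);
	return 0;
}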

View file

@ -442,9 +442,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
mem_info->local_mem_size_public,
mem_info->local_mem_size_private);
if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100;
else if (adev->pm.dpm_enabled) {
if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@ -463,9 +461,7 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
/* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
else if (adev->pm.dpm_enabled)
if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;

View file

@ -217,6 +217,7 @@ union umc_info {
struct atom_umc_info_v3_1 v31;
struct atom_umc_info_v3_2 v32;
struct atom_umc_info_v3_3 v33;
struct atom_umc_info_v4_0 v40;
};
union vram_info {
@ -508,9 +509,8 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
if (frev == 3) {
umc_info = (union umc_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
@ -533,6 +533,20 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
/* unsupported crev */
return false;
}
} else if (frev == 4) {
switch (crev) {
case 0:
umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
ecc_default_enabled =
(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
break;
default:
/* unsupported crev */
return false;
}
} else {
/* unsupported frev */
return false;
}
}

View file

@ -127,7 +127,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@ -137,23 +136,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
drm_gem_object_put(gobj);
size = amdgpu_bo_size(p->uf_bo);
if (size != PAGE_SIZE || (data->offset + 8) > size) {
r = -EINVAL;
goto error_unref;
}
if (size != PAGE_SIZE || data->offset > (size - 8))
return -EINVAL;
if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
r = -EINVAL;
goto error_unref;
}
if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
return -EINVAL;
*offset = data->offset;
return 0;
error_unref:
amdgpu_bo_unref(&p->uf_bo);
return r;
}
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
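Two fixes land in this hunk: the error paths no longer unref the BO by hand (cleanup is left to the parser teardown, matching the "Fix possible UAF in CS code" item in the pull message), and the size check is rewritten so the arithmetic happens on the trusted constant rather than the user-supplied value. A sketch of the wrap the old form allowed, assuming a 32-bit user-controlled offset as in the uapi chunk struct:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 4096;			/* PAGE_SIZE */
	uint32_t offset = UINT32_MAX - 3;	/* hostile user-supplied value */

	/* Old form: offset + 8 wraps to 4, and 4 > 4096 is false, so the
	 * bogus offset slips through. */
	printf("old check rejects: %s\n", (offset + 8 > size) ? "yes" : "no");

	/* New form: only the constant side is adjusted, nothing can wrap,
	 * and the offset is rejected. */
	printf("new check rejects: %s\n", (offset > size - 8) ? "yes" : "no");
	return 0;
}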

View file

@ -885,13 +885,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
int ret;
amdgpu_asic_pre_asic_init(adev);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
return amdgpu_atomfirmware_asic_init(adev, true);
else
adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
return ret;
} else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
return 0;
}
/**
@ -4694,9 +4701,12 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
}
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");
goto mode1_reset_failed;
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)
goto mode1_reset_failed;
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@ -4707,7 +4717,17 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
udelay(1);
}
if (i >= adev->usec_timeout) {
ret = -ETIMEDOUT;
goto mode1_reset_failed;
}
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
mode1_reset_failed:
dev_err(adev->dev, "GPU mode1 reset failed\n");
return ret;
}
@ -4849,7 +4869,7 @@ static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
ktime_get_ts64(&adev->reset_time);
dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif

View file

@ -1390,6 +1390,7 @@ union gc_info {
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
struct gc_info_v2_1 v2_1;
};
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@ -1465,6 +1466,15 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
if (gc_info->v2.header.version_minor == 1) {
adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
}
break;
default:
dev_err(adev->dev,
@ -1478,6 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
union mall_info {
struct mall_info_v1_0 v1;
struct mall_info_v2_0 v2;
};
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
@ -1518,6 +1529,10 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
adev->gmc.mall_size = mall_size;
adev->gmc.m_half_use = half_use;
break;
case 2:
mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",

View file

@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@ -532,11 +534,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, unsigned int num_clips)
{
if (file)
return -ENOSYS;
return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
num_clips);
}
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = amdgpu_dirtyfb
};
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@ -1139,7 +1159,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_framebuffer_init(dev, &rfb->base,
&amdgpu_fb_funcs_atomic);
else
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
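The hunk above gives atomic drivers a second funcs table whose .dirty hook forwards fbcon damage to drm_atomic_helper_dirtyfb() while turning away direct userspace callers with -ENOSYS. A userspace sketch of the select-a-vtable-at-init pattern (the types and names below are stand-ins, not the DRM API):

#include <stdio.h>

struct fb_funcs {
	int (*dirty)(const char *who);
};

static int dirty_atomic(const char *who)
{
	/* Stand-in for the helper that flushes kernel-internal damage. */
	printf("flushing damage for %s\n", who);
	return 0;
}

static const struct fb_funcs funcs_legacy = { .dirty = NULL };
static const struct fb_funcs funcs_atomic = { .dirty = dirty_atomic };

int main(void)
{
	int uses_atomic = 1;	/* drm_drv_uses_atomic_modeset() stand-in */
	const struct fb_funcs *f = uses_atomic ? &funcs_atomic : &funcs_legacy;

	if (f->dirty)
		f->dirty("fbcon");
	return 0;
}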

View file

@ -241,6 +241,9 @@ struct amdgpu_gfx_config {
uint32_t gc_gl1c_per_sa;
uint32_t gc_gl1c_size_per_instance;
uint32_t gc_gl2c_per_gpu;
uint32_t gc_tcp_size_per_cu;
uint32_t gc_num_cu_per_sqc;
uint32_t gc_tcc_size;
};
struct amdgpu_cu_info {

View file

@ -2078,6 +2078,17 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
/* SECUREDISPLAY end */
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
{
struct psp_context *psp = &adev->psp;
int ret = 0;
if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
ret = psp->funcs->wait_for_bootloader(psp);
return ret;
}
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;

View file

@ -109,6 +109,7 @@ enum psp_reg_prog_id {
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_spl)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@ -533,4 +534,6 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
#endif

View file

@ -764,7 +764,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
int ret = 0;
int ret;
if (!con)
return -EINVAL;
@ -773,7 +773,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (enable &&
head->block != AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_ras_is_feature_allowed(adev, head))
goto out;
return 0;
/* Only enable gfx ras feature from host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@ -801,16 +801,16 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
goto out;
return ret;
}
kfree(info);
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
out:
if (head->block == AMDGPU_RAS_BLOCK__GFX)
kfree(info);
return ret;
return 0;
}
/* Only used in device probe stage and called only once. */
@ -2399,6 +2399,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
return true;
default:
return false;

View file

@ -158,9 +158,10 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2): /* Aldebaran */
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
case IP_VERSION(13, 0, 6):
return (adev->gmc.is_app_apu) ? false : true;
default:
return false;
}

View file

@ -203,6 +203,9 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
if (adev->rev_id == 0) {
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
REDUCE_FIFO_DEPTH_BY_2, 2);
} else {
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
SPARE, 0x1);
}
}
}
@ -860,11 +863,15 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
r = amdgpu_gfx_sysfs_init(adev);
r = amdgpu_gfx_ras_sw_init(adev);
if (r)
return r;
return amdgpu_gfx_ras_sw_init(adev);
if (!amdgpu_sriov_vf(adev))
r = amdgpu_gfx_sysfs_init(adev);
return r;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@ -885,7 +892,8 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
amdgpu_gfx_sysfs_fini(adev);
if (!amdgpu_sriov_vf(adev))
amdgpu_gfx_sysfs_fini(adev);
return 0;
}
@ -2219,15 +2227,6 @@ static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
WREG32_SOC15(GC, GET_INST(GC, xcc_id),
regRLC_CGTT_MGCG_OVERRIDE, data);
def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
if (enable)
data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
else
data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
if (def != data)
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
}
static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
@ -4048,7 +4047,8 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
uint32_t i;
uint32_t data;
data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
if (amdgpu_watchdog_timer.timeout_fatal_disable &&

View file

@ -360,8 +360,10 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
return ret;
}

View file

@ -437,6 +437,24 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
}
if (!amdgpu_sriov_vf(adev)) {
u32 baco_cntl;
for_each_inst(i, adev->aid_mask) {
baco_cntl = RREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL);
if (baco_cntl & (BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
BIF_BX0_BACO_CNTL__BACO_EN_MASK)) {
baco_cntl &= ~(
BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
BIF_BX0_BACO_CNTL__BACO_EN_MASK);
dev_dbg(adev->dev,
"Unsetting baco dummy mode %x",
baco_cntl);
WREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL,
baco_cntl);
}
}
}
}
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)

View file

@ -133,12 +133,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int retry_loop, ret;
for (retry_loop = 0; retry_loop < 70; retry_loop++) {
/* Wait for bootloader to signify that it is
ready, having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
0x80000000, 0xffffffff, false);
if (ret == 0)
break;
}
if (ret)
dev_warn(adev->dev, "Bootloader wait timed out");
return ret;
}
static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
int retry_loop, ret;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
@ -157,6 +177,19 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
return ret;
}
static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {
psp_v13_0_wait_for_vmbx_ready(psp);
return psp_v13_0_wait_for_bootloader(psp);
}
return 0;
}
static int psp_v13_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
@ -714,6 +747,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
.bootloader_load_spl = psp_v13_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,

View file

@ -559,8 +559,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
return AMD_RESET_METHOD_MODE1;
else
return AMD_RESET_METHOD_NONE;
return AMD_RESET_METHOD_MODE2;
default:
break;
}

View file

@ -384,7 +384,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
default:
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),

View file

@ -457,6 +457,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@ -472,6 +473,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@ -501,6 +503,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif

View file

@ -1686,6 +1686,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
if (r == -EBUSY)
r = -EAGAIN;
goto unreserve_out;
}

View file

@ -65,6 +65,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@ -4265,6 +4266,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@ -4374,6 +4376,20 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
replay_feature_enabled = true;
break;
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
}
}
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@ -4422,6 +4438,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
/*
* Disable psr if replay can be enabled
*/
if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
psr_feature_enabled = false;
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@ -6004,7 +6026,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
else
else if (!old_stream)
drm_mode_set_crtcinfo(&mode, 0);
/*

View file

@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@ -123,7 +124,12 @@ static void vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->enable) {
/*
* Prioritize replay, instead of psr
*/
if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
amdgpu_dm_replay_enable(vblank_work->stream, false);
else if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@ -132,6 +138,7 @@ static void vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}

View file

@ -1269,6 +1269,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;
/* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
* legacy gamma setup.
*/
if (crtc_state->cm_is_degamma_srgb &&
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
@ -1468,6 +1475,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
drm_plane_create_zpos_immutable_property(plane, 0);
} else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
unsigned int zpos = 1 + drm_plane_index(plane);
drm_plane_create_zpos_property(plane, zpos, 1, 254);
} else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
drm_plane_create_zpos_immutable_property(plane, 255);
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||

View file

@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)

View file

@ -32,6 +32,7 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
@ -132,6 +133,8 @@ static int dcn315_smu_send_msg_with_param(
unsigned int msg_id, unsigned int param)
{
uint32_t result;
uint32_t i = 0;
uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@ -148,10 +151,19 @@ static int dcn315_smu_send_msg_with_param(
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_37, param);
/* Trigger the message transaction by writing the message ID */
generic_write_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3, msg_id);
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
generic_write_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3, msg_id);
read_back_data = generic_read_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3);
if (read_back_data == msg_id)
break;
udelay(2);
smu_print("SMU msg id write fail %x times. \n", i + 1);
}
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
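The retry loop above writes the message ID through the indirect RSMU register pair, reads it back, and retries up to SMU_REGISTER_WRITE_RETRY_COUNT times before giving up. A self-contained model of the write-then-verify pattern (the flaky register below simulates the dropped writes the fix works around):

#include <stdio.h>

#define WRITE_RETRY_COUNT 5

static unsigned int fake_reg;
static int drops_left = 2;	/* simulate two dropped writes */

static void reg_write(unsigned int v)
{
	if (drops_left-- > 0)
		return;		/* write silently lost */
	fake_reg = v;
}

static unsigned int reg_read(void)
{
	return fake_reg;
}

static int write_verified(unsigned int msg_id)
{
	int i;

	for (i = 0; i < WRITE_RETRY_COUNT; i++) {
		reg_write(msg_id);
		if (reg_read() == msg_id)
			return 0;	/* write landed */
		/* the real code delays (udelay(2)) before retrying */
		printf("msg id write fail %d times\n", i + 1);
	}
	return -1;
}

int main(void)
{
	return write_verified(0x42);
}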

View file

@ -2073,12 +2073,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
/* Check for case where we are going from odm 2:1 to max
* pipe scenario. For these cases, we will call
* commit_minimal_transition_state() to exit out of odm 2:1
* first before processing new streams
/* ODM Combine 2:1 power optimization is only applied for single stream
* scenario, it uses extra pipes than needed to reduce power consumption
* We need to switch off this feature to make room for new streams.
*/
if (stream_count == dc->res_pool->pipe_count) {
if (stream_count > dc->current_state->stream_count &&
dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@ -3501,6 +3501,45 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
* operations to complete. It should be invoked as a pre-amble prior
* to full update programming before asserting any HW locks.
*/
int pipe_idx;
int opp_inst;
int opp_count = dc->res_pool->pipe_count;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
continue;
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
}
}
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@ -3519,24 +3558,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL) {
/* wait for all double-buffer activity to clear on all pipes */
int pipe_idx;
for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
if (!pipe_ctx->stream)
continue;
if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
}
}
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);

View file

@ -1106,29 +1106,6 @@ void dcn20_blank_pixel_data(
v_active,
offset);
if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
/* when exiting dynamic ODM need to reinit DPG state for unused pipes */
struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
odm_pipe = pipe_ctx->next_odm_pipe;
while (old_odm_pipe) {
if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx)
dc->hwss.set_disp_pattern_generator(dc,
old_odm_pipe,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
COLOR_DEPTH_888,
NULL,
0,
0,
0);
old_odm_pipe = old_odm_pipe->next_odm_pipe;
if (odm_pipe)
odm_pipe = odm_pipe->next_odm_pipe;
}
}
if (!blank)
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
@ -1584,17 +1561,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
int mpcc_inst = hubp->inst;
int opp_inst;
int opp_count = dc->res_pool->pipe_count;
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
}
}
hws->funcs.update_mpcc(dc, pipe_ctx);
}
@ -1722,11 +1688,16 @@ static void dcn20_program_pipe(
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
/* Only need to unblank on top pipe */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.odm ||
pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
!pipe_ctx->plane_state ||
!pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe

View file

@ -987,3 +987,20 @@ void dcn30_prepare_bandwidth(struct dc *dc,
}
}
void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params)
{
unsigned int i;
unsigned int triggers = 0;
if (params->triggers.surface_update)
triggers |= 0x100;
if (params->triggers.cursor_update)
triggers |= 0x8;
if (params->triggers.force_trigger)
triggers |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}

View file

@ -87,5 +87,7 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params);
#endif /* __DC_HWSS_DCN30_H__ */

View file

@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,

View file

@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,

View file

@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,

View file

@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,

View file

@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn10_set_static_screen_control,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,

View file

@ -2564,18 +2564,128 @@ static int find_optimal_free_pipe_as_secondary_dpp_pipe(
return free_pipe_idx;
}
static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
struct pipe_ctx *next_odm_mpo_pipe = NULL;
int primary_index, preferred_pipe_idx;
struct pipe_ctx *old_primary_pipe = NULL;
/*
* Modified from find_idle_secondary_pipe
* With windowed MPO and ODM, we want to avoid the case where we want a
* free pipe for the left side but the free pipe is being used on the
* right side.
* Add check on current_state if the primary_pipe is the left side,
* to check the right side ( primary_pipe->next_odm_pipe ) to see if
* it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
* - If so, then don't use this pipe
* EXCEPTION - 3 plane ( 2 MPO plane ) case
* - in this case, the primary pipe has already gotten a free pipe for the
* MPO window in the left
* - when it tries to get a free pipe for the MPO window on the right,
* it will see that it is already assigned to the right side
* ( primary_pipe->next_odm_pipe ). But in this case, we want this
* free pipe, since it will be for the right side. So add an
* additional condition, that skipping the free pipe on the right only
* applies if the primary pipe has no bottom pipe currently assigned
*/
if (primary_pipe) {
primary_index = primary_pipe->pipe_idx;
old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
&& (!primary_pipe->bottom_pipe))
next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
/*
* search backwards for the second pipe to keep pipe
* assignment more consistent
*/
if (!secondary_pipe)
for (i = pool->pipe_count - 1; i >= 0; i--) {
if ((res_ctx->pipe_ctx[i].stream == NULL) &&
!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
secondary_pipe = &res_ctx->pipe_ctx[i];
secondary_pipe->pipe_idx = i;
break;
}
}
return secondary_pipe;
}
static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
struct dc_stream_state *stream,
const struct pipe_ctx *head_pipe)
{
struct resource_context *res_ctx = &state->res_ctx;
struct pipe_ctx *idle_pipe, *pipe;
struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
int head_index;
if (!head_pipe)
ASSERT(0);
/*
* Modified from dcn20_acquire_idle_pipe_for_layer
* Check if head_pipe in old_context already has bottom_pipe allocated.
* - If so, check if that pipe is available in the current context.
* -- If so, reuse pipe from old_context
*/
head_index = head_pipe->pipe_idx;
pipe = &old_ctx->pipe_ctx[head_index];
if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
} else {
idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
if (!idle_pipe)
return NULL;
}
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
return idle_pipe;
}
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
int free_pipe_idx =
find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
int free_pipe_idx;
struct pipe_ctx *free_pipe;
if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
&cur_ctx->res_ctx, &new_ctx->res_ctx,
pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
free_pipe->pipe_idx = free_pipe_idx;

View file

@ -1099,6 +1099,11 @@ void dcn20_calculate_dlg_params(struct dc *dc,
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
@ -1927,7 +1932,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@ -1951,15 +1955,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
}
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@ -2232,7 +2227,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@ -2261,15 +2255,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
dcn20_adjust_freesync_v_startup(
&context->res_ctx.pipe_ctx[i].stream->timing,
&context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
}
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;

View file

@ -293,6 +293,17 @@ static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_
return num_lines;
}
static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
{
unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
v_blank = timing->v_total - v_active;
v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
return v_back_porch;
}
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@ -310,6 +321,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
unsigned int num_lines = 0;
unsigned int v_back_porch = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
@ -323,9 +335,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
v_back_porch = get_vertical_back_porch(timing);
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
// vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
// + 2 is because
// 1 -> VStartup_start should be 1 line before VSync
// 1 -> always reserve 1 line between start of vblank to vstartup signal
pipes[pipe_cnt].pipe.dest.vblank_nom =
max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
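The comment in the hunk encodes a lower bound: vblank_nom has to cover the VSync width plus the back porch plus two extra lines (one because VStartup starts a line before VSync, one reserved between vblank start and the vstartup signal). A standalone sketch of the clamp, with parameters loosely mirroring dc_crtc_timing:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static unsigned int vblank_nom(unsigned int v_total, unsigned int v_active,
			       unsigned int v_front_porch,
			       unsigned int v_sync_width,
			       unsigned int num_lines,
			       unsigned int max_allowed)
{
	unsigned int v_blank = v_total - v_active;
	unsigned int v_back_porch = v_blank - v_front_porch - v_sync_width;
	unsigned int nom = min_u(v_blank, num_lines);

	nom = max_u(nom, v_sync_width + v_back_porch + 2);
	return min_u(nom, max_allowed);
}

int main(void)
{
	/* 1080p-like timing: v_total=1125, v_active=1080, fp=4, sync=5,
	 * so the floor is 5 + 36 + 2 = 43 and the result stays at 45. */
	printf("vblank_nom = %u\n", vblank_nom(1125, 1080, 4, 5, 60, 511));
	return 0;
}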

View file

@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
if (mid_point_frames_ceil &&
(last_render_time_in_us / mid_point_frames_ceil) <
in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
if (last_render_time_in_us / frames_to_insert <
in_out_vrr->min_duration_in_us){
if (frames_to_insert &&
(last_render_time_in_us / frames_to_insert) <
in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
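Both hunks guard the same hazard: a frame count derived from timing math can legitimately end up zero, so the division is now gated on the divisor. A minimal illustration of the guarded form:

#include <stdio.h>

static int below_min_duration(unsigned int last_render_us,
			      unsigned int frames, unsigned int min_us)
{
	/* frames can be 0 here; short-circuit before dividing. */
	return frames && (last_render_us / frames) < min_us;
}

int main(void)
{
	printf("%d\n", below_min_duration(16666, 0, 8000));	/* 0, no trap */
	printf("%d\n", below_min_duration(16666, 3, 8000));	/* 1 */
	return 0;
}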

View file

@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@ -250,6 +251,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
DC_DISABLE_REPLAY = 0x50,
DC_ENABLE_DPIA_TRACE = 0x80,
};

View file

@ -3117,6 +3117,24 @@ enum atom_umc_config1_def {
UMC_CONFIG1__ENABLE_ECC_CAPABLE = 0x00010000,
};
struct atom_umc_info_v4_0 {
struct atom_common_table_header table_header;
uint32_t ucode_reserved[5];
uint8_t umcip_min_ver;
uint8_t umcip_max_ver;
uint8_t vram_type;
uint8_t umc_config;
uint32_t mem_refclk_10khz;
uint32_t clk_reserved[4];
uint32_t golden_reserved;
uint32_t umc_config1;
uint32_t reserved[2];
uint8_t channel_num;
uint8_t channel_width;
uint8_t channel_reserve[2];
uint8_t umc_info_reserved[16];
};
/*
***************************************************************************
Data Table vram_info structure

View file

@ -30,7 +30,7 @@
#define GC_TABLE_ID 0x4347
#define HARVEST_TABLE_SIGNATURE 0x56524148
#define VCN_INFO_TABLE_ID 0x004E4356
#define MALL_INFO_TABLE_ID 0x4D414C4C
#define MALL_INFO_TABLE_ID 0x4C4C414D
typedef enum
{
@ -280,6 +280,36 @@ struct gc_info_v2_0 {
uint32_t gc_num_packer_per_sc;
};
struct gc_info_v2_1 {
struct gpu_info_header header;
uint32_t gc_num_se;
uint32_t gc_num_cu_per_sh;
uint32_t gc_num_sh_per_se;
uint32_t gc_num_rb_per_se;
uint32_t gc_num_tccs;
uint32_t gc_num_gprs;
uint32_t gc_num_max_gs_thds;
uint32_t gc_gs_table_depth;
uint32_t gc_gsprim_buff_depth;
uint32_t gc_parameter_cache_depth;
uint32_t gc_double_offchip_lds_buffer;
uint32_t gc_wave_size;
uint32_t gc_max_waves_per_simd;
uint32_t gc_max_scratch_slots_per_cu;
uint32_t gc_lds_size;
uint32_t gc_num_sc_per_se;
uint32_t gc_num_packer_per_sc;
/* new for v2_1 */
uint32_t gc_num_tcp_per_sh;
uint32_t gc_tcp_size_per_cu;
uint32_t gc_num_sdp_interface;
uint32_t gc_num_cu_per_sqc;
uint32_t gc_instruction_cache_size_per_sqc;
uint32_t gc_scalar_data_cache_size_per_sqc;
uint32_t gc_tcc_size;
};
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
@ -312,6 +342,12 @@ struct mall_info_v1_0 {
uint32_t reserved[5];
};
struct mall_info_v2_0 {
struct mall_info_header header;
uint32_t mall_size_per_umc;
uint32_t reserved[8];
};
#define VCN_INFO_TABLE_MAX_NUM_INSTANCES 4
struct vcn_info_header {
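The MALL_INFO_TABLE_ID change above brings the define in line with its neighbors (HARVEST_TABLE_SIGNATURE, VCN_INFO_TABLE_ID): the four ASCII bytes of the ID are read as a little-endian u32, so "MALL" must be encoded as 0x4C4C414D, not 0x4D414C4C. A quick check, assuming a little-endian host:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int id;

	/* "MALL" is the byte sequence 0x4D 0x41 0x4C 0x4C; a little-endian
	 * load turns it into 0x4C4C414D. The old value 0x4D414C4C matches
	 * the bytes "LLAM" instead. */
	memcpy(&id, "MALL", 4);
	printf("0x%08X\n", id);	/* prints 0x4C4C414D */
	return 0;
}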

View file

@ -3311,8 +3311,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(gc_ver != IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
return 0;
/* hotspot temperature for gc 9,4,3*/
@ -3324,9 +3326,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
(attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
@ -3471,6 +3471,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");

View file

@ -1031,10 +1031,7 @@ struct pptable_funcs {
enum smu_feature_mask mask);
/**
* @notify_display_change: Enable fast memory clock switching.
*
* Allows for fine grained memory clock switching but has more stringent
* timing requirements.
* @notify_display_change: General interface call to let SMU know about DC change
*/
int (*notify_display_change)(struct smu_context *smu);

View file

@ -138,7 +138,10 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
#define PPSMC_Message_Count 0x4D
#define PPSMC_MSG_DALNotPresent 0x4E
#define PPSMC_Message_Count 0x4F
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1

View file

@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
#define SMU_METRICS_TABLE_VERSION 0x5
#define SMU_METRICS_TABLE_VERSION 0x7
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@ -198,7 +198,7 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t SocketThmResidencyAcc;
uint32_t VrThmResidencyAcc;
uint32_t HbmThmResidencyAcc;
uint32_t spare;
uint32_t GfxLockXCDMak;
// New Items at end to maintain driver compatibility
uint32_t GfxclkFrequency[8];

View file

@ -83,13 +83,27 @@
#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
#define PPSMC_MSG_PrepareForDriverUnload 0x34
#define PPSMC_Message_Count 0x35
#define PPSMC_MSG_ReadThrottlerLimit 0x35
#define PPSMC_MSG_QueryValidMcaCount 0x36
#define PPSMC_MSG_McaBankDumpDW 0x37
#define PPSMC_MSG_GetCTFLimit 0x38
#define PPSMC_Message_Count 0x39
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
//PPSMC Throttling Limit Types for driver msg argument
#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
//CTF/Throttle Limit types
#define PPSMC_AID_THM_TYPE 0x1
#define PPSMC_CCD_THM_TYPE 0x2
#define PPSMC_XCD_THM_TYPE 0x3
#define PPSMC_HBM_THM_TYPE 0x4
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;

View file

@ -84,6 +84,7 @@
__SMU_DUMMY_MAP(SetTjMax), \
__SMU_DUMMY_MAP(SetFanTemperatureTarget), \
__SMU_DUMMY_MAP(PrepareMp1ForUnload), \
__SMU_DUMMY_MAP(GetCTFLimit), \
__SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
__SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
__SMU_DUMMY_MAP(DramLogSetDramSize), \
@ -245,7 +246,8 @@
__SMU_DUMMY_MAP(AllowGpo), \
__SMU_DUMMY_MAP(Mode2Reset), \
__SMU_DUMMY_MAP(RequestI2cTransaction), \
__SMU_DUMMY_MAP(GetMetricsTable),
__SMU_DUMMY_MAP(GetMetricsTable), \
__SMU_DUMMY_MAP(DALNotPresent),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type

View file

@ -837,12 +837,8 @@ int smu_v13_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
if (!smu->pm_enabled)
return ret;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
if (!amdgpu_device_has_dc_support(smu->adev))
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
return ret;
}

View file

@ -162,6 +162,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@ -2687,6 +2688,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
.notify_display_change = smu_v13_0_notify_display_change,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)

View file

@ -132,6 +132,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@ -2081,6 +2082,55 @@ out:
return ret;
}
static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
u32 aid_temp, xcd_temp, mem_temp;
uint32_t smu_version;
u32 ccd_temp = 0;
int ret;
if (amdgpu_sriov_vf(smu->adev))
return 0;
if (!range)
return -EINVAL;
/* Check smu version, GetCTFLimit message only supported for smu version 85.69 or higher */
smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (smu_version < 0x554500)
return 0;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_AID_THM_TYPE, &aid_temp);
if (ret)
goto failed;
if (adev->flags & AMD_IS_APU) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_CCD_THM_TYPE, &ccd_temp);
if (ret)
goto failed;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_XCD_THM_TYPE, &xcd_temp);
if (ret)
goto failed;
range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
PPSMC_HBM_THM_TYPE, &mem_temp);
if (ret)
goto failed;
range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
failed:
return ret;
}
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@ -2108,8 +2158,7 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
/* TODO: Enable this when FW support is added */
return false;
return true;
}
static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
@@ -2177,6 +2226,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
 	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+	.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
 	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
 	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
 	.mode1_reset = smu_v13_0_6_mode1_reset,
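
A note on the 0x554500 gate in the new thermal helper: it encodes firmware version 85.69.0, assuming the one-byte-per-field packing the in-code comment implies. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* assumed packing: (major << 16) | (minor << 8) | patch */
	uint32_t v85_69_0 = (85u << 16) | (69u << 8) | 0u;

	assert(v85_69_0 == 0x554500);			/* the gate above */
	assert(((84u << 16) | (99u << 8)) < v85_69_0);	/* 84.99 is rejected */
	assert(((85u << 16) | (70u << 8)) > v85_69_0);	/* 85.70 is accepted */
	return 0;
}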

@@ -58,6 +58,7 @@ struct i915_perf_group;
 
 typedef u32 intel_engine_mask_t;
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
 
 struct intel_hw_status_page {
 	struct list_head timelines;

@@ -5470,6 +5470,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
 
 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
 
+	BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+	ve->base.mask = VIRTUAL_ENGINES;
+
 	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {

@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	i915_sw_fence_fini(&rq->semaphore);
 
 	/*
-	 * Keep one request on each engine for reserved use under mempressure
-	 * do not use with virtual engines as this really is only needed for
-	 * kernel contexts.
+	 * Keep one request on each engine for reserved use under mempressure.
 	 *
 	 * We do not hold a reference to the engine here and so have to be
 	 * very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * know that if the rq->execution_mask is a single bit, rq->engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (!intel_engine_is_virtual(rq->engine) &&
-	    is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->execution_mask) &&
 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;
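
The point of the three i915 hunks taken together: the old guard had to dereference rq->engine to call intel_engine_is_virtual(), and for a GuC virtual engine that pointer is exactly what may already have been freed. Reserving the top bit of the engine mask for virtual engines means a virtual engine's execution mask always carries that bit on top of its sibling bits (assuming the sibling masks are still OR'ed in by the loop below the hunk, as in the pre-existing code), so the mask can never collapse to a single bit and is_power_of_2() alone keeps such requests out of the request pool. A small standalone illustration of that invariant:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t intel_engine_mask_t;

#define BIT(n)			(1u << (n))
#define BITS_PER_TYPE(t)	(8 * sizeof(t))
#define VIRTUAL_ENGINES		BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)

static bool is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

int main(void)
{
	intel_engine_mask_t vcs0 = BIT(2), vcs1 = BIT(3);

	/* physical engine: a single bit, eligible for the request pool */
	assert(is_power_of_2(vcs0));

	/* virtual engine: reserved top bit plus sibling bits, never a
	 * single bit, so the pool path is skipped without touching
	 * rq->engine */
	assert(!is_power_of_2(VIRTUAL_ENGINES | vcs0 | vcs1));
	assert(!is_power_of_2(VIRTUAL_ENGINES | vcs0));
	return 0;
}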

@@ -1122,18 +1122,11 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
 	PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
 	PUSH_KICK(push);
 
-	ret = nouveau_fence_new(pfence);
+	ret = nouveau_fence_new(pfence, chan);
 	if (ret)
 		goto fail;
 
-	ret = nouveau_fence_emit(*pfence, chan);
-	if (ret)
-		goto fail_fence_unref;
-
 	return 0;
-
-fail_fence_unref:
-	nouveau_fence_unref(pfence);
 fail:
 	spin_lock_irqsave(&dev->event_lock, flags);
 	list_del(&s->head);

@@ -875,16 +875,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 		if (ret)
 			goto out_unlock;
 
-		ret = nouveau_fence_new(&fence);
+		ret = nouveau_fence_new(&fence, chan);
 		if (ret)
 			goto out_unlock;
 
-		ret = nouveau_fence_emit(fence, chan);
-		if (ret) {
-			nouveau_fence_unref(&fence);
-			goto out_unlock;
-		}
-
 		/* TODO: figure out a better solution here
 		 *
 		 * wait on the fence here explicitly as going through

@@ -70,11 +70,9 @@ nouveau_channel_idle(struct nouveau_channel *chan)
 		struct nouveau_fence *fence = NULL;
 		int ret;
 
-		ret = nouveau_fence_new(&fence);
+		ret = nouveau_fence_new(&fence, chan);
 		if (!ret) {
-			ret = nouveau_fence_emit(fence, chan);
-			if (!ret)
-				ret = nouveau_fence_wait(fence, false, false);
+			ret = nouveau_fence_wait(fence, false, false);
 			nouveau_fence_unref(&fence);
 		}

@@ -209,8 +209,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 		goto done;
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, dmem->migrate.chan);
+	nouveau_fence_new(&fence, dmem->migrate.chan);
 	migrate_vma_pages(&args);
 	nouveau_dmem_fence_done(&fence);
 	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -403,8 +402,7 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 		}
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	nouveau_dmem_fence_done(&fence);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
 		addr += PAGE_SIZE;
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+	nouveau_fence_new(&fence, drm->dmem->migrate.chan);
 	migrate_vma_pages(args);
 	nouveau_dmem_fence_done(&fence);
 	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

@@ -96,7 +96,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
 	unsigned long index;
 	int ret;
 
-	ret = nouveau_fence_new(&exec_job->fence);
+	/* Create a new fence, but do not emit yet. */
+	ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
 	if (ret)
 		return ret;
 
@@ -170,13 +171,17 @@ nouveau_exec_job_run(struct nouveau_job *job)
 		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
 	}
 
-	ret = nouveau_fence_emit(fence, chan);
+	ret = nouveau_fence_emit(fence);
 	if (ret) {
+		nouveau_fence_unref(&exec_job->fence);
 		NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		return ERR_PTR(ret);
 	}
 
+	/* The fence was emitted successfully, set the job's fence pointer to
+	 * NULL in order to avoid freeing it up when the job is cleaned up.
+	 */
+	exec_job->fence = NULL;
+
 	return &fence->base;
 
@@ -189,7 +194,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
 	nouveau_job_free(job);
 
-	nouveau_fence_unref(&exec_job->fence);
+	kfree(exec_job->fence);
 	kfree(exec_job->push.s);
 	kfree(exec_job);
 }

@@ -205,16 +205,13 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 }
 
 int
-nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_emit(struct nouveau_fence *fence)
 {
+	struct nouveau_channel *chan = fence->channel;
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
 	int ret;
 
-	if (unlikely(!chan->fence))
-		return -ENODEV;
-
-	fence->channel = chan;
 	fence->timeout = jiffies + (15 * HZ);
 
 	if (priv->uevent)
@@ -406,18 +403,41 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
 }
 
 int
-nouveau_fence_new(struct nouveau_fence **pfence)
+nouveau_fence_create(struct nouveau_fence **pfence,
+		     struct nouveau_channel *chan)
 {
 	struct nouveau_fence *fence;
 
+	if (unlikely(!chan->fence))
+		return -ENODEV;
+
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (!fence)
 		return -ENOMEM;
 
+	fence->channel = chan;
+
 	*pfence = fence;
 	return 0;
 }
 
+int
+nouveau_fence_new(struct nouveau_fence **pfence,
+		  struct nouveau_channel *chan)
+{
+	int ret = 0;
+
+	ret = nouveau_fence_create(pfence, chan);
+	if (ret)
+		return ret;
+
+	ret = nouveau_fence_emit(*pfence);
+	if (ret)
+		nouveau_fence_unref(pfence);
+
+	return ret;
+}
+
 static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
 {
 	return "nouveau";

@@ -17,10 +17,11 @@ struct nouveau_fence {
 	unsigned long timeout;
 };
 
-int nouveau_fence_new(struct nouveau_fence **);
+int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
+int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
 void nouveau_fence_unref(struct nouveau_fence **);
 
-int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_emit(struct nouveau_fence *);
 bool nouveau_fence_done(struct nouveau_fence *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
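
Taken together, the prototypes above summarize the rework: the channel is bound when the fence is created, nouveau_fence_emit() takes only the fence, and nouveau_fence_new() now composes create + emit and drops the fence on emit failure. That is what lets most callers above collapse the old alloc/emit/unref dance into a single call; only nouveau_exec, which must emit later in the job lifecycle, keeps the two-step form. A self-contained userspace model of that contract (stub types, not driver code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct channel { int has_fence_ctx; };
struct fence { struct channel *channel; };

/* models nouveau_fence_create(): the channel is bound at creation time */
static int fence_create(struct fence **pfence, struct channel *chan)
{
	struct fence *f;

	if (!chan->has_fence_ctx)	/* mirrors the !chan->fence check */
		return -ENODEV;

	f = calloc(1, sizeof(*f));
	if (!f)
		return -ENOMEM;

	f->channel = chan;
	*pfence = f;
	return 0;
}

/* models nouveau_fence_emit(): the channel now comes from the fence */
static int fence_emit(struct fence *f)
{
	printf("emit on channel %p\n", (void *)f->channel);
	return 0;
}

static void fence_unref(struct fence **pfence)
{
	free(*pfence);
	*pfence = NULL;
}

/* models the new nouveau_fence_new(): create + emit in one call, with
 * cleanup on failure, so a half-initialized fence never escapes */
static int fence_new(struct fence **pfence, struct channel *chan)
{
	int ret = fence_create(pfence, chan);

	if (ret)
		return ret;

	ret = fence_emit(*pfence);
	if (ret)
		fence_unref(pfence);
	return ret;
}

int main(void)
{
	struct channel chan = { .has_fence_ctx = 1 };
	struct fence *fence = NULL;

	if (!fence_new(&fence, &chan))
		fence_unref(&fence);
	return 0;
}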

@@ -914,11 +914,8 @@ revalidate:
 		}
 	}
 
-	ret = nouveau_fence_new(&fence);
-	if (!ret)
-		ret = nouveau_fence_emit(fence, chan);
+	ret = nouveau_fence_new(&fence, chan);
 	if (ret) {
-		nouveau_fence_unref(&fence);
 		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;