Merge tag 'drm-next-5.4-2019-08-23' of git://people.freedesktop.org/~agd5f/linux into drm-next
drm-next-5.4-2019-08-23:

amdgpu:
- Enable power features on Navi12
- Enable power features on Arcturus
- RAS updates
- Initial Renoir APU support
- Enable power features on Renoir
- DC gamma fixes
- DCN2 fixes
- GPU reset support for Picasso
- Misc cleanups and fixes

scheduler:
- Possible race fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823202620.3870-1-alexander.deucher@amd.com
Commit
578d2342ec
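One interface change recurs throughout this pull: amdgpu_gmc_flush_gpu_tlb() now takes a vmhub argument, and callers flush each VM hub in a loop over adev->num_vmhubs instead of flushing a single implicit hub. The standalone C sketch below models that calling pattern only; the stub struct and the printf body are illustrative stand-ins, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct amdgpu_device; the real structure
 * carries far more state.  num_vmhubs is e.g. 2 on gfx9/gfx10 parts
 * (one GFX hub plus one MM hub). */
struct stub_device {
	int num_vmhubs;
};

/* Models the new 4-argument hook (adev, vmid, vmhub, flush_type). */
static void flush_gpu_tlb(struct stub_device *adev, uint32_t vmid,
			  uint32_t vmhub, uint32_t flush_type)
{
	printf("flush vmid %u on vmhub %u (type %u)\n",
	       (unsigned)vmid, (unsigned)vmhub, (unsigned)flush_type);
}

int main(void)
{
	struct stub_device adev = { .num_vmhubs = 2 };
	uint32_t vmid = 1, flush_type = 0;
	int i;

	/* The calling pattern used across this merge: flush every hub. */
	for (i = 0; i < adev.num_vmhubs; i++)
		flush_gpu_tlb(&adev, vmid, (uint32_t)i, flush_type);
	return 0;
}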
@@ -100,7 +100,8 @@ amdgpu-y += \
 	amdgpu_psp.o \
 	psp_v3_1.o \
 	psp_v10_0.o \
-	psp_v11_0.o
+	psp_v11_0.o \
+	psp_v12_0.o
 
 # add SMC block
 amdgpu-y += \
@@ -154,6 +155,7 @@ amdgpu-y += \
 
 # add ATHUB block
 amdgpu-y += \
+	athub_v1_0.o \
 	athub_v2_0.o
 
 # add amdkfd interfaces

@@ -87,6 +87,7 @@
 #include "amdgpu_discovery.h"
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
+#include "amdgpu_mmhub.h"
 
 #define MAX_GPU_INSTANCE 16
 
@@ -788,7 +789,6 @@ struct amdgpu_device {
 	int usec_timeout;
 	const struct amdgpu_asic_funcs *asic_funcs;
 	bool shutdown;
-	bool need_dma32;
 	bool need_swiotlb;
 	bool accel_working;
 	struct notifier_block acpi_nb;
@@ -976,6 +976,7 @@ struct amdgpu_device {
 
 	const struct amdgpu_nbio_funcs *nbio_funcs;
 	const struct amdgpu_df_funcs *df_funcs;
+	const struct amdgpu_mmhub_funcs *mmhub_funcs;
 
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work delayed_init_work;

@@ -801,42 +801,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
 }
 
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	uint32_t req = (1 << vmid) |
-		(0 << GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT) |/* legacy */
-		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK |
-		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK |
-		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK |
-		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK |
-		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK;
-
-	mutex_lock(&adev->srbm_mutex);
-
-	/* Use light weight invalidation.
-	 *
-	 * TODO 1: agree on the right set of invalidation registers for
-	 * KFD use. Use the last one for now. Invalidate only GCHUB as
-	 * SDMA is now moved to GCHUB
-	 *
-	 * TODO 2: support range-based invalidation, requires kfg2kgd
-	 * interface change
-	 */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32),
-			0xffffffff);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32),
-			0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ), req);
-
-	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK)) &
-		 (1 << vmid)))
-		cpu_relax();
-
-	mutex_unlock(&adev->srbm_mutex);
-}
-
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
 {
 	signed long r;
@@ -877,7 +841,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 		if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
 			if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
 				== pasid) {
-				write_vmid_invalidate_request(kgd, vmid);
+				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+							 AMDGPU_GFXHUB_0, 0);
 				break;
 			}
 		}
@@ -895,7 +860,7 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
 		return 0;
 	}
 
-	write_vmid_invalidate_request(kgd, vmid);
+	amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
 	return 0;
 }
 

@@ -670,7 +670,7 @@ static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	int vmid;
+	int vmid, i;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	uint32_t flush_type = 0;
 
@@ -689,8 +689,9 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 		if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
 			if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
 				== pasid) {
-				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-							 flush_type);
+				for (i = 0; i < adev->num_vmhubs; i++)
+					amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+								 i, flush_type);
 				break;
 			}
 		}
@@ -702,6 +703,7 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	int i;
 
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("non kfd vmid %d\n", vmid);
@@ -723,7 +725,9 @@ int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
 	 * TODO 2: support range-based invalidation, requires kfg2kgd
 	 * interface change
 	 */
-	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
+	for (i = 0; i < adev->num_vmhubs; i++)
+		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+
 	return 0;
 }
 

@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 
+	if (p->post_deps)
+		return -EINVAL;
+
 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 				     GFP_KERNEL);
 	p->num_post_deps = 0;
@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 
 
 static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
-						      struct amdgpu_cs_chunk
-						      *chunk)
+						      struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
 	unsigned num_deps;
@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 
+	if (p->post_deps)
+		return -EINVAL;
+
 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 				     GFP_KERNEL);
 	p->num_post_deps = 0;

@@ -42,7 +42,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG] = 1,
 };
 
-static int amdgput_ctx_total_num_entities(void)
+static int amdgpu_ctx_total_num_entities(void)
 {
 	unsigned i, num_entities = 0;
 
@@ -73,7 +73,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct drm_file *filp,
 			   struct amdgpu_ctx *ctx)
 {
-	unsigned num_entities = amdgput_ctx_total_num_entities();
+	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	unsigned i, j, k;
 	int r;
 
@@ -207,7 +207,7 @@ error_free_fences:
 static void amdgpu_ctx_fini(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	unsigned num_entities = amdgput_ctx_total_num_entities();
+	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned i, j;
 
@@ -289,10 +289,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
 
-	num_entities = 0;
-	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
-		num_entities += amdgpu_ctx_num_entities[i];
-
+	num_entities = amdgpu_ctx_total_num_entities();
 	for (i = 0; i < num_entities; i++)
 		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
 
@@ -354,7 +351,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr;
-	uint32_t ras_counter;
+	unsigned long ras_counter;
 
 	if (!fpriv)
 		return -EINVAL;
@@ -524,7 +521,7 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
-	unsigned num_entities = amdgput_ctx_total_num_entities();
+	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	enum drm_sched_priority ctx_prio;
 	unsigned i;
 
@@ -544,21 +541,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
 			       struct drm_sched_entity *entity)
 {
 	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
-	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
-	struct dma_fence *other = centity->fences[idx];
+	struct dma_fence *other;
+	unsigned idx;
+	long r;
 
-	if (other) {
-		signed long r;
-		r = dma_fence_wait(other, true);
-		if (r < 0) {
-			if (r != -ERESTARTSYS)
-				DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	spin_lock(&ctx->ring_lock);
+	idx = centity->sequence & (amdgpu_sched_jobs - 1);
+	other = dma_fence_get(centity->fences[idx]);
+	spin_unlock(&ctx->ring_lock);
 
-			return r;
-		}
-	}
+	if (!other)
+		return 0;
 
-	return 0;
+	r = dma_fence_wait(other, true);
+	if (r < 0 && r != -ERESTARTSYS)
+		DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+	dma_fence_put(other);
+	return r;
 }
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
@@ -569,7 +569,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 {
-	unsigned num_entities = amdgput_ctx_total_num_entities();
+	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id, i;
@@ -591,7 +591,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
-	unsigned num_entities = amdgput_ctx_total_num_entities();
+	unsigned num_entities = amdgpu_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id, i;

@@ -49,8 +49,8 @@ struct amdgpu_ctx {
 	enum drm_sched_priority override_priority;
 	struct mutex lock;
 	atomic_t guilty;
-	uint32_t ras_counter_ce;
-	uint32_t ras_counter_ue;
+	unsigned long ras_counter_ce;
+	unsigned long ras_counter_ue;
 };
 
 struct amdgpu_ctx_mgr {

@@ -71,6 +71,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
@@ -102,6 +103,7 @@ static const char *amdgpu_asic_name[] = {
 	"VEGA20",
 	"RAVEN",
 	"ARCTURUS",
+	"RENOIR",
 	"NAVI10",
 	"NAVI14",
 	"NAVI12",
@@ -1427,6 +1429,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_ARCTURUS:
 		chip_name = "arcturus";
 		break;
+	case CHIP_RENOIR:
+		chip_name = "renoir";
+		break;
 	case CHIP_NAVI10:
 		chip_name = "navi10";
 		break;
@@ -1579,7 +1584,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 	case CHIP_ARCTURUS:
-		if (adev->asic_type == CHIP_RAVEN)
+	case CHIP_RENOIR:
+		if (adev->asic_type == CHIP_RAVEN ||
+		    adev->asic_type == CHIP_RENOIR)
 			adev->family = AMDGPU_FAMILY_RV;
 		else
 			adev->family = AMDGPU_FAMILY_AI;
@@ -3518,6 +3525,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_VEGA20:
 		case CHIP_VEGA10:
 		case CHIP_VEGA12:
+		case CHIP_RAVEN:
 			break;
 		default:
 			goto disabled;

@@ -79,9 +79,10 @@
  * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
  * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
  * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
+ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	33
+#define KMS_DRIVER_MINOR	34
 #define KMS_DRIVER_PATCHLEVEL	0
 
 #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH	256
@@ -142,7 +143,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry;
+int amdgpu_noretry = 1;
 
 struct amdgpu_mgpu_info mgpu_info = {
 	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -610,7 +611,7 @@ MODULE_PARM_DESC(mes,
 module_param_named(mes, amdgpu_mes, int, 0444);
 
 MODULE_PARM_DESC(noretry,
-	"Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+	"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
 #ifdef CONFIG_HSA_AMD
@@ -1000,6 +1001,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT},
 	/* Navi10 */
 	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
@@ -1008,6 +1010,11 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+	/* Navi14 */
+	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+
+	/* Renoir */
+	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
 
 	{0, 0, 0}
 };

@@ -251,7 +251,9 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+	for (i = 0; i < adev->num_vmhubs; i++)
+		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+
 	return 0;
 }
 
@@ -310,9 +312,9 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     uint64_t flags)
 {
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	unsigned i,t,p;
+	unsigned t,p;
 #endif
-	int r;
+	int r, i;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART !\n");
@@ -336,7 +338,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+	for (i = 0; i < adev->num_vmhubs; i++)
+		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 	return 0;
 }
 

@@ -32,7 +32,6 @@ struct amdgpu_gds {
 	uint32_t gws_size;
 	uint32_t oa_size;
 	uint32_t gds_compute_max_wave_id;
-	uint32_t vgt_gs_max_wave_id;
 };
 
 struct amdgpu_gds_reg_offset {

@@ -220,6 +220,14 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
 	u64 size_af, size_bf;
 
+	if (amdgpu_sriov_vf(adev)) {
+		mc->agp_start = 0xffffffff;
+		mc->agp_end = 0x0;
+		mc->agp_size = 0;
+
+		return;
+	}
+
 	if (mc->fb_start > mc->gart_start) {
 		size_bf = (mc->fb_start & sixteen_gb_mask) -
 			ALIGN(mc->gart_end + 1, sixteen_gb);

@@ -89,8 +89,8 @@ struct amdgpu_vmhub {
  */
 struct amdgpu_gmc_funcs {
 	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid, uint32_t flush_type);
+	void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
+			      uint32_t vmhub, uint32_t flush_type);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -177,10 +177,11 @@ struct amdgpu_gmc {
 
 	struct amdgpu_xgmi xgmi;
 	struct amdgpu_irq_src ecc_irq;
-	struct ras_common_if *ras_if;
+	struct ras_common_if *umc_ras_if;
+	struct ras_common_if *mmhub_ras_if;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))

@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MMHUB_H__
+#define __AMDGPU_MMHUB_H__
+
+struct amdgpu_mmhub_funcs {
+	void (*ras_init)(struct amdgpu_device *adev);
+	void (*query_ras_error_count)(struct amdgpu_device *adev,
+					void *ras_error_status);
+};
+
+#endif

@@ -246,8 +246,9 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 	bp.size = size;
 	bp.byte_align = align;
 	bp.domain = domain;
-	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
+		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = NULL;
 

@@ -2828,10 +2828,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 			DRM_ERROR("failed to create device file pp_dpm_socclk\n");
 			return ret;
 		}
-		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
-		if (ret) {
-			DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
-			return ret;
+		if (adev->asic_type != CHIP_ARCTURUS) {
+			ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+			if (ret) {
+				DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
+				return ret;
+			}
 		}
 	}
 	if (adev->asic_type >= CHIP_VEGA20) {
@@ -2841,10 +2843,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 			return ret;
 		}
 	}
-	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-	if (ret) {
-		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-		return ret;
+	if (adev->asic_type != CHIP_ARCTURUS) {
+		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+		if (ret) {
+			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+			return ret;
+		}
 	}
 	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
 	if (ret) {
@@ -2948,9 +2952,11 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
 	if (adev->asic_type >= CHIP_VEGA10) {
 		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
-		device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
+		if (adev->asic_type != CHIP_ARCTURUS)
+			device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
 	}
-	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+	if (adev->asic_type != CHIP_ARCTURUS)
+		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
 	if (adev->asic_type >= CHIP_VEGA20)
 		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
 	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);

@@ -32,6 +32,7 @@
 #include "psp_v3_1.h"
 #include "psp_v10_0.h"
 #include "psp_v11_0.h"
+#include "psp_v12_0.h"
 
 static void psp_set_funcs(struct amdgpu_device *adev);
 
@@ -63,6 +64,9 @@ static int psp_early_init(void *handle)
 		psp_v11_0_set_psp_funcs(psp);
 		psp->autoload_supported = true;
 		break;
+	case CHIP_RENOIR:
+		psp_v12_0_set_psp_funcs(psp);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -140,8 +144,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
 	index = atomic_inc_return(&psp->fence_value);
-	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
-			     fence_mc_addr, index);
+	ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
 	if (ret) {
 		atomic_dec(&psp->fence_value);
 		mutex_unlock(&psp->mutex);
@@ -260,7 +263,7 @@ static int psp_tmr_init(struct psp_context *psp)
 
 	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+				      &psp->tmr_bo, &psp->tmr_mc_addr, NULL);
 
 	return ret;
 }
@@ -940,6 +943,60 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
 	return 0;
 }
 
+static void psp_print_fw_hdr(struct psp_context *psp,
+			     struct amdgpu_firmware_info *ucode)
+{
+	struct amdgpu_device *adev = psp->adev;
+	const struct sdma_firmware_header_v1_0 *sdma_hdr =
+		(const struct sdma_firmware_header_v1_0 *)
+		adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+	const struct gfx_firmware_header_v1_0 *ce_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+	const struct gfx_firmware_header_v1_0 *pfp_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+	const struct gfx_firmware_header_v1_0 *me_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+	const struct gfx_firmware_header_v1_0 *mec_hdr =
+		(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+	const struct rlc_firmware_header_v2_0 *rlc_hdr =
+		(const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+	const struct smc_firmware_header_v1_0 *smc_hdr =
+		(const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+	switch (ucode->ucode_id) {
+	case AMDGPU_UCODE_ID_SDMA0:
+	case AMDGPU_UCODE_ID_SDMA1:
+	case AMDGPU_UCODE_ID_SDMA2:
+	case AMDGPU_UCODE_ID_SDMA3:
+	case AMDGPU_UCODE_ID_SDMA4:
+	case AMDGPU_UCODE_ID_SDMA5:
+	case AMDGPU_UCODE_ID_SDMA6:
+	case AMDGPU_UCODE_ID_SDMA7:
+		amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_CP_CE:
+		amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_CP_PFP:
+		amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_CP_ME:
+		amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_CP_MEC1:
+		amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_RLC_G:
+		amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+		break;
+	case AMDGPU_UCODE_ID_SMC:
+		amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+		break;
+	default:
+		break;
+	}
+}
+
 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
 				       struct psp_gfx_cmd_resp *cmd)
 {
@@ -1019,14 +1076,19 @@ out:
 		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
 			/* skip mec JT when autoload is enabled */
 			continue;
+		/* Renoir only needs to load mec jump table one time */
+		if (adev->asic_type == CHIP_RENOIR &&
+		    ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
+			continue;
+
+		psp_print_fw_hdr(psp, ucode);
 
 		ret = psp_execute_np_fw_load(psp, ucode);
 		if (ret)
 			return ret;
 
 		/* Start rlc autoload after psp recieved all the gfx firmware */
-		if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM ||
-		    (adev->asic_type == CHIP_NAVI12 && ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) {
+		if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
 			ret = psp_rlc_autoload(psp);
 			if (ret) {
 				DRM_ERROR("Failed to start rlc autoload\n");
@@ -1154,7 +1216,7 @@ static int psp_hw_fini(void *handle)
 
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, NULL);
 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -1358,3 +1420,12 @@ const struct amdgpu_ip_block_version psp_v11_0_ip_block =
 	.rev = 0,
 	.funcs = &psp_ip_funcs,
 };
+
+const struct amdgpu_ip_block_version psp_v12_0_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_PSP,
+	.major = 12,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &psp_ip_funcs,
+};

@@ -90,7 +90,6 @@ struct psp_funcs
 	int (*ring_destroy)(struct psp_context *psp,
 			    enum psp_ring_type ring_type);
 	int (*cmd_submit)(struct psp_context *psp,
-			  struct amdgpu_firmware_info *ucode,
 			  uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
 			  int index);
 	bool (*compare_sram_data)(struct psp_context *psp,
@@ -172,7 +171,6 @@ struct psp_context
 	/* tmr buffer */
 	struct amdgpu_bo *tmr_bo;
 	uint64_t tmr_mc_addr;
-	void *tmr_buf;
 
 	/* asd firmware and buffer */
 	const struct firmware *asd_fw;
@@ -223,8 +221,8 @@ struct amdgpu_psp_funcs {
 #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
 #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
 #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
-		(psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
+#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \
+		(psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index))
 #define psp_compare_sram_data(psp, ucode, type) \
 		(psp)->funcs->compare_sram_data((psp), (ucode), (type))
 #define psp_init_microcode(psp) \
@@ -270,6 +268,7 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 			uint32_t field_val, uint32_t mask, bool check_changed);
 
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,

@@ -131,6 +131,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
 	char err[9] = "ue";
 	int op = -1;
 	int block_id;
+	uint32_t sub_block;
 	u64 address, value;
 
 	if (*pos)
@@ -169,11 +170,12 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
 	data->op = op;
 
 	if (op == 2) {
-		if (sscanf(str, "%*s %*s %*s %llu %llu",
-					&address, &value) != 2)
-			if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
-						&address, &value) != 2)
+		if (sscanf(str, "%*s %*s %*s %u %llu %llu",
+					&sub_block, &address, &value) != 3)
+			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+						&sub_block, &address, &value) != 3)
 				return -EINVAL;
+		data->head.sub_block_index = sub_block;
 		data->inject.address = address;
 		data->inject.value = value;
 	}
@@ -218,7 +220,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
  * write the struct to the control node.
  *
  * bash:
- * echo op block [error [address value]] > .../ras/ras_ctrl
+ * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
  * op: disable, enable, inject
  *	disable: only block is needed
  *	enable: block and error are needed
@@ -228,10 +230,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
  * error: ue, ce
  *	ue: multi_uncorrectable
  *	ce: single_correctable
+ * sub_block: sub block index, pass 0 if there is no sub block
  *
  * here are some examples for bash commands,
- *	echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
- *	echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
@@ -611,6 +614,10 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 		if (adev->gfx.funcs->query_ras_error_count)
 			adev->gfx.funcs->query_ras_error_count(adev, &err_data);
 		break;
+	case AMDGPU_RAS_BLOCK__MMHUB:
+		if (adev->mmhub_funcs->query_ras_error_count)
+			adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
+		break;
 	default:
 		break;
 	}
@@ -656,6 +663,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		ret = -EINVAL;
 		break;
 	case AMDGPU_RAS_BLOCK__UMC:
+	case AMDGPU_RAS_BLOCK__MMHUB:
 		ret = psp_ras_trigger_error(&adev->psp, &block_info);
 		break;
 	default:
@@ -680,7 +688,7 @@ int amdgpu_ras_error_cure(struct amdgpu_device *adev,
 }
 
 /* get the total error counts on all IPs */
-int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		bool is_ce)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -688,7 +696,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 	struct ras_err_data data = {0, 0};
 
 	if (!con)
-		return -EINVAL;
+		return 0;
 
 	list_for_each_entry(obj, &con->head, node) {
 		struct ras_query_if info = {
@@ -696,7 +704,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		};
 
 		if (amdgpu_ras_error_query(adev, &info))
-			return -EINVAL;
+			return 0;
 
 		data.ce_count += info.ce_count;
 		data.ue_count += info.ue_count;
@@ -785,25 +793,8 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
 {
 	struct amdgpu_ras *con =
 		container_of(attr, struct amdgpu_ras, features_attr);
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
-	struct ras_common_if head;
-	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
-	int i, enabled;
-	ssize_t s;
-
-	s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
-
-	for (i = 0; i < ras_block_count; i++) {
-		head.block = i;
-		enabled = amdgpu_ras_is_feature_enabled(adev, &head);
-
-		s += scnprintf(&buf[s], PAGE_SIZE - s,
-				"%s ras feature mask: %s\n",
-				ras_block_str(i), enabled?"on":"off");
-	}
-
-	return s;
+
+	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
 }
 
 static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)

@@ -484,7 +484,7 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
-int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 		bool is_ce);
 
 /* error handling functions */

@@ -1745,7 +1745,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	r = ttm_bo_device_init(&adev->mman.bdev,
 			       &amdgpu_bo_driver,
 			       adev->ddev->anon_inode->i_mapping,
-			       adev->need_dma32);
+			       dma_addressing_limited(adev->dev));
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;

@@ -83,8 +83,8 @@ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
 		const struct smc_firmware_header_v2_0 *v2_hdr =
 			container_of(v1_hdr, struct smc_firmware_header_v2_0, v1_0);
 
-		DRM_INFO("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
-		DRM_INFO("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
+		DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
+		DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
 	} else {
 		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
 	}
@@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
+	case CHIP_RENOIR:
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
@@ -369,6 +370,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 		return AMDGPU_FW_LOAD_PSP;
 	case CHIP_ARCTURUS:
 		return AMDGPU_FW_LOAD_DIRECT;
+
 	default:
 		DRM_ERROR("Unknown firmware load type\n");
 	}

@@ -47,6 +47,7 @@
 #define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
 #define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
 #define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
+#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
 #define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
 #define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
 #define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
@@ -55,6 +56,7 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
 MODULE_FIRMWARE(FIRMWARE_RAVEN2);
 MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
+MODULE_FIRMWARE(FIRMWARE_RENOIR);
 MODULE_FIRMWARE(FIRMWARE_NAVI10);
 MODULE_FIRMWARE(FIRMWARE_NAVI14);
 MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -83,6 +85,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	case CHIP_ARCTURUS:
 		fw_name = FIRMWARE_ARCTURUS;
 		break;
+	case CHIP_RENOIR:
+		fw_name = FIRMWARE_RENOIR;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
+		break;
 	case CHIP_NAVI10:
 		fw_name = FIRMWARE_NAVI10;
 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&

@@ -2863,6 +2863,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
 	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
 		  "CPU update of VM recommended only for large BAR system\n");
 
+	if (vm->use_cpu_for_update)
+		vm->update_funcs = &amdgpu_vm_cpu_funcs;
+	else
+		vm->update_funcs = &amdgpu_vm_sdma_funcs;
+	dma_fence_put(vm->last_update);
+	vm->last_update = NULL;
+
 	if (vm->pasid) {
 		unsigned long flags;
 

@@ -0,0 +1,103 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "athub_v1_0.h"
+
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+
+static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+						   bool enable)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+	else
+		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+	if (def != data)
+		WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+						  bool enable)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+	else
+		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+	if(def != data)
+		WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+			       enum amd_clockgating_state state)
+{
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA10:
+	case CHIP_VEGA12:
+	case CHIP_VEGA20:
+	case CHIP_RAVEN:
+		athub_update_medium_grain_clock_gating(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		athub_update_medium_grain_light_sleep(adev,
+				state == AMD_CG_STATE_GATE ? true : false);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+	int data;
+
+	if (amdgpu_sriov_vf(adev))
+		*flags = 0;
+
+	/* AMD_CG_SUPPORT_ATHUB_MGCG */
+	data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+		*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+	/* AMD_CG_SUPPORT_ATHUB_LS */
+	if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+		*flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}

@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V1_0_H__
+#define __ATHUB_V1_0_H__
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+			       enum amd_clockgating_state state);
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+
+#endif

@@ -75,6 +75,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
+	case CHIP_NAVI12:
 		athub_v2_0_update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		athub_v2_0_update_medium_grain_light_sleep(adev,

@@ -236,6 +236,7 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
 				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 	u32 tmp;
 
 	/* flip at hsync for async, default is vsync */
@@ -243,6 +244,9 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	/* update pitch */
+	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+	       fb->pitches[0] / fb->format->cpp[0]);
 	/* update the primary scanout address */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));

@@ -254,6 +254,7 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
 				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 	u32 tmp;
 
 	/* flip immediate for async, default is vsync */
@@ -261,6 +262,9 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
 			    GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	/* update pitch */
+	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+	       fb->pitches[0] / fb->format->cpp[0]);
 	/* update the scanout addresses */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));

@@ -191,10 +191,14 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 
 	/* flip at hsync for async, default is vsync */
 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+	/* update pitch */
+	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+	       fb->pitches[0] / fb->format->cpp[0]);
 	/* update the scanout addresses */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));

@@ -184,10 +184,14 @@ static void dce_v8_0_page_flip(struct amdgpu_device *adev,
 				int crtc_id, u64 crtc_base, bool async)
 {
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
 
 	/* flip at hsync for async, default is vsync */
 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+	/* update pitch */
+	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+	       fb->pitches[0] / fb->format->cpp[0]);
 	/* update the primary scanout addresses */
 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));

@@ -458,6 +458,7 @@ static int dce_virtual_hw_init(void *handle)
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_ARCTURUS:
+	case CHIP_RENOIR:
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:

@@ -1748,9 +1748,12 @@ static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
 
 static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
 {
+	int i;
+
 	gfx_v10_0_init_csb(adev);
 
-	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+	for (i = 0; i < adev->num_vmhubs; i++)
+		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
 	/* TODO: init power gating */
 	return;
@@ -4373,15 +4376,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
-	/* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
-	 * This resets the wave ID counters. (needed by transform feedback)
-	 * TODO: This might only be needed on a VMID switch when we change
-	 *       the GDS OA mapping, not sure.
-	 */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
-	amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
-
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
 	else
@@ -5128,7 +5122,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 		5 + /* HDP_INVL */
 		8 + 8 + /* FENCE x2 */
 		2, /* SWITCH_BUFFER */
-	.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
+	.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v10_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -5281,7 +5275,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
 	default:
 		adev->gds.gds_size = 0x10000;
 		adev->gds.gds_compute_max_wave_id = 0x4ff;
-		adev->gds.vgt_gs_max_wave_id = 0x3ff;
 		break;
 	}
 

@ -36,10 +36,10 @@
|
|||
|
||||
#include "gc/gc_9_0_offset.h"
|
||||
#include "gc/gc_9_0_sh_mask.h"
|
||||
|
||||
#include "vega10_enum.h"
|
||||
#include "hdp/hdp_4_0_offset.h"
|
||||
|
||||
#include "soc15.h"
|
||||
#include "soc15_common.h"
|
||||
#include "clearstate_gfx9.h"
|
||||
#include "v9_structs.h"
|
||||
|
@ -60,6 +60,9 @@
|
|||
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
|
||||
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
|
||||
|
||||
#define mmGCEA_PROBE_MAP 0x070c
|
||||
#define mmGCEA_PROBE_MAP_BASE_IDX 0
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
|
||||
|
@ -108,6 +111,13 @@ MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
|
|||
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
|
||||
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
|
||||
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
|
||||
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
|
||||
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
|
||||
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
|
||||
|
||||
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
|
||||
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
|
||||
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
|
||||
|
@ -611,6 +621,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
|
|||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
|
||||
{
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
|
||||
{
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
|
||||
|
@ -755,6 +781,11 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
|
|||
golden_settings_gc_9_1_rv1,
|
||||
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
|
||||
break;
|
||||
case CHIP_RENOIR:
|
||||
soc15_program_register_sequence(adev,
|
||||
golden_settings_gc_9_1_rn,
|
||||
ARRAY_SIZE(golden_settings_gc_9_1_rn));
|
||||
return; /* for renoir, don't need common goldensetting */
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1008,6 +1039,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
|
|||
(adev->gfx.rlc_feature_version < 1) ||
|
||||
!adev->gfx.rlc.is_rlc_v2_1)
|
||||
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
|
||||
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
|
||||
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
|
||||
AMD_PG_SUPPORT_CP |
|
||||
AMD_PG_SUPPORT_RLC_SMU_HS;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -1343,6 +1378,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
|||
case CHIP_ARCTURUS:
|
||||
chip_name = "arcturus";
|
||||
break;
|
||||
case CHIP_RENOIR:
|
||||
chip_name = "renoir";
|
||||
break;
|
||||
default:
|
||||
BUG();
|
 }

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1602,7 +1640,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
             return r;
     }

-    if (adev->asic_type == CHIP_RAVEN) {
+    if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
         /* TODO: double check the cp_table_size for RV */
         adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
         r = amdgpu_gfx_rlc_init_cpt(adev);
@@ -1612,6 +1650,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
     switch (adev->asic_type) {
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         gfx_v9_0_init_lbpw(adev);
         break;
     case CHIP_VEGA20:
@@ -1863,6 +1902,16 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
         gb_addr_config &= ~0xf3e777ff;
         gb_addr_config |= 0x22014042;
         break;
+    case CHIP_RENOIR:
+        adev->gfx.config.max_hw_contexts = 8;
+        adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+        adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+        adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
+        adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+        gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+        gb_addr_config &= ~0xf3e777ff;
+        gb_addr_config |= 0x22010042;
+        break;
     default:
         BUG();
         break;
@@ -2140,6 +2189,7 @@ static int gfx_v9_0_sw_init(void *handle)
     case CHIP_VEGA20:
     case CHIP_RAVEN:
     case CHIP_ARCTURUS:
+    case CHIP_RENOIR:
         adev->gfx.mec.num_mec = 2;
         break;
     default:
@@ -2297,7 +2347,7 @@ static int gfx_v9_0_sw_fini(void *handle)
     gfx_v9_0_mec_fini(adev);
     gfx_v9_0_ngg_fini(adev);
     amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
-    if (adev->asic_type == CHIP_RAVEN) {
+    if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
         amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                 &adev->gfx.rlc.cp_table_gpu_addr,
                 (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -2976,6 +3026,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
     switch (adev->asic_type) {
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         if (amdgpu_lbpw == 0)
             gfx_v9_0_enable_lbpw(adev, false);
         else
@@ -4511,6 +4562,9 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
 {
     amdgpu_gfx_rlc_enter_safe_mode(adev);

+    if (is_support_sw_smu(adev) && !enable)
+        smu_set_gfx_cgpg(&adev->smu, enable);
+
     if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
         gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
         if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
@@ -4622,6 +4676,9 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
 {
     uint32_t data, def;

+    if (adev->asic_type == CHIP_ARCTURUS)
+        return;
+
     amdgpu_gfx_rlc_enter_safe_mode(adev);

     /* Enable 3D CGCG/CGLS */
@@ -4687,8 +4744,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
         /* enable cgcg FSM(0x0000363F) */
         def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);

-        data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
-               RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+        if (adev->asic_type == CHIP_ARCTURUS)
+            data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+                   RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+        else
+            data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+                   RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
             data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
@@ -4760,6 +4821,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
     switch (adev->asic_type) {
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         if (!enable) {
             amdgpu_gfx_off_ctrl(adev, false);
             cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
@@ -4778,6 +4840,8 @@ static int gfx_v9_0_set_powergating_state(void *handle,
             gfx_v9_0_enable_cp_power_gating(adev, false);

         /* update gfx cgpg state */
+        if (is_support_sw_smu(adev) && enable)
+            smu_set_gfx_cgpg(&adev->smu, enable);
         gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

         /* update mgcg state */
@@ -4814,6 +4878,8 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
     case CHIP_VEGA12:
     case CHIP_VEGA20:
     case CHIP_RAVEN:
+    case CHIP_ARCTURUS:
+    case CHIP_RENOIR:
         gfx_v9_0_update_gfx_clock_gating(adev,
                 state == AMD_CG_STATE_GATE ? true : false);
         break;
@@ -5396,7 +5462,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
     value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
     value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
     value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-    WREG32(mmSQ_CMD, value);
+    WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }

 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -5978,6 +6044,9 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
     if (adev->asic_type != CHIP_VEGA20)
         return -EINVAL;

+    if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
+        return -EINVAL;
+
     if (!ras_gfx_subblocks[info->head.sub_block_index].name)
         return -EPERM;

@@ -6285,6 +6354,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
     case CHIP_VEGA20:
     case CHIP_RAVEN:
     case CHIP_ARCTURUS:
+    case CHIP_RENOIR:
         adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
         break;
     default:
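The two guards added to gfx_v9_0_ras_error_inject() are a standard table-validation idiom: range-check the index before using it, then check whether the slot itself is usable. A minimal userspace sketch of the same pattern, with a hypothetical table standing in for ras_gfx_subblocks:

#include <stddef.h>
#include <errno.h>

struct subblock {
    const char *name;   /* NULL: slot exists but injection is not allowed */
};

static const struct subblock subblocks[] = {
    { "sq" }, { NULL }, { "tcc" },  /* illustrative entries only */
};

#define N_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

static int validate_subblock(size_t index)
{
    if (index >= N_ELEMS(subblocks))
        return -EINVAL;     /* reject before indexing the table */
    if (!subblocks[index].name)
        return -EPERM;      /* valid slot, but not injectable */
    return 0;
}

Doing the bounds check first is what makes the second dereference safe; reversing the order would read past the array for an out-of-range index.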
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -140,7 +140,7 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
     /* XXX for emulation, Refer to closed source code.*/
     tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
                 L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
-    tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+    tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
     tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
     tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
     WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -141,17 +141,40 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
     }

     if (printk_ratelimit()) {
+        struct amdgpu_task_info task_info;
+
+        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
         dev_err(adev->dev,
-            "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+            "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
+            "for process %s pid %d thread %s pid %d)\n",
             entry->vmid_src ? "mmhub" : "gfxhub",
             entry->src_id, entry->ring_id, entry->vmid,
-            entry->pasid);
-        dev_err(adev->dev, " at page 0x%016llx from %d\n",
+            entry->pasid, task_info.process_name, task_info.tgid,
+            task_info.task_name, task_info.pid);
+        dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
             addr, entry->client_id);
-        if (!amdgpu_sriov_vf(adev))
+        if (!amdgpu_sriov_vf(adev)) {
             dev_err(adev->dev,
-                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+                "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                 status);
+            dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+                REG_GET_FIELD(status,
+                GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+            dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+                REG_GET_FIELD(status,
+                GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+            dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+                REG_GET_FIELD(status,
+                GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+            dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+                REG_GET_FIELD(status,
+                GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+            dev_err(adev->dev, "\t RW: 0x%lx\n",
+                REG_GET_FIELD(status,
+                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+        }
     }

     return 0;
@@ -230,8 +253,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
  *
  * Flush the TLB for the requested page table.
  */
-static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                    uint32_t vmid, uint32_t flush_type)
+static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                    uint32_t vmhub, uint32_t flush_type)
 {
     struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
     struct dma_fence *fence;
@@ -244,7 +267,14 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
     mutex_lock(&adev->mman.gtt_window_lock);

-    gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+    if (vmhub == AMDGPU_MMHUB_0) {
+        gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+        mutex_unlock(&adev->mman.gtt_window_lock);
+        return;
+    }
+
+    BUG_ON(vmhub != AMDGPU_GFXHUB_0);
+
     if (!adev->mman.buffer_funcs_enabled ||
         !adev->ib_pool_ready ||
         adev->in_gpu_reset) {
@@ -592,7 +622,6 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v10_0_sw_init(void *handle)
 {
     int r;
-    int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     gfxhub_v2_0_init(adev);
@@ -642,26 +671,10 @@ static int gmc_v10_0_sw_init(void *handle)
     else
         adev->gmc.stolen_size = 9 * 1024 *1024;

-    /*
-     * Set DMA mask + need_dma32 flags.
-     * PCIE - can handle 44-bits.
-     * IGP - can handle 44-bits
-     * PCI - dma32 for legacy pci gart, 44 bits on navi10
-     */
-    adev->need_dma32 = false;
-    dma_bits = adev->need_dma32 ? 32 : 44;
-
-    r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
     if (r) {
-        adev->need_dma32 = true;
-        dma_bits = 32;
         printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
-    }
-
-    r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-    if (r) {
-        pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-        printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
         return r;
     }

     r = gmc_v10_0_mc_init(adev);
@@ -773,7 +786,8 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
     gfxhub_v2_0_set_fault_enable_default(adev, value);
     mmhub_v2_0_set_fault_enable_default(adev, value);
-    gmc_v10_0_flush_gpu_tlb(adev, 0, 0);
+    gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+    gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

     DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
          (unsigned)(adev->gmc.gart_size >> 20),
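The sw_init hunk above replaces the old two-step pci_set_dma_mask() / pci_set_consistent_dma_mask() dance, with its separate 32-bit fallbacks, by one call that sets both the streaming and coherent masks together. A minimal sketch of the consolidated pattern, assuming a probe-style entry point (my_probe() and the 44-bit width are illustrative, not the driver's actual probe path):

#include <linux/dma-mapping.h>
#include <linux/device.h>

static int my_probe(struct device *dev)
{
    int r;

    /* one call sets both the streaming and the coherent DMA mask;
     * on failure the probe aborts instead of limping on at 32 bits */
    r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));
    if (r)
        return r;

    return 0;
}

Failing the init outright is a deliberate behavior change: the old code silently fell back to a 32-bit mask, which these GPUs no longer need to support.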
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -362,8 +362,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
     return 0;
 }

-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                   uint32_t vmid, uint32_t flush_type)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                   uint32_t vmhub, uint32_t flush_type)
 {
     WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
@@ -571,7 +571,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
     else
         gmc_v6_0_set_fault_enable_default(adev, true);

-    gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
+    gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
     dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
          (unsigned)(adev->gmc.gart_size >> 20),
          (unsigned long long)table_addr);
@@ -839,9 +839,10 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v6_0_sw_init(void *handle)
 {
     int r;
-    int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    adev->num_vmhubs = 1;
+
     if (adev->flags & AMD_IS_APU) {
         adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
     } else {
@@ -862,20 +863,12 @@ static int gmc_v6_0_sw_init(void *handle)
     adev->gmc.mc_mask = 0xffffffffffULL;

-    adev->need_dma32 = false;
-    dma_bits = adev->need_dma32 ? 32 : 40;
-    r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
     if (r) {
-        adev->need_dma32 = true;
-        dma_bits = 32;
         dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+        return r;
     }
-    r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-    if (r) {
-        pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-        dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
-    }
-    adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+    adev->need_swiotlb = drm_need_swiotlb(44);

     r = gmc_v6_0_init_microcode(adev);
     if (r) {
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -433,8 +433,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                   uint32_t vmid, uint32_t flush_type)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                   uint32_t vmhub, uint32_t flush_type)
 {
     /* bits 0-15 are the VM contexts0-15 */
     WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -677,7 +677,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
         WREG32(mmCHUB_CONTROL, tmp);
     }

-    gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
+    gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
     DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
          (unsigned)(adev->gmc.gart_size >> 20),
          (unsigned long long)table_addr);
@@ -959,9 +959,10 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v7_0_sw_init(void *handle)
 {
     int r;
-    int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    adev->num_vmhubs = 1;
+
     if (adev->flags & AMD_IS_APU) {
         adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
     } else {
@@ -990,25 +991,12 @@ static int gmc_v7_0_sw_init(void *handle)
      */
     adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

-    /* set DMA mask + need_dma32 flags.
-     * PCIE - can handle 40-bits.
-     * IGP - can handle 40-bits
-     * PCI - dma32 for legacy pci gart, 40 bits on newer asics
-     */
-    adev->need_dma32 = false;
-    dma_bits = adev->need_dma32 ? 32 : 40;
-    r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
     if (r) {
-        adev->need_dma32 = true;
-        dma_bits = 32;
         pr_warn("amdgpu: No suitable DMA available\n");
+        return r;
     }
-    r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-    if (r) {
-        pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-        pr_warn("amdgpu: No coherent DMA available\n");
-    }
-    adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+    adev->need_swiotlb = drm_need_swiotlb(40);

     r = gmc_v7_0_init_microcode(adev);
     if (r) {
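The recurring signature change in these files threads a vmhub index through every generation's flush_gpu_tlb() implementation, even on single-hub parts like SI/CIK/VI where the argument is simply ignored, so that one function-table slot fits all ASICs. A reduced sketch of that plumbing, with the struct and names as stand-ins for the real amdgpu_gmc_funcs machinery:

#include <stdint.h>

struct my_device;

struct my_gmc_funcs {
    /* every generation now takes (vmid, vmhub, flush_type) */
    void (*flush_gpu_tlb)(struct my_device *dev, uint32_t vmid,
                          uint32_t vmhub, uint32_t flush_type);
};

struct my_device {
    const struct my_gmc_funcs *gmc_funcs;
    unsigned int num_vmhubs;    /* 1 on older parts, 2 on Vega/Navi */
};

/* callers that used to flush everything now either pick a hub
 * explicitly or loop over all of them, as the gart_enable paths do */
static void flush_all_hubs(struct my_device *dev, uint32_t vmid)
{
    unsigned int i;

    for (i = 0; i < dev->num_vmhubs; i++)
        dev->gmc_funcs->flush_gpu_tlb(dev, vmid, i, 0);
}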
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -635,8 +635,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (VI).
  */
-static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                   uint32_t vmid, uint32_t flush_type)
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                   uint32_t vmhub, uint32_t flush_type)
 {
     /* bits 0-15 are the VM contexts0-15 */
     WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -921,7 +921,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
     else
         gmc_v8_0_set_fault_enable_default(adev, true);

-    gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
+    gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
     DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
          (unsigned)(adev->gmc.gart_size >> 20),
          (unsigned long long)table_addr);
@@ -1079,9 +1079,10 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v8_0_sw_init(void *handle)
 {
     int r;
-    int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+    adev->num_vmhubs = 1;
+
     if (adev->flags & AMD_IS_APU) {
         adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
     } else {
@@ -1116,25 +1117,12 @@ static int gmc_v8_0_sw_init(void *handle)
      */
     adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

-    /* set DMA mask + need_dma32 flags.
-     * PCIE - can handle 40-bits.
-     * IGP - can handle 40-bits
-     * PCI - dma32 for legacy pci gart, 40 bits on newer asics
-     */
-    adev->need_dma32 = false;
-    dma_bits = adev->need_dma32 ? 32 : 40;
-    r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
     if (r) {
-        adev->need_dma32 = true;
-        dma_bits = 32;
         pr_warn("amdgpu: No suitable DMA available\n");
+        return r;
     }
-    r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-    if (r) {
-        pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-        pr_warn("amdgpu: No coherent DMA available\n");
-    }
-    adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+    adev->need_swiotlb = drm_need_swiotlb(40);

     r = gmc_v8_0_init_microcode(adev);
     if (r) {
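Both the 40-bit and 44-bit variants of these hunks rely on DMA_BIT_MASK(n) producing a value with the low n bits set. A self-contained sketch of that arithmetic; the macro below mirrors the kernel's definition for n < 64 (the real macro special-cases n == 64):

#include <stdint.h>
#include <stdio.h>

#define BIT_MASK64(n) (((uint64_t)1 << (n)) - 1)

int main(void)
{
    /* 40-bit mask for CIK/VI-class parts, 44-bit for Vega/Navi */
    printf("40-bit: 0x%llx\n", (unsigned long long)BIT_MASK64(40));
    printf("44-bit: 0x%llx\n", (unsigned long long)BIT_MASK64(44));
    return 0;
}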
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -47,6 +47,7 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
+#include "athub_v1_0.h"
 #include "gfxhub_v1_1.h"
 #include "mmhub_v9_4.h"
 #include "umc_v6_1.h"
@@ -266,7 +267,7 @@ static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
         struct amdgpu_irq_src *source,
         struct amdgpu_iv_entry *entry)
 {
-    struct ras_common_if *ras_if = adev->gmc.ras_if;
+    struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
     struct ras_dispatch_if ih_data = {
         .entry = entry,
     };
@@ -390,6 +391,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
         dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
             REG_GET_FIELD(status,
             VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+        dev_err(adev->dev, "\t RW: 0x%lx\n",
+            REG_GET_FIELD(status,
+            VM_L2_PROTECTION_FAULT_STATUS, RW));
     }
 }
@@ -452,44 +456,45 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
  *
  * Flush the TLB for the requested page table using certain type.
  */
-static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                   uint32_t vmid, uint32_t flush_type)
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                   uint32_t vmhub, uint32_t flush_type)
 {
     const unsigned eng = 17;
-    unsigned i, j;
+    u32 j, tmp;
+    struct amdgpu_vmhub *hub;

-    for (i = 0; i < adev->num_vmhubs; ++i) {
-        struct amdgpu_vmhub *hub = &adev->vmhub[i];
-        u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+    BUG_ON(vmhub >= adev->num_vmhubs);

-        /* This is necessary for a HW workaround under SRIOV as well
-         * as GFXOFF under bare metal
-         */
-        if (adev->gfx.kiq.ring.sched.ready &&
-            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-            !adev->in_gpu_reset) {
-            uint32_t req = hub->vm_inv_eng0_req + eng;
-            uint32_t ack = hub->vm_inv_eng0_ack + eng;
+    hub = &adev->vmhub[vmhub];
+    tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

-            amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
-                               1 << vmid);
-            continue;
-        }
+    /* This is necessary for a HW workaround under SRIOV as well
+     * as GFXOFF under bare metal
+     */
+    if (adev->gfx.kiq.ring.sched.ready &&
+        (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+        !adev->in_gpu_reset) {
+        uint32_t req = hub->vm_inv_eng0_req + eng;
+        uint32_t ack = hub->vm_inv_eng0_ack + eng;

-        spin_lock(&adev->gmc.invalidate_lock);
-        WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-        for (j = 0; j < adev->usec_timeout; j++) {
-            tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-            if (tmp & (1 << vmid))
-                break;
-            udelay(1);
-        }
-        spin_unlock(&adev->gmc.invalidate_lock);
-        if (j < adev->usec_timeout)
-            continue;
-
-        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
+        amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+                           1 << vmid);
+        return;
     }
+
+    spin_lock(&adev->gmc.invalidate_lock);
+    WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+    for (j = 0; j < adev->usec_timeout; j++) {
+        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+        if (tmp & (1 << vmid))
+            break;
+        udelay(1);
+    }
+    spin_unlock(&adev->gmc.invalidate_lock);
+    if (j < adev->usec_timeout)
+        return;
+
+    DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 }

 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
@@ -656,6 +661,17 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
     }
 }

+static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+    switch (adev->asic_type) {
+    case CHIP_VEGA20:
+        adev->mmhub_funcs = &mmhub_v1_0_funcs;
+        break;
+    default:
+        break;
+    }
+}
+
 static int gmc_v9_0_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -663,6 +679,7 @@ static int gmc_v9_0_early_init(void *handle)
     gmc_v9_0_set_gmc_funcs(adev);
     gmc_v9_0_set_irq_funcs(adev);
     gmc_v9_0_set_umc_funcs(adev);
+    gmc_v9_0_set_mmhub_funcs(adev);

     adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
     adev->gmc.shared_aperture_end =
@@ -690,6 +707,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
     case CHIP_VEGA10:
     case CHIP_RAVEN:
     case CHIP_ARCTURUS:
+    case CHIP_RENOIR:
         return true;
     case CHIP_VEGA12:
     case CHIP_VEGA20:
@@ -728,27 +746,25 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
     return 0;
 }

-static int gmc_v9_0_ecc_late_init(void *handle)
+static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
+            struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    struct ras_common_if **ras_if = &adev->gmc.ras_if;
+    struct ras_common_if **ras_if = NULL;
     struct ras_ih_if ih_info = {
         .cb = gmc_v9_0_process_ras_data_cb,
     };
-    struct ras_fs_if fs_info = {
-        .sysfs_name = "umc_err_count",
-        .debugfs_name = "umc_err_inject",
-    };
-    struct ras_common_if ras_block = {
-        .block = AMDGPU_RAS_BLOCK__UMC,
-        .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
-        .sub_block_index = 0,
-        .name = "umc",
-    };
     int r;

-    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
-        amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+    if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
+        ras_if = &adev->gmc.umc_ras_if;
+    else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
+        ras_if = &adev->gmc.mmhub_ras_if;
+    else
+        BUG();
+
+    if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+        amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
         return 0;
     }
@@ -763,7 +779,7 @@ static int gmc_v9_0_ecc_late_init(void *handle)
         if (r == -EAGAIN) {
             /* request a gpu reset. will run again. */
             amdgpu_ras_request_reset_on_boot(adev,
-                    AMDGPU_RAS_BLOCK__UMC);
+                    ras_block->block);
             return 0;
         }
         /* fail to enable ras, cleanup all. */
@@ -777,41 +793,46 @@ static int gmc_v9_0_ecc_late_init(void *handle)
     if (!*ras_if)
         return -ENOMEM;

-    **ras_if = ras_block;
+    **ras_if = *ras_block;

     r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
     if (r) {
         if (r == -EAGAIN) {
             amdgpu_ras_request_reset_on_boot(adev,
-                    AMDGPU_RAS_BLOCK__UMC);
+                    ras_block->block);
             r = 0;
         }
         goto feature;
     }

     ih_info.head = **ras_if;
-    fs_info.head = **ras_if;
+    fs_info->head = **ras_if;

-    r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
-    if (r)
-        goto interrupt;
+    if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
+        r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+        if (r)
+            goto interrupt;
+    }

-    amdgpu_ras_debugfs_create(adev, &fs_info);
+    amdgpu_ras_debugfs_create(adev, fs_info);

-    r = amdgpu_ras_sysfs_create(adev, &fs_info);
+    r = amdgpu_ras_sysfs_create(adev, fs_info);
     if (r)
         goto sysfs;
 resume:
-    r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
-    if (r)
-        goto irq;
+    if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
+        r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+        if (r)
+            goto irq;
+    }

     return 0;
 irq:
     amdgpu_ras_sysfs_remove(adev, *ras_if);
 sysfs:
     amdgpu_ras_debugfs_remove(adev, *ras_if);
-    amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+    if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
+        amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
 interrupt:
     amdgpu_ras_feature_enable(adev, *ras_if, 0);
 feature:
@@ -820,6 +841,40 @@ feature:
     return r;
 }

+static int gmc_v9_0_ecc_late_init(void *handle)
+{
+    int r;
+
+    struct ras_fs_if umc_fs_info = {
+        .sysfs_name = "umc_err_count",
+        .debugfs_name = "umc_err_inject",
+    };
+    struct ras_common_if umc_ras_block = {
+        .block = AMDGPU_RAS_BLOCK__UMC,
+        .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+        .sub_block_index = 0,
+        .name = "umc",
+    };
+    struct ras_fs_if mmhub_fs_info = {
+        .sysfs_name = "mmhub_err_count",
+        .debugfs_name = "mmhub_err_inject",
+    };
+    struct ras_common_if mmhub_ras_block = {
+        .block = AMDGPU_RAS_BLOCK__MMHUB,
+        .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+        .sub_block_index = 0,
+        .name = "mmhub",
+    };
+
+    r = gmc_v9_0_ecc_ras_block_late_init(handle,
+            &umc_fs_info, &umc_ras_block);
+    if (r)
+        return r;
+
+    r = gmc_v9_0_ecc_ras_block_late_init(handle,
+            &mmhub_fs_info, &mmhub_ras_block);
+    return r;
+}
+
 static int gmc_v9_0_late_init(void *handle)
 {
@@ -869,18 +924,17 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
         struct amdgpu_gmc *mc)
 {
     u64 base = 0;
-    if (!amdgpu_sriov_vf(adev)) {
-        if (adev->asic_type == CHIP_ARCTURUS)
-            base = mmhub_v9_4_get_fb_location(adev);
-        else
-            base = mmhub_v1_0_get_fb_location(adev);
-    }
+
+    if (adev->asic_type == CHIP_ARCTURUS)
+        base = mmhub_v9_4_get_fb_location(adev);
+    else if (!amdgpu_sriov_vf(adev))
+        base = mmhub_v1_0_get_fb_location(adev);
+
     /* add the xgmi offset of the physical node */
     base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
     amdgpu_gmc_vram_location(adev, mc, base);
     amdgpu_gmc_gart_location(adev, mc);
-    if (!amdgpu_sriov_vf(adev))
-        amdgpu_gmc_agp_location(adev, mc);
+    amdgpu_gmc_agp_location(adev, mc);
     /* base offset of vram pages */
     adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -959,6 +1013,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
         adev->gmc.gart_size = 512ULL << 20;
         break;
     case CHIP_RAVEN:   /* DCE SG support */
+    case CHIP_RENOIR:
         adev->gmc.gart_size = 1024ULL << 20;
         break;
     }
@@ -1009,6 +1064,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
     switch (adev->asic_type) {
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
         size = (REG_GET_FIELD(viewport,
                 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1037,7 +1093,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v9_0_sw_init(void *handle)
 {
     int r;
-    int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     gfxhub_v1_0_init(adev);
@@ -1065,8 +1120,10 @@ static int gmc_v9_0_sw_init(void *handle)
     case CHIP_VEGA10:
     case CHIP_VEGA12:
     case CHIP_VEGA20:
+    case CHIP_RENOIR:
         adev->num_vmhubs = 2;
+

         /*
          * To fulfill 4-level page support,
          * vm size is 256TB (48bit), maximum size of Vega10,
@@ -1119,25 +1176,12 @@ static int gmc_v9_0_sw_init(void *handle)
      */
     adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

-    /* set DMA mask + need_dma32 flags.
-     * PCIE - can handle 44-bits.
-     * IGP - can handle 44-bits
-     * PCI - dma32 for legacy pci gart, 44 bits on vega10
-     */
-    adev->need_dma32 = false;
-    dma_bits = adev->need_dma32 ? 32 : 44;
-    r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
     if (r) {
-        adev->need_dma32 = true;
-        dma_bits = 32;
         printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+        return r;
     }
-    r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-    if (r) {
-        pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-        printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
-    }
-    adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+    adev->need_swiotlb = drm_need_swiotlb(44);

     if (adev->gmc.xgmi.supported) {
         r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1180,21 +1224,32 @@ static int gmc_v9_0_sw_fini(void *handle)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
-            adev->gmc.ras_if) {
-        struct ras_common_if *ras_if = adev->gmc.ras_if;
+            adev->gmc.umc_ras_if) {
+        struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
         struct ras_ih_if ih_info = {
             .head = *ras_if,
         };

-        /*remove fs first*/
+        /* remove fs first */
         amdgpu_ras_debugfs_remove(adev, ras_if);
         amdgpu_ras_sysfs_remove(adev, ras_if);
-        /*remove the IH*/
+        /* remove the IH */
         amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
         amdgpu_ras_feature_enable(adev, ras_if, 0);
         kfree(ras_if);
     }

+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+            adev->gmc.mmhub_ras_if) {
+        struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
+
+        /* remove fs and disable ras feature */
+        amdgpu_ras_debugfs_remove(adev, ras_if);
+        amdgpu_ras_sysfs_remove(adev, ras_if);
+        amdgpu_ras_feature_enable(adev, ras_if, 0);
+        kfree(ras_if);
+    }
+
     amdgpu_gem_force_release(adev);
     amdgpu_vm_manager_fini(adev);
@@ -1227,6 +1282,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
     case CHIP_VEGA12:
         break;
     case CHIP_RAVEN:
+        /* TODO for renoir */
         soc15_program_register_sequence(adev,
                         golden_settings_athub_1_0_0,
                         ARRAY_SIZE(golden_settings_athub_1_0_0));
@@ -1243,7 +1299,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  */
 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 {
-    int r;
+    int r, i;
     bool value;
     u32 tmp;
@@ -1261,6 +1317,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
     switch (adev->asic_type) {
     case CHIP_RAVEN:
+        /* TODO for renoir */
         mmhub_v1_0_update_power_gating(adev, true);
         break;
     default:
@@ -1299,7 +1356,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
         mmhub_v9_4_set_fault_enable_default(adev, value);
     else
         mmhub_v1_0_set_fault_enable_default(adev, value);
-    gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
+
+    for (i = 0; i < adev->num_vmhubs; ++i)
+        gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

     DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
          (unsigned)(adev->gmc.gart_size >> 20),
@@ -1408,9 +1467,13 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     if (adev->asic_type == CHIP_ARCTURUS)
-        return 0;
+        mmhub_v9_4_set_clockgating(adev, state);
+    else
+        mmhub_v1_0_set_clockgating(adev, state);

-    return mmhub_v1_0_set_clockgating(adev, state);
+    athub_v1_0_set_clockgating(adev, state);
+
+    return 0;
 }

 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
@@ -1418,9 +1481,11 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     if (adev->asic_type == CHIP_ARCTURUS)
-        return;
+        mmhub_v9_4_get_clockgating(adev, flags);
+    else
+        mmhub_v1_0_get_clockgating(adev, flags);

-    mmhub_v1_0_get_clockgating(adev, flags);
+    athub_v1_0_get_clockgating(adev, flags);
 }

 static int gmc_v9_0_set_powergating_state(void *handle,
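The rewritten gmc_v9_0_flush_gpu_tlb() is built around a write-request-then-poll-ack handshake: write an invalidate request for one VMID, then spin on the ack register until that VMID's bit comes back or a timeout expires. A self-contained sketch of that loop, with reg_read()/reg_write()/delay_us() as stand-ins for RREG32/WREG32/udelay:

#include <stdint.h>
#include <stdbool.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);
extern void delay_us(unsigned int us);

static bool flush_and_wait(uint32_t req_reg, uint32_t ack_reg,
                           uint32_t req_val, unsigned int vmid,
                           unsigned int timeout_us)
{
    unsigned int i;

    reg_write(req_reg, req_val);
    for (i = 0; i < timeout_us; i++) {
        /* hardware sets bit 'vmid' in the ack register when done */
        if (reg_read(ack_reg) & (1u << vmid))
            return true;
        delay_us(1);
    }
    return false;   /* caller logs "Timeout waiting for VM flush ACK!" */
}

The diff's other change to this function is structural: flushing a single caller-chosen hub instead of looping over every hub, which is why the loop index and per-iteration locals moved to function scope.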
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -21,13 +21,13 @@
  *
  */
 #include "amdgpu.h"
+#include "amdgpu_ras.h"
 #include "mmhub_v1_0.h"

 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "mmhub/mmhub_1_0_default.h"
-#include "athub/athub_1_0_offset.h"
-#include "athub/athub_1_0_sh_mask.h"
+#include "mmhub/mmhub_9_4_0_offset.h"
 #include "vega10_enum.h"

 #include "soc15_common.h"
@@ -35,6 +35,9 @@
 #define mmDAGB0_CNTL_MISC2_RV 0x008f
 #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

+#define EA_EDC_CNT_MASK 0x3
+#define EA_EDC_CNT_SHIFT 0x2
+
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
     u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
@@ -491,22 +494,6 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
     WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
 }

-static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-                           bool enable)
-{
-    uint32_t def, data;
-
-    def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
-    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
-        data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-    else
-        data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-
-    if (def != data)
-        WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
 static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                            bool enable)
 {
@@ -523,23 +510,6 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
     WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
 }

-static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
-                          bool enable)
-{
-    uint32_t def, data;
-
-    def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
-    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
-        (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-        data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-    else
-        data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-
-    if(def != data)
-        WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
 int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
                    enum amd_clockgating_state state)
 {
@@ -551,14 +521,11 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
     case CHIP_VEGA12:
     case CHIP_VEGA20:
     case CHIP_RAVEN:
+    case CHIP_RENOIR:
         mmhub_v1_0_update_medium_grain_clock_gating(adev,
                 state == AMD_CG_STATE_GATE ? true : false);
-        athub_update_medium_grain_clock_gating(adev,
-                state == AMD_CG_STATE_GATE ? true : false);
         mmhub_v1_0_update_medium_grain_light_sleep(adev,
                 state == AMD_CG_STATE_GATE ? true : false);
-        athub_update_medium_grain_light_sleep(adev,
-                state == AMD_CG_STATE_GATE ? true : false);
         break;
     default:
         break;
@@ -569,18 +536,85 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,

 void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
-    int data;
+    int data, data1;

     if (amdgpu_sriov_vf(adev))
         *flags = 0;

+    data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+    data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+
     /* AMD_CG_SUPPORT_MC_MGCG */
-    data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-    if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+    if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+        !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
         *flags |= AMD_CG_SUPPORT_MC_MGCG;

     /* AMD_CG_SUPPORT_MC_LS */
-    data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
     if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
         *flags |= AMD_CG_SUPPORT_MC_LS;
 }
+
+static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
+                       void *ras_error_status)
+{
+    int i;
+    uint32_t ea0_edc_cnt, ea0_edc_cnt2;
+    uint32_t ea1_edc_cnt, ea1_edc_cnt2;
+    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+    /* EDC CNT will be cleared automatically after read */
+    ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20);
+    ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20);
+    ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20);
+    ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20);
+
+    /* error count of each error type is recorded by 2 bits,
+     * ce and ue count in EDC_CNT
+     */
+    for (i = 0; i < 5; i++) {
+        err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+        err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+        ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+        err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+        err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+        ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+    }
+    /* successive ue count in EDC_CNT */
+    for (i = 0; i < 5; i++) {
+        err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK);
+        err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK);
+        ea0_edc_cnt >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt >>= EA_EDC_CNT_SHIFT;
+    }
+
+    /* ce and ue count in EDC_CNT2 */
+    for (i = 0; i < 3; i++) {
+        err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+        err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+        ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+        err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+        err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+        ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+    }
+    /* successive ue count in EDC_CNT2 */
+    for (i = 0; i < 6; i++) {
+        err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK);
+        err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK);
+        ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+        ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT;
+    }
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+    .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+};
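mmhub_v1_0_query_ras_error_count() unpacks several 2-bit hardware counters from one 32-bit EDC register by repeated mask-and-shift, alternating correctable and uncorrectable slots. A runnable userspace sketch of the same unpacking, with an arbitrary test value in place of the register read:

#include <stdint.h>
#include <stdio.h>

#define EDC_CNT_MASK  0x3
#define EDC_CNT_SHIFT 0x2

int main(void)
{
    /* pretend register: alternating (ce, ue) counters, 2 bits each */
    uint32_t edc_cnt = 0x000001b9;  /* arbitrary test pattern */
    unsigned int ce = 0, ue = 0;
    int i;

    for (i = 0; i < 5; i++) {
        ce += edc_cnt & EDC_CNT_MASK;   /* correctable slot */
        edc_cnt >>= EDC_CNT_SHIFT;
        ue += edc_cnt & EDC_CNT_MASK;   /* uncorrectable slot */
        edc_cnt >>= EDC_CNT_SHIFT;
    }
    printf("ce=%u ue=%u\n", ce, ue);
    return 0;
}

Two bits per counter means each saturates at 3, which is fine here because the hardware clears the register on read and the driver accumulates the totals in software.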
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -23,6 +23,8 @@
 #ifndef __MMHUB_V1_0_H__
 #define __MMHUB_V1_0_H__

+extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
 int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
 void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -126,7 +126,7 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
     /* XXX for emulation, Refer to closed source code.*/
     tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                 0);
-    tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+    tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
     tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
     tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
     WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp);
@@ -407,6 +407,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
     switch (adev->asic_type) {
     case CHIP_NAVI10:
     case CHIP_NAVI14:
+    case CHIP_NAVI12:
         mmhub_v2_0_update_medium_grain_clock_gating(adev,
                 state == AMD_CG_STATE_GATE ? true : false);
         mmhub_v2_0_update_medium_grain_light_sleep(adev,
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -515,3 +515,128 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
                 i * MMHUB_INSTANCE_REGISTER_OFFSET;
     }
 }
+
+static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+                            bool enable)
+{
+    uint32_t def, data, def1, data1;
+    int i, j;
+    int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;
+
+    for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+        def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
+                    mmATCL2_0_ATC_L2_MISC_CG,
+                    i * MMHUB_INSTANCE_REGISTER_OFFSET);
+
+        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+            data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
+        else
+            data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
+
+        if (def != data)
+            WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
+                    i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
+
+        for (j = 0; j < 5; j++) {
+            def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
+                        mmDAGB0_CNTL_MISC2,
+                        i * MMHUB_INSTANCE_REGISTER_OFFSET +
+                        j * dist);
+            if (enable &&
+                (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
+                data1 &=
+                    ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+                      DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+                      DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+                      DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+                      DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+            } else {
+                data1 |=
+                    (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+                     DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+                     DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+                     DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+                     DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+                     DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+            }
+
+            if (def1 != data1)
+                WREG32_SOC15_OFFSET(MMHUB, 0,
+                    mmDAGB0_CNTL_MISC2,
+                    i * MMHUB_INSTANCE_REGISTER_OFFSET +
+                    j * dist, data1);
+
+            if (i == 1 && j == 3)
+                break;
+        }
+    }
+}
+
+static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+                               bool enable)
+{
+    uint32_t def, data;
+    int i;
+
+    for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+        def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
+                    mmATCL2_0_ATC_L2_MISC_CG,
+                    i * MMHUB_INSTANCE_REGISTER_OFFSET);
+
+        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
+            data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+        else
+            data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+        if (def != data)
+            WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
+                    i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
+    }
+}
+
+int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+                   enum amd_clockgating_state state)
+{
+    if (amdgpu_sriov_vf(adev))
+        return 0;
+
+    switch (adev->asic_type) {
+    case CHIP_ARCTURUS:
+        mmhub_v9_4_update_medium_grain_clock_gating(adev,
+                state == AMD_CG_STATE_GATE ? true : false);
+        mmhub_v9_4_update_medium_grain_light_sleep(adev,
+                state == AMD_CG_STATE_GATE ? true : false);
+        break;
+    default:
+        break;
+    }
+
+    return 0;
+}
+
+void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+    int data, data1;
+
+    if (amdgpu_sriov_vf(adev))
+        *flags = 0;
+
+    /* AMD_CG_SUPPORT_MC_MGCG */
+    data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
+
+    data1 = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
+
+    if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
+        !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+               DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
+        *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+    /* AMD_CG_SUPPORT_MC_LS */
+    if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+        *flags |= AMD_CG_SUPPORT_MC_LS;
+}
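The new mmhub_v9_4 helpers address N identical hub instances by computing base + instance * stride for every register, then doing a read-modify-write that skips the write when nothing changed. A reduced sketch of that per-instance addressing; the instance count and stride values are illustrative:

#include <stdint.h>

#define NUM_INSTANCES   2
#define INSTANCE_STRIDE 0x3000  /* stand-in for MMHUB_INSTANCE_REGISTER_OFFSET */

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

static void set_bit_on_all_instances(uint32_t reg, uint32_t mask, int enable)
{
    int i;

    for (i = 0; i < NUM_INSTANCES; i++) {
        uint32_t addr = reg + i * INSTANCE_STRIDE;
        uint32_t def = reg_read(addr);
        uint32_t data = enable ? (def | mask) : (def & ~mask);

        if (def != data)    /* avoid the write when nothing changes */
            reg_write(addr, data);
    }
}

The def/data pair mirrors the driver's idiom exactly: comparing against the value read back keeps redundant register writes off the bus.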
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -29,5 +29,8 @@ void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
 void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev,
                 bool value);
 void mmhub_v9_4_init(struct amdgpu_device *adev);
+int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+                   enum amd_clockgating_state state);
+void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);

 #endif
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -91,6 +91,26 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
     WREG32(reg, doorbell_range);
 }

+static void nbio_v7_0_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+                     int doorbell_index, int instance)
+{
+    u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
+
+    u32 doorbell_range = RREG32(reg);
+
+    if (use_doorbell) {
+        doorbell_range = REG_SET_FIELD(doorbell_range,
+                BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
+                doorbell_index);
+        doorbell_range = REG_SET_FIELD(doorbell_range,
+                BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
+    } else
+        doorbell_range = REG_SET_FIELD(doorbell_range,
+                BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);
+
+    WREG32(reg, doorbell_range);
+}
+
 static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
                         bool enable)
 {
@@ -282,6 +302,7 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
     .hdp_flush = nbio_v7_0_hdp_flush,
     .get_memsize = nbio_v7_0_get_memsize,
     .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+    .vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range,
     .enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
     .enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
     .ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
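nbio_v7_0_vcn_doorbell_range() is a textbook REG_SET_FIELD read-modify-write: read the range register, replace the OFFSET and SIZE fields, write it back. A self-contained sketch of the field update; the shifts and widths below are placeholders, not the real BIF_MMSCH0_DOORBELL_RANGE layout:

#include <stdint.h>

#define OFFSET_SHIFT    2
#define OFFSET_MASK     (0x3ffu << OFFSET_SHIFT)    /* bits 2..11 */
#define SIZE_SHIFT      16
#define SIZE_MASK       (0xffu << SIZE_SHIFT)      /* bits 16..23 */

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t val)
{
    /* clear the field, then insert the new value within its mask */
    return (reg & ~mask) | ((val << shift) & mask);
}

static uint32_t program_doorbell_range(uint32_t range, int use_doorbell,
                                       uint32_t doorbell_index)
{
    if (use_doorbell) {
        range = set_field(range, OFFSET_MASK, OFFSET_SHIFT,
                          doorbell_index);
        range = set_field(range, SIZE_MASK, SIZE_SHIFT, 8);
    } else {
        /* SIZE of 0 disables the range without touching OFFSET */
        range = set_field(range, SIZE_MASK, SIZE_SHIFT, 0);
    }
    return range;
}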
drivers/gpu/drm/amd/amdgpu/nv.c
@@ -576,7 +576,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =

 static int nv_common_early_init(void *handle)
 {
-    bool psp_enabled = false;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;

     adev->smc_rreg = NULL;
@@ -593,10 +592,6 @@ static int nv_common_early_init(void *handle)

     adev->asic_funcs = &nv_asic_funcs;

-    if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
-        (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
-        psp_enabled = true;
-
     adev->rev_id = nv_get_rev_id(adev);
     adev->external_rev_id = 0xff;
     switch (adev->asic_type) {
@@ -617,7 +612,6 @@ static int nv_common_early_init(void *handle)
             AMD_CG_SUPPORT_BIF_LS;
         adev->pg_flags = AMD_PG_SUPPORT_VCN |
             AMD_PG_SUPPORT_VCN_DPG |
-            AMD_PG_SUPPORT_MMHUB |
             AMD_PG_SUPPORT_ATHUB;
         adev->external_rev_id = adev->rev_id + 0x1;
         break;
@@ -641,7 +635,21 @@ static int nv_common_early_init(void *handle)
         adev->external_rev_id = adev->rev_id + 20;
         break;
     case CHIP_NAVI12:
-        adev->cg_flags = 0;
+        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+            AMD_CG_SUPPORT_GFX_MGLS |
+            AMD_CG_SUPPORT_GFX_CGCG |
+            AMD_CG_SUPPORT_GFX_CP_LS |
+            AMD_CG_SUPPORT_GFX_RLC_LS |
+            AMD_CG_SUPPORT_IH_CG |
+            AMD_CG_SUPPORT_HDP_MGCG |
+            AMD_CG_SUPPORT_HDP_LS |
+            AMD_CG_SUPPORT_SDMA_MGCG |
+            AMD_CG_SUPPORT_SDMA_LS |
+            AMD_CG_SUPPORT_MC_MGCG |
+            AMD_CG_SUPPORT_MC_LS |
+            AMD_CG_SUPPORT_ATHUB_MGCG |
+            AMD_CG_SUPPORT_ATHUB_LS |
+            AMD_CG_SUPPORT_VCN_MGCG;
         adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
         adev->external_rev_id = adev->rev_id + 0xa;
         break;
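The Navi12 block above enables clock gating by OR-ing one bit per supported feature into adev->cg_flags; every consumer then tests individual bits, as the clock-gating hunks elsewhere in this merge do. A reduced sketch of that flag-word pattern, with made-up feature bits in place of the AMD_CG_SUPPORT_* constants:

#include <stdint.h>
#include <stdbool.h>

#define CG_GFX_MGCG (1u << 0)
#define CG_GFX_CGCG (1u << 1)
#define CG_MC_LS    (1u << 2)

struct dev_flags {
    uint32_t cg_flags;
};

static void init_flags(struct dev_flags *d)
{
    /* one assignment enumerates every supported feature */
    d->cg_flags = CG_GFX_MGCG | CG_GFX_CGCG | CG_MC_LS;
}

static bool supports(const struct dev_flags *d, uint32_t feature)
{
    return (d->cg_flags & feature) != 0;
}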
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -190,7 +190,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
 }

 static int psp_v10_0_cmd_submit(struct psp_context *psp,
-                struct amdgpu_firmware_info *ucode,
                 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
                 int index)
 {
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -498,7 +498,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
 }

 static int psp_v11_0_cmd_submit(struct psp_context *psp,
-                struct amdgpu_firmware_info *ucode,
                 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
                 int index)
 {
|
|
@ -0,0 +1,565 @@
|
|||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/module.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_psp.h"
|
||||
#include "amdgpu_ucode.h"
|
||||
#include "soc15_common.h"
|
||||
#include "psp_v12_0.h"
|
||||
|
||||
#include "mp/mp_12_0_0_offset.h"
|
||||
#include "mp/mp_12_0_0_sh_mask.h"
|
||||
#include "gc/gc_9_0_offset.h"
|
||||
#include "sdma0/sdma0_4_0_offset.h"
|
||||
#include "nbio/nbio_7_4_offset.h"
|
||||
|
||||
#include "oss/osssys_4_0_offset.h"
|
||||
#include "oss/osssys_4_0_sh_mask.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
|
||||
/* address block */
|
||||
#define smnMP1_FIRMWARE_FLAGS 0x3010024
|
||||
|
||||
static int psp_v12_0_init_microcode(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
const char *chip_name;
|
||||
char fw_name[30];
|
||||
int err = 0;
|
||||
const struct psp_firmware_header_v1_0 *asd_hdr;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_RENOIR:
|
||||
chip_name = "renoir";
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
|
||||
err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
|
||||
if (err)
|
||||
goto out1;
|
||||
|
||||
err = amdgpu_ucode_validate(adev->psp.asd_fw);
|
||||
if (err)
|
||||
goto out1;
|
||||
|
||||
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
|
||||
adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
|
||||
adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
|
||||
adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
|
||||
adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
|
||||
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
|
||||
|
||||
return 0;
|
||||
|
||||
out1:
|
||||
release_firmware(adev->psp.asd_fw);
|
||||
adev->psp.asd_fw = NULL;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
/* Check sOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
if (sol_reg) {
|
||||
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
|
||||
printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
|
||||
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
|
||||
0x80000000, 0x80000000, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
|
||||
|
||||
/* Copy PSP System Driver binary to memory */
|
||||
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
|
||||
|
||||
/* Provide the sys driver to bootloader */
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
|
||||
(uint32_t)(psp->fw_pri_mc_addr >> 20));
|
||||
psp_gfxdrv_command_reg = 1 << 16;
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
|
||||
psp_gfxdrv_command_reg);
|
||||
|
||||
/* there might be handshake issue with hardware which needs delay */
|
||||
mdelay(20);
|
||||
|
||||
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
|
||||
0x80000000, 0x80000000, false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
unsigned int psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
/* Check sOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
if (sol_reg)
|
||||
return 0;
|
||||
|
||||
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
|
||||
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
			   0x80000000, 0x80000000, false);
	if (ret)
		return ret;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);

	/* Copy Secure OS binary to PSP memory */
	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);

	/* Provide the PSP secure OS to bootloader */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
		     (uint32_t)(psp->fw_pri_mc_addr >> 20));
	psp_gfxdrv_command_reg = 2 << 16;
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
		     psp_gfxdrv_command_reg);

	/* there might be handshake issue with hardware which needs delay */
	mdelay(20);
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
			   0, true);

	return ret;
}

static void psp_v12_0_reroute_ih(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t tmp;

	/* Change IH ring for VMC */
	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);

	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);

	mdelay(20);
	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
		     0x80000000, 0x8000FFFF, false);

	/* Change IH ring for UMC */
	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);

	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);

	mdelay(20);
	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
		     0x80000000, 0x8000FFFF, false);
}

static int psp_v12_0_ring_init(struct psp_context *psp,
			       enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	psp_v12_0_reroute_ih(psp);

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
{
	if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
		return true;
	return false;
}

static int psp_v12_0_ring_create(struct psp_context *psp,
				 enum psp_ring_type ring_type)
{
	int ret = 0;
	unsigned int psp_ring_reg = 0;
	struct psp_ring *ring = &psp->km_ring;
	struct amdgpu_device *adev = psp->adev;

	if (psp_v12_0_support_vmr_ring(psp)) {
		/* Write low address of the ring to C2PMSG_102 */
		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
		/* Write high address of the ring to C2PMSG_103 */
		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);

		/* Write the ring initialization command to C2PMSG_101 */
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
			     GFX_CTRL_CMD_ID_INIT_GPCOM_RING);

		/* there might be handshake issue with hardware which needs delay */
		mdelay(20);

		/* Wait for response flag (bit 31) in C2PMSG_101 */
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
				   0x80000000, 0x8000FFFF, false);

	} else {
		/* Write low address of the ring to C2PMSG_69 */
		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
		/* Write high address of the ring to C2PMSG_70 */
		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
		/* Write size of ring to C2PMSG_71 */
		psp_ring_reg = ring->ring_size;
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
		/* Write the ring initialization command to C2PMSG_64 */
		psp_ring_reg = ring_type;
		psp_ring_reg = psp_ring_reg << 16;
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);

		/* there might be handshake issue with hardware which needs delay */
		mdelay(20);

		/* Wait for response flag (bit 31) in C2PMSG_64 */
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
				   0x80000000, 0x8000FFFF, false);
	}

	return ret;
}

static int psp_v12_0_ring_stop(struct psp_context *psp,
			       enum psp_ring_type ring_type)
{
	int ret = 0;
	struct amdgpu_device *adev = psp->adev;

	/* Write the ring destroy command*/
	if (psp_v12_0_support_vmr_ring(psp))
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
			     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
	else
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
			     GFX_CTRL_CMD_ID_DESTROY_RINGS);

	/* there might be handshake issue with hardware which needs delay */
	mdelay(20);

	/* Wait for response flag (bit 31) */
	if (psp_v12_0_support_vmr_ring(psp))
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
				   0x80000000, 0x80000000, false);
	else
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
				   0x80000000, 0x80000000, false);

	return ret;
}

static int psp_v12_0_ring_destroy(struct psp_context *psp,
				  enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring = &psp->km_ring;
	struct amdgpu_device *adev = psp->adev;

	ret = psp_v12_0_ring_stop(psp, ring_type);
	if (ret)
		DRM_ERROR("Fail to stop psp ring\n");

	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
			      &ring->ring_mem_mc_addr,
			      (void **)&ring->ring_mem);

	return ret;
}

static int psp_v12_0_cmd_submit(struct psp_context *psp,
				uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
				int index)
{
	unsigned int psp_write_ptr_reg = 0;
	struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem;
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
	struct amdgpu_device *adev = psp->adev;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

	/* KM (GPCOM) prepare write pointer */
	if (psp_v12_0_support_vmr_ring(psp))
		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
	else
		psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);

	/* Update KM RB frame pointer to new frame */
	/* write_frame ptr increments by size of rb_frame in bytes */
	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
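	/*
	 * Worked example (illustrative, using the 4 KiB ring allocated in
	 * psp_v12_0_ring_init): ring_size_dw = 0x1000 / 4 = 1024, so a
	 * write pointer of N * rb_frame_size_dw selects the N-th frame
	 * after ring_buffer_start, and a pointer that has advanced to a
	 * multiple of ring_size_dw wraps back to the first frame.
	 */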
	if ((psp_write_ptr_reg % ring_size_dw) == 0)
		write_frame = ring_buffer_start;
	else
		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
	/* Check invalid write_frame ptr address */
	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
			  ring_buffer_start, ring_buffer_end, write_frame);
		DRM_ERROR("write_frame is pointing to address out of bounds\n");
		return -EINVAL;
	}

	/* Initialize KM RB frame */
	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));

	/* Update KM RB frame */
	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	write_frame->fence_value = index;

	/* Update the write Pointer in DWORDs */
	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
	if (psp_v12_0_support_vmr_ring(psp)) {
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
	} else
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);

	return 0;
}

static int
psp_v12_0_sram_map(struct amdgpu_device *adev,
		   unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
		   unsigned int *sram_data_reg_offset,
		   enum AMDGPU_UCODE_ID ucode_id)
{
	int ret = 0;

	switch (ucode_id) {
/* TODO: needs to confirm */
#if 0
	case AMDGPU_UCODE_ID_SMC:
		*sram_offset = 0;
		*sram_addr_reg_offset = 0;
		*sram_data_reg_offset = 0;
		break;
#endif

	case AMDGPU_UCODE_ID_CP_CE:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_PFP:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_ME:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_MEC1:
		*sram_offset = 0x10000;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_MEC2:
		*sram_offset = 0x10000;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_RLC_G:
		*sram_offset = 0x2000;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_SDMA0:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
		break;

/* TODO: needs to confirm */
#if 0
	case AMDGPU_UCODE_ID_SDMA1:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;

	case AMDGPU_UCODE_ID_UVD:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;

	case AMDGPU_UCODE_ID_VCE:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;
#endif

	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
					struct amdgpu_firmware_info *ucode,
					enum AMDGPU_UCODE_ID ucode_type)
{
	int err = 0;
	unsigned int fw_sram_reg_val = 0;
	unsigned int fw_sram_addr_reg_offset = 0;
	unsigned int fw_sram_data_reg_offset = 0;
	unsigned int ucode_size;
	uint32_t *ucode_mem = NULL;
	struct amdgpu_device *adev = psp->adev;

	err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
				 &fw_sram_data_reg_offset, ucode_type);
	if (err)
		return false;

	WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);

	ucode_size = ucode->ucode_size;
	ucode_mem = (uint32_t *)ucode->kaddr;
	while (ucode_size) {
		fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);

		if (*ucode_mem != fw_sram_reg_val)
			return false;

		ucode_mem++;
		/* 4 bytes */
		ucode_size -= 4;
	}

	return true;
}

static int psp_v12_0_mode1_reset(struct psp_context *psp)
{
	int ret;
	uint32_t offset;
	struct amdgpu_device *adev = psp->adev;

	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);

	ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);

	if (ret) {
		DRM_INFO("psp is not working correctly before mode1 reset!\n");
		return -EINVAL;
	}

	/*send the mode 1 reset command*/
	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);

	msleep(500);

	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);

	ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);

	if (ret) {
		DRM_INFO("psp mode 1 reset failed!\n");
		return -EINVAL;
	}

	DRM_INFO("psp mode1 reset succeed \n");

	return 0;
}

static const struct psp_funcs psp_v12_0_funcs = {
	.init_microcode = psp_v12_0_init_microcode,
	.bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
	.bootloader_load_sos = psp_v12_0_bootloader_load_sos,
	.ring_init = psp_v12_0_ring_init,
	.ring_create = psp_v12_0_ring_create,
	.ring_stop = psp_v12_0_ring_stop,
	.ring_destroy = psp_v12_0_ring_destroy,
	.cmd_submit = psp_v12_0_cmd_submit,
	.compare_sram_data = psp_v12_0_compare_sram_data,
	.mode1_reset = psp_v12_0_mode1_reset,
};

void psp_v12_0_set_psp_funcs(struct psp_context *psp)
{
	psp->funcs = &psp_v12_0_funcs;
}

@@ -0,0 +1,30 @@
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __PSP_V12_0_H__
#define __PSP_V12_0_H__

#include "amdgpu_psp.h"

void psp_v12_0_set_psp_funcs(struct psp_context *psp);

#endif

@@ -411,7 +411,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
}

static int psp_v3_1_cmd_submit(struct psp_context *psp,
			       struct amdgpu_firmware_info *ucode,
			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
			       int index)
{

@@ -68,6 +68,7 @@ MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

@@ -243,6 +244,18 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
	SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
};

static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
};

static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{

@@ -367,6 +380,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
						golden_settings_sdma_rv1,
						ARRAY_SIZE(golden_settings_sdma_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4_3,
						ARRAY_SIZE(golden_settings_sdma_4_3));
		break;
	default:
		break;
	}

@@ -452,6 +470,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		chip_name = "renoir";
		break;
	default:
		BUG();
	}

@@ -1640,7 +1661,7 @@ static int sdma_v4_0_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->asic_type == CHIP_RAVEN)
	if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR)
		adev->sdma.num_instances = 1;
	else if (adev->asic_type == CHIP_ARCTURUS)
		adev->sdma.num_instances = 8;

@@ -2086,61 +2107,35 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
		bool enable)
{
	uint32_t data, def;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		/* enable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		for (i = 0; i < adev->sdma.num_instances; i++) {
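			/*
			 * Every SDMA instance exposes the same SDMA0 register
			 * layout; the RREG32_SDMA()/WREG32_SDMA() helpers remap
			 * mmSDMA0_CLK_CTRL to the offset of instance i, so one
			 * loop covers all adev->sdma.num_instances engines
			 * (1 on Raven/Renoir, 8 on Arcturus per early_init).
			 */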
			def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
				WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
		}
	} else {
		/* disable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
				WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
		}
	}
}

@@ -2151,34 +2146,23 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
		bool enable)
{
	uint32_t data, def;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		/* 1-not override: enable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 1-not override: enable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* 1-not override: enable sdma mem light sleep */
			def = data = RREG32_SDMA(0, mmSDMA0_POWER_CNTL);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
				WREG32_SDMA(0, mmSDMA0_POWER_CNTL, data);
		}
	} else {
		/* 0-override:disable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 0-override:disable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* 0-override:disable sdma mem light sleep */
			def = data = RREG32_SDMA(0, mmSDMA0_POWER_CNTL);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
				WREG32_SDMA(0, mmSDMA0_POWER_CNTL, data);
		}
	}
}

@@ -2196,6 +2180,8 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		sdma_v4_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		sdma_v4_0_update_medium_grain_light_sleep(adev,

@@ -1516,6 +1516,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		sdma_v5_0_update_medium_grain_light_sleep(adev,

@@ -1627,7 +1628,8 @@ static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {

static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

@@ -63,6 +63,7 @@
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "vcn_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"

@@ -508,6 +509,15 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
	return 0;
}

static int soc15_mode2_reset(struct amdgpu_device *adev)
{
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{

@@ -546,14 +556,14 @@ soc15_asic_reset_method(struct amdgpu_device *adev)

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	int ret;

	if (soc15_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		ret = soc15_asic_baco_reset(adev);
	else
		ret = soc15_asic_mode1_reset(adev);

	return ret;
	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		return soc15_mode2_reset(adev);
	default:
		return soc15_asic_mode1_reset(adev);
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,

@@ -637,6 +647,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:

@@ -743,6 +754,20 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		if (is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

@@ -1111,18 +1136,54 @@ static int soc15_common_early_init(void *handle)

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x91;

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;

@@ -1256,7 +1317,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))

@@ -1375,6 +1437,7 @@ static int soc15_common_set_clockgating_state(void *handle,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,

@@ -1388,6 +1451,10 @@ static int soc15_common_set_clockgating_state(void *handle,
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}

@@ -247,7 +247,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	if ((adev->asic_type == CHIP_ARCTURUS || adev->asic_type == CHIP_RENOIR) &&
	    adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		if (adev->irq.ih.use_bus_addr) {
			ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);

@@ -81,6 +81,10 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
	adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
	adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
	adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_DOORBELL64_VCN6_7;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;

@@ -42,6 +42,7 @@ static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.asic_name = "kaveri",
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd	= 24,

@@ -60,6 +61,7 @@ static const struct kfd_device_info kaveri_device_info = {

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.asic_name = "carrizo",
	.max_pasid_bits = 16,
	/* max num of queues for CZ.TODO should be a dynamic value */
	.max_no_of_hqd	= 24,

@@ -78,6 +80,7 @@ static const struct kfd_device_info carrizo_device_info = {

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.asic_name = "raven",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,

@@ -96,6 +99,7 @@ static const struct kfd_device_info raven_device_info = {

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.asic_name = "hawaii",
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd	= 24,

@@ -114,6 +118,7 @@ static const struct kfd_device_info hawaii_device_info = {

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.asic_name = "tonga",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -131,6 +136,7 @@ static const struct kfd_device_info tonga_device_info = {

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -148,6 +154,7 @@ static const struct kfd_device_info fiji_device_info = {

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -166,6 +173,7 @@ static const struct kfd_device_info fiji_vf_device_info = {

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -183,6 +191,7 @@ static const struct kfd_device_info polaris10_device_info = {

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -200,6 +209,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.asic_name = "polaris11",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -217,6 +227,7 @@ static const struct kfd_device_info polaris11_device_info = {

static const struct kfd_device_info polaris12_device_info = {
	.asic_family = CHIP_POLARIS12,
	.asic_name = "polaris12",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -234,6 +245,7 @@ static const struct kfd_device_info polaris12_device_info = {

static const struct kfd_device_info vegam_device_info = {
	.asic_family = CHIP_VEGAM,
	.asic_name = "vegam",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,

@@ -251,6 +263,7 @@ static const struct kfd_device_info vegam_device_info = {

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,

@@ -268,6 +281,7 @@ static const struct kfd_device_info vega10_device_info = {

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,

@@ -285,6 +299,7 @@ static const struct kfd_device_info vega10_vf_device_info = {

static const struct kfd_device_info vega12_device_info = {
	.asic_family = CHIP_VEGA12,
	.asic_name = "vega12",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,

@@ -302,6 +317,7 @@ static const struct kfd_device_info vega12_device_info = {

static const struct kfd_device_info vega20_device_info = {
	.asic_family = CHIP_VEGA20,
	.asic_name = "vega20",
	.max_pasid_bits	= 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,

@@ -319,6 +335,7 @@ static const struct kfd_device_info vega20_device_info = {

static const struct kfd_device_info arcturus_device_info = {
	.asic_family = CHIP_ARCTURUS,
	.asic_name = "arcturus",
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,

@@ -336,6 +353,7 @@ static const struct kfd_device_info arcturus_device_info = {

static const struct kfd_device_info navi10_device_info = {
	.asic_family = CHIP_NAVI10,
	.asic_name = "navi10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,

@@ -472,6 +490,7 @@ static const struct kfd_deviceid supported_devices[] = {
	{ 0x738C, &arcturus_device_info },	/* Arcturus */
	{ 0x7388, &arcturus_device_info },	/* Arcturus */
	{ 0x738E, &arcturus_device_info },	/* Arcturus */
	{ 0x7390, &arcturus_device_info },	/* Arcturus vf */
	{ 0x7310, &navi10_device_info },	/* Navi10 */
	{ 0x7312, &navi10_device_info },	/* Navi10 */
	{ 0x7318, &navi10_device_info },	/* Navi10 */

@@ -1814,7 +1814,8 @@ out_free:
	return NULL;
}

void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, struct kfd_mem_obj *mqd)
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

@@ -195,6 +195,7 @@ struct kfd_event_interrupt_class {

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const char *asic_name;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;

@@ -801,6 +801,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;

@@ -1042,7 +1044,6 @@ static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int ret = 0;

	dwork = to_delayed_work(work);

@@ -1051,16 +1052,6 @@ static void restore_process_worker(struct work_struct *work)
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);

	/* Call restore_process_bos on the first KGD device. This function
	 * takes care of restoring the whole process including other devices.
	 * Restore can fail if enough memory is not available. If so,
	 * reschedule again.
	 */
	pdd = list_first_entry(&p->per_device_data,
			       struct kfd_process_device,
			       per_device_list);

	pr_debug("Started restoring pasid %d\n", p->pasid);

	/* Setting last_restore_timestamp before successful restoration.

@@ -406,8 +406,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
		char *buffer)
{
	struct kfd_topology_device *dev;
	char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
	uint32_t i;
	uint32_t log_max_watch_addr;

	/* Making sure that the buffer is an empty string */

@@ -422,14 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
	if (strcmp(attr->name, "name") == 0) {
		dev = container_of(attr, struct kfd_topology_device,
				attr_name);
		for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
			public_name[i] =
					(char)dev->node_props.marketing_name[i];
			if (dev->node_props.marketing_name[i] == 0)
				break;
		}
		public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
		return sysfs_show_str_val(buffer, public_name);

		return sysfs_show_str_val(buffer, dev->node_props.name);
	}

	dev = container_of(attr, struct kfd_topology_device,

@@ -1274,6 +1266,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
	 */

	amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);

	strncpy(dev->node_props.name, gpu->device_info->asic_name,
			KFD_TOPOLOGY_PUBLIC_NAME_SIZE);

	dev->node_props.simd_arrays_per_engine =
		cu_info.num_shader_arrays_per_engine;

@@ -27,7 +27,7 @@
#include <linux/list.h>
#include "kfd_crat.h"

#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32

#define HSA_CAP_HOT_PLUGGABLE			0x00000001
#define HSA_CAP_ATS_PRESENT			0x00000002

@@ -81,7 +81,7 @@ struct kfd_node_properties {
	int32_t  drm_render_minor;
	uint32_t num_sdma_engines;
	uint32_t num_sdma_xgmi_engines;
	uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
	char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};

#define HSA_MEM_HEAP_TYPE_SYSTEM	0

@@ -694,6 +694,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	init_data.flags.power_down_display_on_boot = true;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0

@@ -3006,6 +3009,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)

@@ -3071,6 +3076,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0

	/*
	 * Always set input transfer function, since plane state is refreshed

@@ -3142,13 +3148,25 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state)
{
	uint32_t bpc = connector->display_info.bpc;
	uint8_t bpc = (uint8_t)connector->display_info.bpc;

	/* Assume 8 bpc by default if no bpc is specified. */
	bpc = bpc ? bpc : 8;

	if (!state)
		state = connector->state;

	if (state) {
		bpc = state->max_bpc;
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

@@ -3502,6 +3520,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
#endif

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {

@@ -3574,17 +3596,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
			&mode, &aconnector->base, con_state, old_stream);

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	/* stream->timing.flags.DSC = 0; */
	/* */
	/* if (aconnector->dc_link && */
	/*	aconnector->dc_link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT #<{(|&& */
	/*	aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported|)}>#) */
	/*	if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, */
	/*			&aconnector->dc_link->dpcd_caps.dsc_caps, */
	/*			dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)), */
	/*			&stream->timing, */
	/*			&stream->timing.dsc_cfg)) */
	/*		stream->timing.flags.DSC = 1; */
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
						  &dsc_caps,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
	}
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

@@ -6003,11 +6031,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
		/* The stream has changed so CRC capture needs to be re-enabled. */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			dm_new_crtc_state->crc_src = AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
			if (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC)
				amdgpu_dm_crtc_set_crc_source(crtc, "crtc");
			else if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX)
				amdgpu_dm_crtc_set_crc_source(crtc, "dprx");
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}

@@ -6058,23 +6084,8 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			/*
			 * Drop the extra vblank reference added by CRC
			 * capture if applicable.
			 */
			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src))
				drm_crtc_vblank_put(crtc);

			/*
			 * Only keep CRC capture enabled if there's
			 * still a stream for the CRTC.
			 */
			if (!dm_new_crtc_state->stream)
				dm_new_crtc_state->crc_src = AMDGPU_DM_PIPE_CRC_SOURCE_NONE;

		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to

@@ -97,17 +97,52 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
	return 0;
}

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
	struct dc_stream_state *stream_state = crtc_state->stream;
	struct amdgpu_dm_connector *aconn;
	struct dc_stream_state *stream_state = dm_crtc_state->stream;
	bool enable = amdgpu_dm_is_valid_crc_source(source);
	int ret = 0;

	/* Configuration will be deferred to stream enable. */
	if (!stream_state)
		return 0;

	mutex_lock(&adev->dm.dc_lock);

	/* Enable CRTC CRC generation if necessary. */
	if (dm_is_crc_source_crtc(source)) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc,
					     stream_state, enable, enable)) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source))
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
	else
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);

unlock:
	mutex_unlock(&adev->dm.dc_lock);

	return ret;
}

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	struct drm_crtc_commit *commit;
	struct dm_crtc_state *crtc_state;
	struct drm_dp_aux *aux = NULL;
	bool enable = false;
	bool enabled = false;

	enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",

@@ -115,14 +150,34 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
		return -EINVAL;
	}

	if (!stream_state) {
		DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
		return -EINVAL;
	}
	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	spin_lock(&crtc->commit_lock);
	commit = list_first_entry_or_null(&crtc->commit_list,
					  struct drm_crtc_commit, commit_entry);
	if (commit)
		drm_crtc_commit_get(commit);
	spin_unlock(&crtc->commit_lock);

	if (commit) {
		/*
		 * Need to wait for all outstanding programming to complete
		 * in commit tail since it can modify CRC related fields and
		 * hardware state. Since we're holding the CRTC lock we're
		 * guaranteed that no other commit work can be queued off
		 * before we modify the state below.
		 */
		ret = wait_for_completion_interruptible_timeout(
			&commit->hw_done, 10 * HZ);
		if (ret)
			goto cleanup;
	}

	enable = amdgpu_dm_is_valid_crc_source(source);
	crtc_state = to_dm_crtc_state(crtc->state);

	mutex_lock(&adev->dm.dc_lock);
	/*
	 * USER REQ SRC | CURRENT SRC | BEHAVIOR
	 * -----------------------------

@@ -137,38 +192,41 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
	 * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
	 */
	if (dm_is_crc_source_dprx(source) ||
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(crtc_state->crc_src))) {
		aconn = stream_state->link->priv;
	    (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
	     dm_is_crc_source_dprx(crtc_state->crc_src))) {
		struct amdgpu_dm_connector *aconn = NULL;
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (!connector->state || connector->state->crtc != crtc)
				continue;

			aconn = to_amdgpu_dm_connector(connector);
			break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!aconn) {
			DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
			mutex_unlock(&adev->dm.dc_lock);
			return -EINVAL;
			ret = -EINVAL;
			goto cleanup;
		}

		aux = &aconn->dm_dp_aux.aux;

		if (!aux) {
			DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
			mutex_unlock(&adev->dm.dc_lock);
			return -EINVAL;
		}
	} else if (dm_is_crc_source_crtc(source)) {
		if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
					     enable, enable)) {
			mutex_unlock(&adev->dm.dc_lock);
			return -EINVAL;
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* configure dithering */
	if (!dm_need_crc_dither(source))
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
	else if (!dm_need_crc_dither(crtc_state->crc_src))
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT);

	mutex_unlock(&adev->dm.dc_lock);
	if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * Reading the CRC requires the vblank interrupt handler to be

@@ -176,11 +234,15 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
	 */
	enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src);
	if (!enabled && enable) {
		drm_crtc_vblank_get(crtc);
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;

		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_start_crc(aux, crtc)) {
				DRM_DEBUG_DRIVER("dp start crc failed\n");
				return -EINVAL;
				ret = -EINVAL;
				goto cleanup;
			}
		}
	} else if (enabled && !enable) {

@@ -188,7 +250,8 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
		if (dm_is_crc_source_dprx(source)) {
			if (drm_dp_stop_crc(aux)) {
				DRM_DEBUG_DRIVER("dp stop crc failed\n");
				return -EINVAL;
				ret = -EINVAL;
				goto cleanup;
			}
		}
	}

@@ -197,7 +260,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)

	/* Reset crc_skipped on dm state */
	crtc_state->crc_skip_count = 0;
	return 0;

cleanup:
	if (commit)
		drm_crtc_commit_put(commit);

	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

/**

@@ -26,6 +26,9 @@
#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_

struct drm_crtc;
struct dm_crtc_state;

enum amdgpu_dm_pipe_crc_source {
	AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
	AMDGPU_DM_PIPE_CRC_SOURCE_CRTC,

@@ -44,6 +47,9 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source

/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
					struct dm_crtc_state *dm_crtc_state,
					enum amdgpu_dm_pipe_crc_source source);
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
				     const char *src_name,

@@ -1053,9 +1053,33 @@ static int target_backlight_read(struct seq_file *m, void *data)
	return 0;
}
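
/*
 * Walk every DisplayPort connector registered on the device and dump the
 * MST topology tracked by its drm_dp_mst_topology_mgr into the debugfs file.
 */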
static int mst_topo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
		drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
	{"amdgpu_current_backlight_pwm", &current_backlight_read},
	{"amdgpu_target_backlight_pwm", &target_backlight_read},
	{"amdgpu_mst_topology", &mst_topo},
};

/*

@@ -548,7 +548,9 @@ bool dm_helpers_dp_write_dsc_enable(
		bool enable
)
{
	return false;
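	/*
	 * DP_DSC_ENABLE is a single-byte DPCD register on the sink, so one
	 * one-byte write is enough to toggle decompression on or off.
	 */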
	uint8_t enable_dsc = enable ? 1 : 0;

	return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
}
#endif

@@ -2796,8 +2796,6 @@ static const struct dc_vbios_funcs vbios_funcs = {

	.get_device_tag = bios_parser_get_device_tag,

	.get_firmware_info = bios_parser_get_firmware_info,

	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,

	.get_ss_entry_number = bios_parser_get_ss_entry_number,

@@ -2922,6 +2920,7 @@ static bool bios_parser_construct(
	dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);

	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
	bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;

	return true;
}

@@ -1881,8 +1881,6 @@ static const struct dc_vbios_funcs vbios_funcs = {

	.get_device_tag = bios_parser_get_device_tag,

	.get_firmware_info = bios_parser_get_firmware_info,

	.get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,

	.get_ss_entry_number = bios_parser_get_ss_entry_number,

@@ -1998,6 +1996,7 @@ static bool bios_parser_construct(
	dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);

	bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
	bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;

	return true;
}

@@ -153,38 +153,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)

static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t atom_dig_encoder_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
		break;
	case ENGINE_ID_DIGB:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
		break;
	case ENGINE_ID_DIGC:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
		break;
	case ENGINE_ID_DIGD:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
		break;
	case ENGINE_ID_DIGE:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
		break;
	case ENGINE_ID_DIGF:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
		break;
	case ENGINE_ID_DIGG:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
		break;
	case ENGINE_ID_UNKNOWN:
		/* No DIG_FRONT is associated to DIG_BACKEND */
		atom_dig_encoder_sel = 0;
		break;
	default:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
		break;
	}
	/* On any ASIC after DCE80, we manually program the DIG_FE
	 * selection (see connect_dig_be_to_fe function of the link
	 * encoder), so translation should always return 0 (no FE).
	 */

	return 0;
}

@@ -150,38 +150,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)

static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t atom_dig_encoder_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	case ENGINE_ID_DIGB:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
		break;
	case ENGINE_ID_DIGC:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
		break;
	case ENGINE_ID_DIGD:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
		break;
	case ENGINE_ID_DIGE:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
		break;
	case ENGINE_ID_DIGF:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
		break;
	case ENGINE_ID_DIGG:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
		break;
	case ENGINE_ID_UNKNOWN:
		/* No DIG_FRONT is associated to DIG_BACKEND */
		atom_dig_encoder_sel = 0;
		break;
	default:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	}
	/* On any ASIC after DCE80, we manually program the DIG_FE
	 * selection (see connect_dig_be_to_fe function of the link
	 * encoder), so translation should always return 0 (no FE).
	 */

	return 0;
}

@@ -150,38 +150,10 @@ static uint8_t hpd_sel_to_atom(enum hpd_source_id id)

static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
{
	uint8_t atom_dig_encoder_sel = 0;

	switch (id) {
	case ENGINE_ID_DIGA:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	case ENGINE_ID_DIGB:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
		break;
	case ENGINE_ID_DIGC:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
		break;
	case ENGINE_ID_DIGD:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
		break;
	case ENGINE_ID_DIGE:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
		break;
	case ENGINE_ID_DIGF:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
		break;
	case ENGINE_ID_DIGG:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
		break;
	case ENGINE_ID_UNKNOWN:
		/* No DIG_FRONT is associated to DIG_BACKEND */
		atom_dig_encoder_sel = 0;
		break;
	default:
		atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
		break;
	}
	/* On any ASIC after DCE80, we manually program the DIG_FE
	 * selection (see connect_dig_be_to_fe function of the link
	 * encoder), so translation should always return 0 (no FE).
	 */

	return 0;
}

@@ -25,6 +25,7 @@

#include <linux/slab.h>

#include "resource.h"
#include "dm_services.h"
#include "dce_calcs.h"
#include "dc.h"

@@ -2977,6 +2978,32 @@ static void populate_initial_data(
	data->number_of_displays = num_displays;
}

static bool all_displays_in_sync(const struct pipe_ctx pipe[],
		int pipe_count)
{
	const struct pipe_ctx *active_pipes[MAX_PIPES];
	int i, num_active_pipes = 0;

	for (i = 0; i < pipe_count; i++) {
		if (!pipe[i].stream || pipe[i].top_pipe)
			continue;

		active_pipes[num_active_pipes++] = &pipe[i];
	}

	if (!num_active_pipes)
		return false;

	for (i = 1; i < num_active_pipes; ++i) {
		if (!resource_are_streams_timing_synchronizable(
			active_pipes[0]->stream, active_pipes[i]->stream)) {
			return false;
		}
	}

	return true;
}

/**
 * Return:
 *	true - Display(s) configuration supported.

@@ -2998,8 +3025,10 @@ bool bw_calcs(struct dc_context *ctx,

	populate_initial_data(pipe, pipe_count, data);

	/* TODO: this should be taken out of calcs output and assigned during timing sync for pplib use */
	calcs_output->all_displays_in_sync = false;
	if (ctx->dc->config.multi_mon_pp_mclk_switch)
		calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
	else
		calcs_output->all_displays_in_sync = false;

	if (data->number_of_displays != 0) {
		uint8_t yclk_lvl, sclk_lvl;

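Note: all_displays_in_sync() above uses a compare-against-the-first pattern: collect the active top-level pipes, then require every remaining stream to be timing-synchronizable with the first one. A minimal standalone sketch of the same predicate follows; the are_in_sync() helper is a hypothetical stand-in for resource_are_streams_timing_synchronizable(), not the driver's implementation.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for resource_are_streams_timing_synchronizable(). */
static bool are_in_sync(int timing_a, int timing_b)
{
	return timing_a == timing_b;
}

/* True only when at least one entry is active and every active entry
 * is synchronizable with the first active one, as in the hunk above. */
static bool all_in_sync(const int timings[], const bool active[], size_t n)
{
	size_t i, first = n;

	for (i = 0; i < n; i++) {
		if (!active[i])
			continue;
		if (first == n) {
			first = i;	/* remember the reference entry */
			continue;
		}
		if (!are_in_sync(timings[first], timings[i]))
			return false;
	}
	return first != n;	/* false when nothing is active */
}

int main(void)
{
	const int timings[] = { 60, 60, 75 };
	const bool active[] = { true, true, false };

	return all_in_sync(timings, active, 3) ? 0 : 1;	/* in sync: exits 0 */
}
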
@@ -705,6 +705,13 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
		hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}


unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
{
	/* we are ok with all levels */
	return 4;
}

bool dcn_validate_bandwidth(
		struct dc *dc,
		struct dc_state *context,

@@ -732,6 +739,7 @@ bool dcn_validate_bandwidth(

	memset(v, 0, sizeof(*v));
	kernel_fpu_begin();

	v->sr_exit_time = dc->dcn_soc->sr_exit_time;
	v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
	v->urgent_latency = dc->dcn_soc->urgent_latency;

@@ -1268,7 +1276,7 @@ bool dcn_validate_bandwidth(
	PERFORMANCE_TRACE_END();
	BW_VAL_TRACE_FINISH();

	if (bw_limit_pass && v->voltage_level != 5)
	if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
		return true;
	else
		return false;

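Note: the gate in dcn_validate_bandwidth() changes from rejecting only the invalid level 5 to accepting any voltage level at or below a per-ASIC ceiling; since get_highest_allowed_voltage_level() currently returns 4 for every revision, levels 0-4 pass and 5 still fails. A minimal sketch of the new predicate, assuming that constant:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the helper above; 4 is its current "all levels are ok" value,
 * but treat the constant as an assumption of this sketch. */
static unsigned int highest_allowed_voltage_level(uint32_t hw_internal_rev)
{
	(void)hw_internal_rev;	/* per-ASIC overrides would key off this */
	return 4;
}

static bool bandwidth_ok(bool bw_limit_pass, unsigned int voltage_level,
			 uint32_t hw_internal_rev)
{
	return bw_limit_pass &&
	       voltage_level <= highest_allowed_voltage_level(hw_internal_rev);
}

int main(void)
{
	assert(bandwidth_ok(true, 4, 0));	/* at the ceiling: supported */
	assert(!bandwidth_ok(true, 5, 0));	/* above it: rejected, as before */
	return 0;
}
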
@@ -273,18 +273,12 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
{
	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_mgr_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
	}

@@ -317,9 +311,10 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)

	/*Do not allow bad VBIOS/SBIOS to override with invalid values,
	 * check for > 100MHz*/
	if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
		clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
			info.disp_clk_voltage[i].max_supported_clk;
	if (bp->integrated_info)
		if (bp->integrated_info->disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
				bp->integrated_info->disp_clk_voltage[i].max_supported_clk;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)

@@ -34,6 +34,11 @@
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"

void rv1_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
}

static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

@@ -232,6 +237,7 @@ static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
}

static struct clk_mgr_funcs rv1_clk_funcs = {
	.init_clocks = rv1_init_clocks,
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rv1_update_clocks,
	.enable_pme_wa = rv1_enable_pme_wa,

@@ -246,7 +252,6 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
{
	struct dc_debug_options *debug = &ctx->dc->debug;
	struct dc_bios *bp = ctx->dc_bios;
	struct dc_firmware_info fw_info = { { 0 } };

	clk_mgr->base.ctx = ctx;
	clk_mgr->pp_smu = pp_smu;

@@ -262,9 +267,8 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_

	if (bp->integrated_info)
		clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
	if (clk_mgr->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
	if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) {
		clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr->dentist_vco_freq_khz == 0)
			clk_mgr->dentist_vco_freq_khz = 3600000;
	}

@@ -104,7 +104,6 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
{
	int i;

	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
		int dpp_inst, dppclk_khz;


@@ -114,28 +113,75 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
		dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
		clk_mgr->dccg->funcs->update_dpp_dto(
				clk_mgr->dccg, dpp_inst, dppclk_khz);
				clk_mgr->dccg, dpp_inst, dppclk_khz, false);
	}
}

void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
{
	int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
			* clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
	int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
			* clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
			* clk_mgr->dentist_vco_freq_khz / khz;

	uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
	uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);

	REG_UPDATE(DENTIST_DISPCLK_CNTL,
			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
//	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
	REG_UPDATE(DENTIST_DISPCLK_CNTL,
			DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}

static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
{
	int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
			* clk_mgr->dentist_vco_freq_khz / khz;

	uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);

	REG_UPDATE(DENTIST_DISPCLK_CNTL,
			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
}

static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct pp_smu_funcs_nv *pp_smu = NULL;
	bool going_up = clk_mgr->base.clks.dispclk_khz < khz;

	if (dc->res_pool->pp_smu)
		pp_smu = &dc->res_pool->pp_smu->nv_funcs;

	clk_mgr->base.clks.dispclk_khz = khz;

	if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);

	update_display_clk(clk_mgr, khz);

	if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
}

static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct pp_smu_funcs_nv *pp_smu = NULL;
	bool going_up = clk_mgr->base.clks.dppclk_khz < khz;

	if (dc->res_pool->pp_smu)
		pp_smu = &dc->res_pool->pp_smu->nv_funcs;

	clk_mgr->base.clks.dppclk_khz = khz;
	clk_mgr->dccg->ref_dppclk = khz;

	if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);

	update_global_dpp_clk(clk_mgr, khz);

	if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
}

void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,

@@ -146,12 +192,14 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct pp_smu_funcs_nv *pp_smu = NULL;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool enter_display_off = false;
	bool dpp_clock_lowered = false;
	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
	bool force_reset = false;
	int i;

	if (dc->work_arounds.skip_clock_update)
		return;

	if (clk_mgr_base->clks.dispclk_khz == 0 ||
			dc->debug.force_clock_mode & 0x1) {

@@ -177,6 +225,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
	}


	if (dc->debug.force_min_dcfclk_mhz > 0)
		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
				new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);

@@ -202,10 +251,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,

	if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
		clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;

		clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
		if (pp_smu && pp_smu->set_pstate_handshake_support)
			pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
	}
	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;

	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;

@@ -213,35 +264,48 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
			pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
	if (dc->config.forced_clocks == false) {
		// First update display clock
		if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
			request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);

		if (pp_smu && pp_smu->set_voltage_by_freq)
			pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
		// Updating DPP clock requires some more logic
		if (!safe_to_lower) {
			// For pre-programming, we need to make sure any DPP clock that will go up has to go up

		update_dppclk = true;
	}
			// First raise the global reference if needed
			if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
				request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		if (pp_smu && pp_smu->set_voltage_by_freq)
			pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
			// Then raise any dividers that need raising
			for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
				int dpp_inst, dppclk_khz;

		update_dispclk = true;
	}
	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
		if (dpp_clock_lowered) {
			// if clock is being lowered, increase DTO before lowering refclk
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
			dcn20_update_clocks_update_dentist(clk_mgr);
				if (!context->res_ctx.pipe_ctx[i].plane_state)
					continue;

				dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
				dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

				clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
			}
		} else {
			// if clock is being raised, increase refclk before lowering DTO
			if (update_dppclk || update_dispclk)
				dcn20_update_clocks_update_dentist(clk_mgr);
			if (update_dppclk)
				dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
			// For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs

			if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
				request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);

			for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
				int dpp_inst, dppclk_khz;

				if (!context->res_ctx.pipe_ctx[i].plane_state)
					continue;

				dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
				dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

				clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
			}
		}
	}
	if (update_dispclk &&

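Note: the divider helpers above compute DENTIST_DIVIDER_RANGE_SCALE_FACTOR * dentist_vco_freq_khz / target_khz and then map that divider to a DID through dentist_get_did_from_divider(). Assuming the scale factor is 4 and the 3,600,000 kHz fallback VCO seen earlier in this series (both are assumptions of this sketch, not guaranteed by the diff), a 900,000 kHz target yields a divider of 16, and inverting the formula recovers the target frequency. A standalone check of the arithmetic:

#include <assert.h>

/* Assumed scale factor; treat as illustrative, not the driver's header. */
#define DIVIDER_RANGE_SCALE_FACTOR 4

/* Integer divider in scale-factor units, as in the hunk above. */
static int dentist_divider(int vco_khz, int target_khz)
{
	return DIVIDER_RANGE_SCALE_FACTOR * vco_khz / target_khz;
}

/* Inverse: the frequency a given divider actually produces. */
static int dentist_freq_khz(int vco_khz, int divider)
{
	return DIVIDER_RANGE_SCALE_FACTOR * vco_khz / divider;
}

int main(void)
{
	int vco_khz = 3600000;	/* the 3.6 GHz fallback VCO */

	assert(dentist_divider(vco_khz, 900000) == 16);
	assert(dentist_freq_khz(vco_khz, 16) == 900000);
	return 0;
}
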
@@ -23,6 +23,7 @@
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"


@@ -290,7 +291,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max);
					adjust->v_total_max,
					adjust->v_total_mid,
					adjust->v_total_mid_frame_num);

			ret = true;
		}

@@ -959,7 +962,7 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
{
	struct timing_generator *tg;
	struct dc_link *link = sink->link;
	unsigned int inst;
	unsigned int enc_inst, tg_inst;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))

@@ -971,13 +974,22 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
	 * current implementation always map 1-to-1, so this code makes
	 * the same assumption and doesn't check OTG source.
	 */
	inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;
	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	/* Instance should be within the range of the pool */
	if (inst >= dc->res_pool->pipe_count)
	if (enc_inst >= dc->res_pool->pipe_count)
		return false;

	tg = dc->res_pool->timing_generators[inst];
	if (enc_inst >= dc->res_pool->stream_enc_count)
		return false;

	tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
		dc->res_pool->stream_enc[enc_inst]);

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->is_matching_timing)
		return false;

@@ -990,10 +1002,11 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			inst, &pix_clk_100hz);
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

	}

	return true;

@@ -1183,8 +1196,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;

@@ -1204,11 +1217,11 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kmemdup(src_ctx,
			sizeof(struct dc_state), GFP_KERNEL);
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

@@ -1219,6 +1232,12 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];

	}

	for (i = 0; i < new_ctx->stream_count; i++) {

@@ -1242,7 +1261,7 @@ static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)

@@ -1602,6 +1621,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
		dc->optimized_required = true;

	return type;
}


@@ -1678,6 +1700,8 @@ static void copy_surface_update_to_plane(
			srf_update->plane_info->dcc;
		surface->sdr_white_level =
			srf_update->plane_info->sdr_white_level;
		surface->layer_index =
			srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&

@@ -1844,9 +1868,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)

@@ -1872,7 +1894,7 @@ static void commit_planes_do_stream_update(struct dc *dc,

		if (stream_update->dither_option) {
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
			struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
#endif
			resource_build_bit_depth_reduction_params(pipe_ctx->stream,
					&pipe_ctx->stream->bit_depth_params);

@@ -1880,10 +1902,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
					&stream->bit_depth_params,
					&stream->clamping);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			if (odm_pipe)
			while (odm_pipe) {
				odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				odm_pipe = odm_pipe->next_odm_pipe;
			}
#endif
		}


@@ -1900,13 +1924,21 @@ static void commit_planes_do_stream_update(struct dc *dc,

		if (stream_update->dpms_off) {
			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);

			if (*stream_update->dpms_off) {
				core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
				core_link_disable_stream(pipe_ctx);
				/* for dpms, keep acquired resources*/
				if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
					pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

				dc->hwss.optimize_bandwidth(dc, dc->current_state);
			} else {
				dc->hwss.prepare_bandwidth(dc, dc->current_state);
				if (!dc->optimize_seamless_boot)
					dc->hwss.prepare_bandwidth(dc, dc->current_state);

				core_link_enable_stream(dc->current_state, pipe_ctx);
			}

			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
		}


@@ -1996,6 +2028,7 @@ static void commit_planes_for_stream(struct dc *dc,
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

@@ -2110,7 +2143,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;
	int i;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

@@ -2148,16 +2181,6 @@ void dc_commit_updates_for_stream(struct dc *dc,

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

@@ -2247,6 +2270,14 @@ void dc_set_power_state(
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}
#endif

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);

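Note: dc_copy_state() above replaces kmemdup() with kvmalloc() plus memcpy(), then re-bases top_pipe/bottom_pipe and the newly added prev_odm_pipe/next_odm_pipe links so they point into the copy rather than the source context. The underlying pattern, deep-copy then rewrite internal pointers through a stable index, can be sketched with simplified types (not the driver's):

#include <stdlib.h>
#include <string.h>

#define N_NODES 6

struct node {
	int idx;		/* stable index of this node in its array */
	struct node *next;	/* may point at a sibling in the same array */
};

struct state {
	struct node nodes[N_NODES];
};

static struct state *copy_state(const struct state *src)
{
	struct state *dst = malloc(sizeof(*dst));
	int i;

	if (!dst)
		return NULL;
	memcpy(dst, src, sizeof(*dst));

	/* The memcpy left dst->nodes[i].next pointing into src; re-base
	 * each link via the index it carries, as the ODM fixup does. */
	for (i = 0; i < N_NODES; i++) {
		if (dst->nodes[i].next)
			dst->nodes[i].next = &dst->nodes[dst->nodes[i].next->idx];
	}
	return dst;
}

int main(void)
{
	struct state s = { 0 };
	struct state *c;
	int i, ok;

	for (i = 0; i < N_NODES; i++)
		s.nodes[i].idx = i;
	s.nodes[0].next = &s.nodes[3];	/* 0 -> 3, like a pipe link */

	c = copy_state(&s);
	ok = c && c->nodes[0].next == &c->nodes[3];	/* now targets the copy */
	free(c);
	return ok ? 0 : 1;
}
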
@@ -45,10 +45,6 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#include "resource.h"
#endif
#include "hw/clk_mgr.h"

#define DC_LOGGER_INIT(logger)


@@ -684,6 +680,56 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
	return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}

bool wait_for_alt_mode(struct dc_link *link)
{

	/**
	 * something is terribly wrong if timeout is > 200ms. (5Hz)
	 * 500 microseconds * 400 tries gives us 200 ms
	 **/
	unsigned int sleep_time_in_microseconds = 500;
	unsigned int tries_allowed = 400;
	bool is_in_alt_mode;
	unsigned long long enter_timestamp;
	unsigned long long finish_timestamp;
	unsigned long long time_taken_in_ns;
	int tries_taken;

	DC_LOGGER_INIT(link->ctx->logger);

	if (link->link_enc->funcs->is_in_alt_mode == NULL)
		return true;

	is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
	DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode);

	if (is_in_alt_mode)
		return true;

	enter_timestamp = dm_get_timestamp(link->ctx);

	for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
		udelay(sleep_time_in_microseconds);
		/* ask the link if alt mode is enabled, if so return ok */
		if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {

			finish_timestamp = dm_get_timestamp(link->ctx);
			time_taken_in_ns = dm_get_elapse_time_in_ns(
				link->ctx, finish_timestamp, enter_timestamp);
			DC_LOG_WARNING("Alt mode entry finished after %llu ms\n",
				       div_u64(time_taken_in_ns, 1000000));
			return true;
		}

	}
	finish_timestamp = dm_get_timestamp(link->ctx);
	time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
						    enter_timestamp);
	DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
		       div_u64(time_taken_in_ns, 1000000));
	return false;
}

/**
 * dc_link_detect() - Detect if a sink is attached to a given link
 *

@@ -772,6 +818,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		/* wa HPD high coming too early*/
		if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {

			/* if alt mode times out, return false */
			if (wait_for_alt_mode(link) == false) {
				return false;
			}
		}

		if (!detect_dp(
				link,
				&sink_caps,

@@ -795,16 +850,9 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
				dc_sink_release(prev_sink);
		} else {
			/* Empty dongle plug in */
			for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
				int fail_count = 0;

				dp_verify_link_cap(link,
						&link->reported_link_cap,
						&fail_count);

				if (fail_count == 0)
					break;
			}
			dp_verify_link_cap_with_retries(link,
					&link->reported_link_cap,
					LINK_TRAINING_MAX_VERIFY_RETRY);
		}
		return true;
	}

@@ -908,17 +956,9 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
			 */

			/* deal with non-mst cases */
			for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
				int fail_count = 0;

				dp_verify_link_cap(link,
						&link->reported_link_cap,
						&fail_count);

				if (fail_count == 0)
					break;
			}

			dp_verify_link_cap_with_retries(link,
					&link->reported_link_cap,
					LINK_TRAINING_MAX_VERIFY_RETRY);
		} else {
			// If edid is the same, then discard new sink and revert back to original sink
			if (same_edid) {

@@ -1387,57 +1427,6 @@ void link_destroy(struct dc_link **link)
	*link = NULL;
}

static void dpcd_configure_panel_mode(
	struct dc_link *link,
	enum dp_panel_mode panel_mode)
{
	union dpcd_edp_config edp_config_set;
	bool panel_mode_edp = false;
	DC_LOGGER_INIT(link->ctx->logger);

	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

	if (DP_PANEL_MODE_DEFAULT != panel_mode) {

		switch (panel_mode) {
		case DP_PANEL_MODE_EDP:
		case DP_PANEL_MODE_SPECIAL:
			panel_mode_edp = true;
			break;

		default:
			break;
		}

		/*set edp panel mode in receiver*/
		core_link_read_dpcd(
			link,
			DP_EDP_CONFIGURATION_SET,
			&edp_config_set.raw,
			sizeof(edp_config_set.raw));

		if (edp_config_set.bits.PANEL_MODE_EDP
			!= panel_mode_edp) {
			enum ddc_result result = DDC_RESULT_UNKNOWN;

			edp_config_set.bits.PANEL_MODE_EDP =
				panel_mode_edp;
			result = core_link_write_dpcd(
				link,
				DP_EDP_CONFIGURATION_SET,
				&edp_config_set.raw,
				sizeof(edp_config_set.raw));

			ASSERT(result == DDC_RESULT_SUCESSFULL);
		}
	}
	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
			"eDP panel mode enabled: %d \n",
			link->link_index,
			link->dpcd_caps.panel_mode_edp,
			panel_mode_edp);
}

static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;

@@ -1472,6 +1461,16 @@ static enum dc_status enable_link_dp(
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	bool fec_enable;
#endif
	int i;
	bool apply_seamless_boot_optimization = false;

	// check for seamless boot
	for (i = 0; i < state->stream_count; i++) {
		if (state->streams[i]->apply_seamless_boot_optimization) {
			apply_seamless_boot_optimization = true;
			break;
		}
	}

	/* get link settings for video mode timing */
	decide_link_settings(stream, &link_settings);

@@ -1493,7 +1492,8 @@ static enum dc_status enable_link_dp(

	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
	state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
	if (!apply_seamless_boot_optimization)
		state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);

	dp_enable_link_phy(
		link,

@@ -1508,7 +1508,7 @@ static enum dc_status enable_link_dp(
	}

	panel_mode = dp_get_panel_mode(link);
	dpcd_configure_panel_mode(link, panel_mode);
	dp_set_panel_mode(link, panel_mode);

	skip_video_pattern = true;


@@ -2767,21 +2767,27 @@ void core_link_enable_stream(
			CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
			COLOR_DEPTH_UNDEFINED);

	core_dc->hwss.enable_stream(pipe_ctx);

	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		allocate_mst_payload(pipe_ctx);

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	if (pipe_ctx->stream->timing.flags.DSC) {
		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
				dc_is_virtual_signal(pipe_ctx->stream->signal))
			dp_set_dsc_enable(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(
				pipe_ctx->stream_res.tg,
				CRTC_STATE_VBLANK);
	}
#endif
	core_dc->hwss.enable_stream(pipe_ctx);

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	/* Set DSC PPS SDP (AKA "info frames") */
	if (pipe_ctx->stream->timing.flags.DSC) {
		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
				dc_is_virtual_signal(pipe_ctx->stream->signal))
			dp_set_dsc_pps_sdp(pipe_ctx, true);
	}
#endif

	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		allocate_mst_payload(pipe_ctx);

	core_dc->hwss.unblank_stream(pipe_ctx,
		&pipe_ctx->stream->link->cur_link_settings);


@@ -2798,7 +2804,7 @@ void core_link_enable_stream(
#endif
}

void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
{
	struct dc *core_dc = pipe_ctx->stream->ctx->dc;
	struct dc_stream_state *stream = pipe_ctx->stream;

@@ -2833,7 +2839,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
			write_i2c_redriver_setting(pipe_ctx, false);
		}
	}
	core_dc->hwss.disable_stream(pipe_ctx, option);
	core_dc->hwss.disable_stream(pipe_ctx);

	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT

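Note: the budget in wait_for_alt_mode() works out to 500 us x 400 tries = 200,000 us = 200 ms, the "5Hz" worst case its comment calls out. A runnable sketch of the same bounded-polling shape; the poll source here is simulated, where the real code calls udelay() and the encoder's is_in_alt_mode hook:

#include <stdbool.h>
#include <stdio.h>

/* Simulated hardware query: reports ready on the 10th poll. */
static bool condition_met(void)
{
	static unsigned int polls;
	return ++polls >= 10;
}

/* 500 us * 400 tries caps the wait at 200,000 us = 200 ms. The delay
 * is elided so the sketch stays runnable as plain C. */
static bool wait_for_condition(void)
{
	const unsigned int tries_allowed = 400;
	unsigned int tries;

	for (tries = 0; tries < tries_allowed; tries++) {
		/* udelay(500) would go here in the driver */
		if (condition_met())
			return true;
	}
	return false;	/* timed out after ~200 ms */
}

int main(void)
{
	printf("ready: %d\n", wait_for_condition());
	return 0;
}
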
@@ -294,7 +294,7 @@ static uint32_t defer_delay_converter_wa(
{
	struct dc_link *link = ddc->link;

	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 &&
		!memcmp(link->dpcd_caps.branch_dev_name,
			DP_DVI_CONVERTER_ID_4,
			sizeof(link->dpcd_caps.branch_dev_name)))

@@ -965,6 +965,7 @@ static inline enum link_training_result perform_link_training_int(
static void initialize_training_settings(
	struct dc_link *link,
	const struct dc_link_settings *link_setting,
	const struct dc_link_training_overrides *overrides,
	struct link_training_settings *lt_settings)
{
	uint32_t lane;

@@ -997,23 +998,23 @@ static void initialize_training_settings(
	/* Initialize link spread */
	if (link->dp_ss_off)
		lt_settings->link_settings.link_spread = LINK_SPREAD_DISABLED;
	else if (link->preferred_training_settings.downspread != NULL)
	else if (overrides->downspread != NULL)
		lt_settings->link_settings.link_spread
			= *link->preferred_training_settings.downspread
			= *overrides->downspread
			? LINK_SPREAD_05_DOWNSPREAD_30KHZ
			: LINK_SPREAD_DISABLED;
	else
		lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;

	/* Initialize lane settings overrides */
	if (link->preferred_training_settings.voltage_swing != NULL)
		lt_settings->voltage_swing = link->preferred_training_settings.voltage_swing;
	if (overrides->voltage_swing != NULL)
		lt_settings->voltage_swing = overrides->voltage_swing;

	if (link->preferred_training_settings.pre_emphasis != NULL)
		lt_settings->pre_emphasis = link->preferred_training_settings.pre_emphasis;
	if (overrides->pre_emphasis != NULL)
		lt_settings->pre_emphasis = overrides->pre_emphasis;

	if (link->preferred_training_settings.post_cursor2 != NULL)
		lt_settings->post_cursor2 = link->preferred_training_settings.post_cursor2;
	if (overrides->post_cursor2 != NULL)
		lt_settings->post_cursor2 = overrides->post_cursor2;

	/* Initialize lane settings (VS/PE/PC2) */
	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {

@@ -1032,23 +1033,23 @@ static void initialize_training_settings(
	}

	/* Initialize training timings */
	if (link->preferred_training_settings.cr_pattern_time != NULL)
		lt_settings->cr_pattern_time = *link->preferred_training_settings.cr_pattern_time;
	if (overrides->cr_pattern_time != NULL)
		lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
	else
		lt_settings->cr_pattern_time = 100;
		lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);

	if (link->preferred_training_settings.eq_pattern_time != NULL)
		lt_settings->eq_pattern_time = *link->preferred_training_settings.eq_pattern_time;
	if (overrides->eq_pattern_time != NULL)
		lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
	else
		lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);

	if (link->preferred_training_settings.pattern_for_eq != NULL)
		lt_settings->pattern_for_eq = *link->preferred_training_settings.pattern_for_eq;
	if (overrides->pattern_for_eq != NULL)
		lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
	else
		lt_settings->pattern_for_eq = get_supported_tp(link);

	if (link->preferred_training_settings.enhanced_framing != NULL)
		lt_settings->enhanced_framing = *link->preferred_training_settings.enhanced_framing;
	if (overrides->enhanced_framing != NULL)
		lt_settings->enhanced_framing = *overrides->enhanced_framing;
	else
		lt_settings->enhanced_framing = 1;
}

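Note: this refactor threads the overrides in as an explicit parameter instead of having initialize_training_settings() reach into link->preferred_training_settings, which is what lets dc_link_dp_sync_lt_attempt() below pass caller-supplied overrides. The NULL-means-no-override convention looks like this in isolation; the field names here are illustrative, not the driver's:

#include <stdio.h>

struct overrides {
	/* NULL means "no override requested"; otherwise points at the value. */
	const unsigned int *pattern_time;
};

struct settings {
	unsigned int pattern_time;
};

static void init_settings(const struct overrides *ov, struct settings *out,
			  unsigned int fallback)
{
	if (ov->pattern_time != NULL)
		out->pattern_time = *ov->pattern_time;	/* caller's choice wins */
	else
		out->pattern_time = fallback;		/* derived default */
}

int main(void)
{
	unsigned int forced = 100;
	struct overrides none = { 0 }, some = { &forced };
	struct settings s;

	init_settings(&none, &s, 400);
	printf("default: %u\n", s.pattern_time);	/* 400 */
	init_settings(&some, &s, 400);
	printf("override: %u\n", s.pattern_time);	/* 100 */
	return 0;
}
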
@@ -1139,7 +1140,11 @@ bool dc_link_dp_perform_link_training_skip_aux(
	struct link_training_settings lt_settings;
	enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;

	initialize_training_settings(link, link_setting, &lt_settings);
	initialize_training_settings(
		link,
		link_setting,
		&link->preferred_training_settings,
		&lt_settings);

	/* 1. Perform_clock_recovery_sequence. */


@@ -1184,7 +1189,11 @@ enum link_training_result dc_link_dp_perform_link_training(
	bool fec_enable;
#endif

	initialize_training_settings(link, link_setting, &lt_settings);
	initialize_training_settings(
		link,
		link_setting,
		&link->preferred_training_settings,
		&lt_settings);

	/* 1. set link rate, lane count and spread. */
	dpcd_set_link_settings(link, &lt_settings);

@@ -1247,6 +1256,146 @@ bool perform_link_training_with_retries(
	return false;
}

static enum clock_source_id get_clock_source_id(struct dc_link *link)
{
	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
	struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;

	if (dp_cs != NULL) {
		dp_cs_id = dp_cs->id;
	} else {
		/*
		 * dp clock source is not initialized for some reason.
		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
		 */
		ASSERT(dp_cs);
	}

	return dp_cs_id;
}

static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
{
	if (mst_enable == false &&
		link->type == dc_connection_mst_branch) {
		/* Disable MST on link. Use only local sink. */
		dp_disable_link_phy_mst(link, link->connector_signal);

		link->type = dc_connection_single;
		link->local_sink = link->remote_sinks[0];
		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
	} else if (mst_enable == true &&
		link->type == dc_connection_single &&
		link->remote_sinks[0] != NULL) {
		/* Re-enable MST on link. */
		dp_disable_link_phy(link, link->connector_signal);
		dp_enable_mst_on_sink(link, true);

		link->type = dc_connection_mst_branch;
		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
	}
}

bool dc_link_dp_sync_lt_begin(struct dc_link *link)
{
	/* Begin Sync LT. During this time,
	 * DPCD:600h must not be powered down.
	 */
	link->sync_lt_in_progress = true;

	/*Clear any existing preferred settings.*/
	memset(&link->preferred_training_settings, 0,
		sizeof(struct dc_link_training_overrides));
	memset(&link->preferred_link_setting, 0,
		sizeof(struct dc_link_settings));

	return true;
}

enum link_training_result dc_link_dp_sync_lt_attempt(
	struct dc_link *link,
	struct dc_link_settings *link_settings,
	struct dc_link_training_overrides *lt_overrides)
{
	struct link_training_settings lt_settings;
	enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
	enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	bool fec_enable = false;
#endif

	initialize_training_settings(
		link,
		link_settings,
		lt_overrides,
		&lt_settings);

	/* Setup MST Mode */
	if (lt_overrides->mst_enable)
		set_dp_mst_mode(link, *lt_overrides->mst_enable);

	/* Disable link */
	dp_disable_link_phy(link, link->connector_signal);

	/* Enable link */
	dp_cs_id = get_clock_source_id(link);
	dp_enable_link_phy(link, link->connector_signal,
		dp_cs_id, link_settings);

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	/* Set FEC enable */
	fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
	dp_set_fec_ready(link, fec_enable);
#endif

	if (lt_overrides->alternate_scrambler_reset) {
		if (*lt_overrides->alternate_scrambler_reset)
			panel_mode = DP_PANEL_MODE_EDP;
		else
			panel_mode = DP_PANEL_MODE_DEFAULT;
	} else
		panel_mode = dp_get_panel_mode(link);

	dp_set_panel_mode(link, panel_mode);

	/* Attempt to train with given link training settings */

	/* Set link rate, lane count and spread. */
	dpcd_set_link_settings(link, &lt_settings);

	/* 2. perform link training (set link training done
	 * to false is done as well)
	 */
	lt_status = perform_clock_recovery_sequence(link, &lt_settings);
	if (lt_status == LINK_TRAINING_SUCCESS) {
		lt_status = perform_channel_equalization_sequence(link,
			&lt_settings);
	}

	/* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
	/* 4. print status message*/
	print_status_message(link, &lt_settings, lt_status);

	return lt_status;
}

bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
{
	/* If input parameter is set, shut down phy.
	 * Still shouldn't turn off dp_receiver (DPCD:600h)
	 */
	if (link_down == true) {
		dp_disable_link_phy(link, link->connector_signal);
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
		dp_set_fec_ready(link, false);
#endif
	}

	link->sync_lt_in_progress = false;
	return true;
}

static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
	/* Set Default link settings */

@@ -1401,7 +1550,6 @@ bool dp_verify_link_cap(
	bool success;
	bool skip_link_training;
	bool skip_video_pattern;
	struct clock_source *dp_cs;
	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
	enum link_training_result status;
	union hpd_irq_data irq_data;

@@ -1425,17 +1573,7 @@ bool dp_verify_link_cap(
	/* disable PHY done possible by BIOS, will be done by driver itself */
	dp_disable_link_phy(link, link->connector_signal);

	dp_cs = link->dc->res_pool->dp_clock_source;

	if (dp_cs)
		dp_cs_id = dp_cs->id;
	else {
		/*
		 * dp clock source is not initialized for some reason.
		 * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
		 */
		ASSERT(dp_cs);
	}
	dp_cs_id = get_clock_source_id(link);

	/* link training starts with the maximum common settings
	 * supported by both sink and ASIC.

@@ -1505,6 +1643,33 @@ bool dp_verify_link_cap(
	return success;
}

bool dp_verify_link_cap_with_retries(
	struct dc_link *link,
	struct dc_link_settings *known_limit_link_setting,
	int attempts)
{
	uint8_t i = 0;
	bool success = false;

	for (i = 0; i < attempts; i++) {
		int fail_count = 0;
		enum dc_connection_type type;

		memset(&link->verified_link_cap, 0,
			sizeof(struct dc_link_settings));
		if (!dc_link_detect_sink(link, &type)) {
			break;
		} else if (dp_verify_link_cap(link,
				&link->reported_link_cap,
				&fail_count) && fail_count == 0) {
			success = true;
			break;
		}
		msleep(10);
	}
	return success;
}

static struct dc_link_settings get_common_supported_link_settings(
	struct dc_link_settings link_setting_a,
	struct dc_link_settings link_setting_b)

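Note: dp_verify_link_cap_with_retries() replaces the two open-coded loops removed from dc_link_detect() with one helper: clear the previous result, confirm the sink is still present, verify, and back off 10 ms between attempts, succeeding only on a clean fail_count == 0 pass. The generic shape, with placeholder callbacks standing in for dc_link_detect_sink() and dp_verify_link_cap():

#include <stdbool.h>
#include <stdio.h>

typedef bool (*detect_fn)(void);
typedef bool (*verify_fn)(int *fail_count);

/* Retries verification up to 'attempts' times; gives up early if the
 * sink disappears, and succeeds only on a clean (fail_count == 0) pass. */
static bool verify_with_retries(detect_fn detect, verify_fn verify, int attempts)
{
	int i;

	for (i = 0; i < attempts; i++) {
		int fail_count = 0;

		if (!detect())
			break;			/* sink gone: stop retrying */
		if (verify(&fail_count) && fail_count == 0)
			return true;		/* clean pass */
		/* the driver sleeps 10 ms here between attempts */
	}
	return false;
}

/* Demo stubs: detection always succeeds, verification is clean on try 3. */
static bool demo_detect(void) { return true; }
static bool demo_verify(int *fail_count)
{
	static int tries;
	*fail_count = (++tries < 3);
	return true;
}

int main(void)
{
	printf("verified: %d\n", verify_with_retries(demo_detect, demo_verify, 10));
	return 0;
}
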
@@ -2307,6 +2472,11 @@ bool is_mst_supported(struct dc_link *link)
	union dpcd_rev rev;
	union mstm_cap cap;

	if (link->preferred_training_settings.mst_enable &&
		*link->preferred_training_settings.mst_enable == false) {
		return false;
	}

	rev.raw = 0;
	cap.raw = 0;


@@ -2514,13 +2684,13 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,

	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
		switch (link->dpcd_caps.branch_dev_id) {
		/* Some active dongles (DP-VGA, DP-DLDVI converters) power down
		/* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
		 * all internal circuits including AUX communication preventing
		 * reading DPCD table and EDID (spec violation).
		 * Encoder will skip DP RX power down on disable_output to
		 * keep receiver powered all the time.*/
		case DP_BRANCH_DEVICE_ID_1:
		case DP_BRANCH_DEVICE_ID_4:
		case DP_BRANCH_DEVICE_ID_0010FA:
		case DP_BRANCH_DEVICE_ID_0080E1:
			link->wa_flags.dp_keep_receiver_powered = true;
			break;


@@ -2925,14 +3095,19 @@ static void set_crtc_test_pattern(struct dc_link *link,
				controller_test_pattern, color_depth);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		else if (opp->funcs->opp_set_disp_pattern_generator) {
			struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
			struct pipe_ctx *odm_pipe;
			int opp_cnt = 1;

			if (bot_odm_pipe) {
				struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
				opp_cnt++;

				bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
				width /= 2;
				bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
			width /= opp_cnt;

			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;

				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
					controller_test_pattern,
					color_depth,
					NULL,

@@ -2961,14 +3136,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
				color_depth);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
		else if (opp->funcs->opp_set_disp_pattern_generator) {
			struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
			struct pipe_ctx *odm_pipe;
			int opp_cnt = 1;

			if (bot_odm_pipe) {
				struct output_pixel_processor *bot_opp = bot_odm_pipe->stream_res.opp;
			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
				opp_cnt++;

				bot_opp->funcs->opp_program_bit_depth_reduction(bot_opp, &params);
				width /= 2;
				bot_opp->funcs->opp_set_disp_pattern_generator(bot_opp,
			width /= opp_cnt;
			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;

				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
					CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
					color_depth,
					NULL,

@@ -3009,7 +3188,7 @@ bool dc_link_dp_set_test_pattern(
	memset(&training_pattern, 0, sizeof(training_pattern));

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream->link == link) {
		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}

@@ -3158,6 +3337,105 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
	core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
}

void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
	union dpcd_edp_config edp_config_set;
	bool panel_mode_edp = false;

	memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));

	if (panel_mode != DP_PANEL_MODE_DEFAULT) {

		switch (panel_mode) {
		case DP_PANEL_MODE_EDP:
		case DP_PANEL_MODE_SPECIAL:
			panel_mode_edp = true;
			break;

		default:
			break;
		}

		/*set edp panel mode in receiver*/
		core_link_read_dpcd(
			link,
			DP_EDP_CONFIGURATION_SET,
			&edp_config_set.raw,
			sizeof(edp_config_set.raw));

		if (edp_config_set.bits.PANEL_MODE_EDP
			!= panel_mode_edp) {
			enum ddc_result result = DDC_RESULT_UNKNOWN;

			edp_config_set.bits.PANEL_MODE_EDP =
				panel_mode_edp;
			result = core_link_write_dpcd(
				link,
				DP_EDP_CONFIGURATION_SET,
				&edp_config_set.raw,
				sizeof(edp_config_set.raw));

			ASSERT(result == DDC_RESULT_SUCESSFULL);
		}
	}
	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
			"eDP panel mode enabled: %d \n",
			link->link_index,
			link->dpcd_caps.panel_mode_edp,
			panel_mode_edp);
}

enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
{
	/* We need to explicitly check that connector
	 * is not DP. Some Travis_VGA get reported
	 * by video bios as DP.
	 */
	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {

		switch (link->dpcd_caps.branch_dev_id) {
		case DP_BRANCH_DEVICE_ID_0022B9:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not
			 * provide sink device id, alternate scrambler
			 * scheme will be overridden later by querying
			 * Encoder features
			 */
			if (strncmp(
				link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_2,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		case DP_BRANCH_DEVICE_ID_00001A:
			/* alternate scrambler reset is required for Travis
			 * for the case when external chip does not provide
			 * sink device id, alternate scrambler scheme will
			 * be overridden later by querying Encoder feature
			 */
			if (strncmp(link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_3,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		default:
			break;
		}
	}

	if (link->dpcd_caps.panel_mode_edp) {
		return DP_PANEL_MODE_EDP;
	}

	return DP_PANEL_MODE_DEFAULT;
}

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void dp_set_fec_ready(struct dc_link *link, bool ready)
{

@@ -55,6 +55,9 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)

	state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;

	if (link->sync_lt_in_progress)
		return;

	core_link_write_dpcd(link, DP_SET_POWER, &state,
			sizeof(state));
}

@@ -245,46 +248,6 @@ void dp_set_hw_lane_settings(
	encoder->funcs->dp_set_lane_settings(encoder, link_settings);
}

enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
{
	/* We need to explicitly check that connector
	 * is not DP. Some Travis_VGA get reported
	 * by video bios as DP.
	 */
	if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {

		switch (link->dpcd_caps.branch_dev_id) {
		case DP_BRANCH_DEVICE_ID_2:
			if (strncmp(
				link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_2,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		case DP_BRANCH_DEVICE_ID_3:
			if (strncmp(link->dpcd_caps.branch_dev_name,
				DP_VGA_LVDS_CONVERTER_ID_3,
				sizeof(
					link->dpcd_caps.
					branch_dev_name)) == 0) {
				return DP_PANEL_MODE_SPECIAL;
			}
			break;
		default:
			break;
		}
	}

	if (link->dpcd_caps.panel_mode_edp) {
		return DP_PANEL_MODE_EDP;
	}

	return DP_PANEL_MODE_DEFAULT;
}

void dp_set_hw_test_pattern(
	struct dc_link *link,
	enum dp_test_pattern test_pattern,

@@ -312,7 +275,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream != NULL &&
			!pipes[i].top_pipe &&
			!pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
			pipes[i].stream->link != NULL &&
			pipes[i].stream_res.stream_enc != NULL) {
			udelay(100);

@@ -326,7 +289,9 @@ void dp_retrain_link_dp_test(struct dc_link *link,

			dp_receiver_power_ctrl(link, false);

			link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
			link->dc->hwss.disable_stream(&pipes[i]);
			if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
				(&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);

			link->link_enc->funcs->disable_output(
					link->link_enc,

@@ -379,10 +344,22 @@ void dp_retrain_link_dp_test(struct dc_link *link,
static void dsc_optc_config_log(struct display_stream_compressor *dsc,
		struct dsc_optc_config *config)
{
	DC_LOG_DSC("Setting optc DSC config at DSC inst %d", dsc->inst);
	DC_LOG_DSC("\n\tbytes_per_pixel %d\n\tis_pixel_format_444 %d\n\tslice_width %d",
			config->bytes_per_pixel,
			config->is_pixel_format_444, config->slice_width);
	uint32_t precision = 1 << 28;
	uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
	uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
	uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;

	/* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
	 * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is
	 * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
	 */
	ll_bytes_per_pix_fraq *= 10000000;
	ll_bytes_per_pix_fraq /= precision;

	DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
		config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
	DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
	DC_LOG_DSC("\tslice_width %d", config->slice_width);
}
static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
|
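Editor's note: the reworked dsc_optc_config_log() above treats bytes_per_pixel as a fixed-point number with 28 fractional bits (hence precision = 1 << 28) and prints a seven-digit decimal fraction, which is enough since DSC steps are 1/128 of a byte. A minimal self-contained sketch of the same integer-only conversion; the sample value below is invented for illustration, not taken from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t precision = 1 << 28;          /* 2^28, fixed-point scale */
	uint32_t bytes_per_pixel = 0x18000000;       /* hypothetical: 1.5 in U4.28 */
	uint32_t int_part = bytes_per_pixel / precision;
	uint64_t fraq = bytes_per_pixel % precision; /* widen before scaling */

	fraq *= 10000000;  /* 7 decimal digits, enough for 1/128-byte steps */
	fraq /= precision;

	/* prints: 0x18000000 -> 1.5000000 */
	printf("0x%08x -> %u.%07u\n", (unsigned)bytes_per_pixel,
	       (unsigned)int_part, (unsigned)fraq);
	return 0;
}
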
@@ -398,55 +375,62 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
return result;
}

/* This has to be done after DSC was enabled on RX first, i.e. after dp_enable_dsc_on_rx() had been called
/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
* i.e. after dp_enable_dsc_on_rx() had been called
*/
void set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;

for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;

if (enable) {
/* TODO proper function */
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg;
enum optc_dsc_mode optc_dsc_mode;
uint8_t dsc_packed_pps[128];

/* Enable DSC hw block */
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps[0]);
if (odm_pipe) {
struct display_stream_compressor *bot_dsc = odm_pipe->stream_res.dsc;
uint8_t dsc_packed_pps_odm[128];

dsc_cfg.pic_width /= 2;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % 2 == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= 2;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
bot_dsc->funcs->dsc_set_config(bot_dsc, &dsc_cfg, &dsc_optc_cfg, &dsc_packed_pps_odm[0]);
bot_dsc->funcs->dsc_enable(bot_dsc, odm_pipe->stream_res.opp->inst);
}
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;

optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

dsc_optc_config_log(dsc, &dsc_optc_cfg);
/* Enable DSC in encoder */
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
optc_dsc_mode,
dsc_optc_cfg.bytes_per_pixel,
dsc_optc_cfg.slice_width,
&dsc_packed_pps[0]);
dsc_optc_cfg.slice_width);

/* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
}

/* Enable DSC in OPTC */
DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
optc_dsc_mode,
dsc_optc_cfg.bytes_per_pixel,

@@ -458,14 +442,18 @@ void set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);

/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
pipe_ctx->stream_res.stream_enc,
OPTC_DSC_DISABLED, 0, 0, NULL);
OPTC_DSC_DISABLED, 0, 0);

pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL);
}

/* disable DSC block */
pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
if (odm_pipe)
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}

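Editor's note: with ODM combine, dp_set_dsc_on_stream() now programs the DSC config per OPP: picture width and horizontal slice count are divided by opp_cnt before dsc_set_config(), then multiplied back before the encoder/OPTC programming, which still sees full-frame values. A minimal arithmetic sketch of that split; the timing numbers below are invented for the demonstration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int h_addressable = 7680;  /* hypothetical 8K-wide timing */
	int num_slices_h = 8;      /* DSC slices across the whole picture */
	int opp_cnt = 2;           /* primary pipe + one next_odm_pipe */

	assert(num_slices_h % opp_cnt == 0);  /* same ASSERT as the driver */
	int pic_width_per_opp = h_addressable / opp_cnt;  /* 3840 */
	int slices_per_opp = num_slices_h / opp_cnt;      /* 4 */

	printf("each OPP: %d px wide, %d slices\n",
	       pic_width_per_opp, slices_per_opp);

	/* restored to full-frame values before OPTC/encoder programming */
	printf("full frame: %d px, %d slices\n",
	       pic_width_per_opp * opp_cnt, slices_per_opp * opp_cnt);
	return 0;
}
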
@@ -482,18 +470,59 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)

if (enable) {
if (dp_set_dsc_on_rx(pipe_ctx, true)) {
set_dsc_on_stream(pipe_ctx, true);
dp_set_dsc_on_stream(pipe_ctx, true);
result = true;
}
} else {
dp_set_dsc_on_rx(pipe_ctx, false);
set_dsc_on_stream(pipe_ctx, false);
dp_set_dsc_on_stream(pipe_ctx, false);
result = true;
}
out:
return result;
}

bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;

if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
return false;

if (enable) {
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];

/* Enable DSC hw block */
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;

DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc,
true,
&dsc_packed_pps[0]);
}
} else {
/* disable DSC PPS in stream encoder */
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL);
}
}

return true;
}


bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;

@@ -503,9 +532,9 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
if (!dsc)
return false;

set_dsc_on_stream(pipe_ctx, true);
dp_set_dsc_on_stream(pipe_ctx, true);
dp_set_dsc_pps_sdp(pipe_ctx, true);
return true;
}

#endif

@@ -172,13 +172,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
default:
break;
}
if (res_pool != NULL) {
struct dc_firmware_info fw_info = { { 0 } };

if (dc->ctx->dc_bios->funcs->get_firmware_info(dc->ctx->dc_bios,
&fw_info) == BP_RESULT_OK) {
if (res_pool != NULL) {
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
fw_info.pll_info.crystal_frequency;
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
/* initialize with firmware data first, not all
* ASICs have a DCCG SW component. FPGA or
* simulation need initialization of

@@ -265,12 +263,10 @@ bool resource_construct(
DC_ERR("DC: failed to create audio!\n");
return false;
}

if (!aud->funcs->endpoint_valid(aud)) {
aud->funcs->destroy(&aud);
break;
}

pool->audios[i] = aud;
pool->audio_count++;
}

@@ -1119,25 +1115,21 @@ struct pipe_ctx *resource_get_head_pipe_for_stream(
struct dc_stream_state *stream)
{
int i;

for (i = 0; i < MAX_PIPES; i++) {
if (res_ctx->pipe_ctx[i].stream == stream &&
!res_ctx->pipe_ctx[i].top_pipe) {
if (res_ctx->pipe_ctx[i].stream == stream
&& !res_ctx->pipe_ctx[i].top_pipe
&& !res_ctx->pipe_ctx[i].prev_odm_pipe)
return &res_ctx->pipe_ctx[i];
break;
}
}
return NULL;
}

static struct pipe_ctx *resource_get_tail_pipe_for_stream(
static struct pipe_ctx *resource_get_tail_pipe(
struct resource_context *res_ctx,
struct dc_stream_state *stream)
struct pipe_ctx *head_pipe)
{
struct pipe_ctx *head_pipe, *tail_pipe;
head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);

if (!head_pipe)
return NULL;
struct pipe_ctx *tail_pipe;

tail_pipe = head_pipe->bottom_pipe;


@@ -1153,31 +1145,20 @@ static struct pipe_ctx *resource_get_tail_pipe_for_stream(
* A free_pipe for a stream is defined here as a pipe
* that has no surface attached yet
*/
static struct pipe_ctx *acquire_free_pipe_for_stream(
static struct pipe_ctx *acquire_free_pipe_for_head(
struct dc_state *context,
const struct resource_pool *pool,
struct dc_stream_state *stream)
struct pipe_ctx *head_pipe)
{
int i;
struct resource_context *res_ctx = &context->res_ctx;

struct pipe_ctx *head_pipe = NULL;

/* Find head pipe, which has the back end set up*/

head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);

if (!head_pipe) {
ASSERT(0);
return NULL;
}

if (!head_pipe->plane_state)
return head_pipe;

/* Re-use pipe already acquired for this stream if available*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
if (res_ctx->pipe_ctx[i].stream == stream &&
if (res_ctx->pipe_ctx[i].stream == head_pipe->stream &&
!res_ctx->pipe_ctx[i].plane_state) {
return &res_ctx->pipe_ctx[i];
}

@@ -1191,8 +1172,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
if (!pool->funcs->acquire_idle_pipe_for_layer)
return NULL;

return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);

return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)

@@ -1206,7 +1186,7 @@ static int acquire_first_split_pipe(
for (i = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];

if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) &&
if (split_pipe->top_pipe &&
split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
if (split_pipe->bottom_pipe)

@@ -1267,39 +1247,41 @@ bool dc_add_plane_to_context(
return false;
}

tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
ASSERT(tail_pipe);

free_pipe = acquire_free_pipe_for_stream(context, pool, stream);

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
}
#endif
if (!free_pipe)
return false;

/* retain new surfaces */
/* retain new surface, but only once per stream */
dc_plane_state_retain(plane_state);
free_pipe->plane_state = plane_state;

if (head_pipe != free_pipe) {
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
} else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
free_pipe->bottom_pipe->plane_state = plane_state;
while (head_pipe) {
tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
ASSERT(tail_pipe);

free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
}
#endif
if (!free_pipe) {
dc_plane_state_release(plane_state);
return false;
}

free_pipe->plane_state = plane_state;

if (head_pipe != free_pipe) {
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
}
head_pipe = head_pipe->next_odm_pipe;
}

/* assign new surfaces*/
stream_status->plane_states[stream_status->plane_count] = plane_state;


@@ -1308,35 +1290,6 @@ bool dc_add_plane_to_context(
return true;
}

struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;

/* ODM should only be updated once per otg */
if (pipe_ctx->top_pipe)
return NULL;

while (bottom_pipe) {
if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
break;
bottom_pipe = bottom_pipe->bottom_pipe;
}

return bottom_pipe;
}

bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;

if (!top_pipe)
return false;
if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
return false;

return true;
}

bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,

@@ -1363,12 +1316,6 @@ bool dc_remove_plane_from_context(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

if (pipe_ctx->plane_state == plane_state) {
if (dc_res_is_odm_head_pipe(pipe_ctx)) {
pipe_ctx->plane_state = NULL;
pipe_ctx->bottom_pipe = NULL;
continue;
}

if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;


@@ -1383,13 +1330,10 @@ bool dc_remove_plane_from_context(
* For head pipe detach surfaces from pipe for tail
* pipe just zero it out
*/
if (!pipe_ctx->top_pipe) {
if (!pipe_ctx->top_pipe)
pipe_ctx->plane_state = NULL;
if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
pipe_ctx->bottom_pipe = NULL;
} else {
else
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
}
}
}


@@ -1675,24 +1619,25 @@ static struct audio *find_first_free_audio(
const struct resource_pool *pool,
enum engine_id id)
{
int i;
for (i = 0; i < pool->audio_count; i++) {
int i, available_audio_count;

available_audio_count = pool->audio_count;

for (i = 0; i < available_audio_count; i++) {
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
/*we have enough audio endpoint, find the matching inst*/
if (id != i)
continue;

return pool->audios[i];
}
}

/* use engine id to find free audio */
if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
/* use engine id to find free audio */
if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
return pool->audios[id];
}

/*not found the matching one, first come first serve*/
for (i = 0; i < pool->audio_count; i++) {
for (i = 0; i < available_audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
return pool->audios[i];
}

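Editor's note: the allocation order in find_first_free_audio() is: prefer the free endpoint whose index matches the DIG engine id when that engine's stream encoder is in use, then fall back to the raw engine-id slot, then first-come-first-serve. A standalone model of that policy; all of the state below is made up for the demonstration and the types are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define AUDIO_COUNT 4

static int pick_audio(const bool *audio_acquired,
		      const bool *stream_enc_acquired, int id)
{
	for (int i = 0; i < AUDIO_COUNT; i++) {
		/* free endpoint whose matching stream encoder is acquired */
		if (!audio_acquired[i] && stream_enc_acquired[i] && id == i)
			return i;
	}
	if (id < AUDIO_COUNT && !audio_acquired[id])
		return id;                       /* engine-id slot is free */
	for (int i = 0; i < AUDIO_COUNT; i++)
		if (!audio_acquired[i])
			return i;                /* first come, first serve */
	return -1;
}

int main(void)
{
	bool audio_acquired[AUDIO_COUNT] = { true, false, false, false };
	bool enc_acquired[AUDIO_COUNT] = { true, true, false, false };

	/* engine 1: encoder 1 in use and audio 1 free -> 1 */
	printf("engine 1 -> audio %d\n",
	       pick_audio(audio_acquired, enc_acquired, 1));
	/* engine 0: audio 0 taken -> falls through to first free (1) */
	printf("engine 0 -> audio %d\n",
	       pick_audio(audio_acquired, enc_acquired, 0));
	return 0;
}
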
@@ -1752,51 +1697,46 @@ enum dc_status dc_remove_stream_from_ctx(
{
int i;
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *del_pipe = NULL;

/* Release primary pipe */
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
struct pipe_ctx *odm_pipe =
dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);

del_pipe = &new_ctx->res_ctx.pipe_ctx[i];

ASSERT(del_pipe->stream_res.stream_enc);
update_stream_engine_usage(
&new_ctx->res_ctx,
dc->res_pool,
del_pipe->stream_res.stream_enc,
false);

if (del_pipe->stream_res.audio)
update_audio_usage(
&new_ctx->res_ctx,
dc->res_pool,
del_pipe->stream_res.audio,
false);

resource_unreference_clock_source(&new_ctx->res_ctx,
dc->res_pool,
del_pipe->clock_source);

if (dc->res_pool->funcs->remove_stream_from_ctx)
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);

memset(del_pipe, 0, sizeof(*del_pipe));
if (odm_pipe)
memset(odm_pipe, 0, sizeof(*odm_pipe));

break;
}
}
struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream);
struct pipe_ctx *odm_pipe;

if (!del_pipe) {
DC_ERROR("Pipe not found for stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}

odm_pipe = del_pipe->next_odm_pipe;

/* Release primary pipe */
ASSERT(del_pipe->stream_res.stream_enc);
update_stream_engine_usage(
&new_ctx->res_ctx,
dc->res_pool,
del_pipe->stream_res.stream_enc,
false);

if (del_pipe->stream_res.audio)
update_audio_usage(
&new_ctx->res_ctx,
dc->res_pool,
del_pipe->stream_res.audio,
false);

resource_unreference_clock_source(&new_ctx->res_ctx,
dc->res_pool,
del_pipe->clock_source);

if (dc->res_pool->funcs->remove_stream_from_ctx)
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);

while (odm_pipe) {
struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;

memset(odm_pipe, 0, sizeof(*odm_pipe));
odm_pipe = next_odm_pipe;
}
memset(del_pipe, 0, sizeof(*del_pipe));

for (i = 0; i < new_ctx->stream_count; i++)
if (new_ctx->streams[i] == stream)
break;

@@ -1896,7 +1836,7 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int inst;
unsigned int inst, tg_inst;

/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))

@@ -1908,28 +1848,37 @@ static int acquire_resource_from_hw_enabled_state(
* current implementation always maps 1-to-1, so this code makes
* the same assumption and doesn't check OTG source.
*/
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

/* Instance should be within the range of the pool */
if (inst >= pool->pipe_count)
return -1;

if (!res_ctx->pipe_ctx[inst].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[inst];
if (inst >= pool->stream_enc_count)
return -1;

pipe_ctx->stream_res.tg = pool->timing_generators[inst];
pipe_ctx->plane_res.mi = pool->mis[inst];
pipe_ctx->plane_res.hubp = pool->hubps[inst];
pipe_ctx->plane_res.ipp = pool->ipps[inst];
pipe_ctx->plane_res.xfm = pool->transforms[inst];
pipe_ctx->plane_res.dpp = pool->dpps[inst];
pipe_ctx->stream_res.opp = pool->opps[inst];
if (pool->dpps[inst])
pipe_ctx->plane_res.mpcc_inst = pool->dpps[inst]->inst;
pipe_ctx->pipe_idx = inst;
tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);

if (tg_inst >= pool->timing_generator_count)
return false;

if (!res_ctx->pipe_ctx[tg_inst].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];

pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
pipe_ctx->plane_res.mi = pool->mis[tg_inst];
pipe_ctx->plane_res.hubp = pool->hubps[tg_inst];
pipe_ctx->plane_res.ipp = pool->ipps[tg_inst];
pipe_ctx->plane_res.xfm = pool->transforms[tg_inst];
pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
pipe_ctx->stream_res.opp = pool->opps[tg_inst];

if (pool->dpps[tg_inst])
pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
pipe_ctx->pipe_idx = tg_inst;

pipe_ctx->stream = stream;
return inst;
return tg_inst;
}

return -1;

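Editor's note: the hunk above stops assuming the DIG frontend index equals the pipe index; it now asks the stream encoder which OTG feeds it (the new dig_source_otg hook reads DIG_SOURCE_SELECT) and uses that timing-generator instance to pick the pipe. A toy standalone model of that lookup; the mapping table is invented for the demonstration:

#include <stdio.h>

#define STREAM_ENC_COUNT 3
#define TG_COUNT 3

/* pretend DIG_SOURCE_SELECT readback: DIG FE i is fed by this OTG */
static const int dig_source_otg[STREAM_ENC_COUNT] = { 2, 0, 1 };

static int pipe_for_enabled_dig(int dig_fe)
{
	if (dig_fe >= STREAM_ENC_COUNT)
		return -1;
	int tg_inst = dig_source_otg[dig_fe];
	if (tg_inst >= TG_COUNT)
		return -1;
	return tg_inst;   /* pipe_ctx index follows the OTG instance */
}

int main(void)
{
	/* DIG FE 0 is driven by OTG 2, so pipe 2 is acquired, not pipe 0 */
	printf("DIG FE 0 -> pipe %d\n", pipe_for_enabled_dig(0));
	return 0;
}
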
@@ -2491,6 +2440,12 @@ void dc_resource_state_copy_construct(

if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

if (cur_pipe->next_odm_pipe)
cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];

if (cur_pipe->prev_odm_pipe)
cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
}

for (i = 0; i < dst_ctx->stream_count; i++) {

@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.2.42"
#define DC_VER "3.2.48"

#define MAX_SURFACES 3
#define MAX_PLANES 6

@@ -121,7 +121,7 @@ struct dc_caps {
struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
struct display_mode_lib alternate_dml;
bool skip_clock_update;
};
#endif


@@ -220,7 +220,7 @@ struct dc_config {
bool power_down_display_on_boot;
bool edp_not_connected;
bool forced_clocks;

bool multi_mon_pp_mclk_switch;
};

enum visual_confirm {

@@ -423,6 +423,7 @@ struct dc_phy_addr_space_config {
} gart_config;

bool valid;
uint64_t page_table_default_page_addr;
};

struct dc_virtual_addr_space_config {

@@ -614,9 +615,12 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_UNITY,
TRANSFER_FUNCTION_HLG,
TRANSFER_FUNCTION_HLG12,
TRANSFER_FUNCTION_GAMMA22
TRANSFER_FUNCTION_GAMMA22,
TRANSFER_FUNCTION_GAMMA24,
TRANSFER_FUNCTION_GAMMA26
};


struct dc_transfer_func {
struct kref refcount;
enum dc_transfer_func_type type;

@@ -747,6 +751,7 @@ struct dc_plane_state {
bool visible;
bool flip_immediate;
bool horizontal_mirror;
int layer_index;

union surface_update_flags update_flags;
/* private to DC core */

@@ -776,6 +781,7 @@ struct dc_plane_info {
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
};

struct dc_scaling_info {

@@ -61,9 +61,6 @@ struct dc_vbios_funcs {
struct graphics_object_id connector_object_id,
uint32_t device_tag_index,
struct connector_device_tag_info *info);
enum bp_result (*get_firmware_info)(
struct dc_bios *bios,
struct dc_firmware_info *info);
enum bp_result (*get_spread_spectrum_info)(
struct dc_bios *bios,
enum as_signal_type signal,

@@ -152,6 +149,8 @@ struct dc_bios {
struct dc_context *ctx;
const struct bios_registers *regs;
struct integrated_info *integrated_info;
struct dc_firmware_info fw_info;
bool fw_info_valid;
};

#endif /* DC_BIOS_TYPES_H */

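Editor's note: the dc_bios change above replaces the per-caller get_firmware_info() vtable call with a cached fw_info struct plus a fw_info_valid flag, so consumers (clock sources, resource pools, i2c) just test the flag. A minimal standalone sketch of that parse-once caching pattern; the types and the parse function are simplified stand-ins, and the 27 MHz value is invented:

#include <stdbool.h>
#include <stdio.h>

struct firmware_info {
	unsigned int crystal_frequency_khz;
};

struct bios {
	struct firmware_info fw_info;
	bool fw_info_valid;
};

static bool parse_firmware_info(struct firmware_info *out)
{
	out->crystal_frequency_khz = 27000;  /* pretend VBIOS reported 27 MHz */
	return true;
}

int main(void)
{
	struct bios bp = { 0 };

	/* done once, at bios construction */
	bp.fw_info_valid = parse_firmware_info(&bp.fw_info);

	/* every later consumer is a cheap flag check plus a struct read */
	if (bp.fw_info_valid)
		printf("xtalin %u kHz\n", bp.fw_info.crystal_frequency_khz);
	return 0;
}
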
@@ -128,7 +128,10 @@ struct dc_link_training_overrides {
enum dc_link_spread *downspread;
bool *alternate_scrambler_reset;
bool *enhanced_framing;
bool *mst_enable;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool *fec_enable;
#endif
};

union dpcd_rev {

@@ -516,7 +516,8 @@ union dc_cursor_attribute_flags {
uint32_t INVERT_PIXEL_DATA:1;
uint32_t ZERO_EXPANSION:1;
uint32_t MIN_MAX_INVERT:1;
uint32_t RESERVED:25;
uint32_t ENABLE_CURSOR_DEGAMMA:1;
uint32_t RESERVED:24;
} bits;
uint32_t value;
};

@@ -756,6 +757,8 @@ struct crtc_trigger_info {
struct dc_crtc_timing_adjust {
uint32_t v_total_min;
uint32_t v_total_max;
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
};

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT

@@ -84,6 +84,7 @@ struct dc_link {
bool dp_ss_off;
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;

/* caps is the same as reported_link_cap. link_training uses
* reported_link_cap. Will clean up. TODO

@@ -228,6 +229,15 @@ enum link_training_result dc_link_dp_perform_link_training(
const struct dc_link_settings *link_setting,
bool skip_video_pattern);

bool dc_link_dp_sync_lt_begin(struct dc_link *link);

enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link *link,
struct dc_link_settings *link_setting,
struct dc_link_training_overrides *lt_settings);

bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down);

void dc_link_dp_enable_hpd(const struct dc_link *link);

void dc_link_dp_disable_hpd(const struct dc_link *link);

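Editor's note: the new sync LT entry points pair with the sync_lt_in_progress flag and the early return added to dp_receiver_power_ctrl() earlier in this series: while a begin/attempt/end sequence is in flight, DP_SET_POWER writes are suppressed. A toy standalone walk-through of that guard; all scaffolding here is invented for the demo and only mimics the flag handling, not the real training flow:

#include <stdbool.h>
#include <stdio.h>

struct link {
	bool sync_lt_in_progress;
};

static void receiver_power_ctrl(struct link *link, bool on)
{
	if (link->sync_lt_in_progress) {
		printf("skip DP_SET_POWER write (sync LT in progress)\n");
		return;
	}
	printf("DP_SET_POWER <- %s\n", on ? "D0" : "D3");
}

int main(void)
{
	struct link link = { 0 };

	link.sync_lt_in_progress = true;   /* dc_link_dp_sync_lt_begin() */
	receiver_power_ctrl(&link, false); /* suppressed */
	link.sync_lt_in_progress = false;  /* dc_link_dp_sync_lt_end() */
	receiver_power_ctrl(&link, false); /* now allowed */
	return 0;
}
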
@@ -613,6 +613,8 @@ void dce_aud_az_configure(

AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
value);
DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
audio->inst, value, audio_info->display_name);

/*
*write the port ID:

@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
.az_configure = dce_aud_az_configure,
.destroy = dce_aud_destroy,
};

void dce_aud_destroy(struct audio **audio)
{
struct dce_audio *aud = DCE_AUD(*audio);

@@ -936,7 +937,7 @@ struct audio *dce_audio_create(
unsigned int inst,
const struct dce_audio_registers *reg,
const struct dce_audio_shift *shifts,
const struct dce_aduio_mask *masks
const struct dce_audio_mask *masks
)
{
struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);

@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
audio->regs = reg;
audio->shifts = shifts;
audio->masks = masks;

return &audio->base;
}


@@ -101,7 +101,7 @@ struct dce_audio_shift {
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
};

struct dce_aduio_mask {
struct dce_audio_mask {
uint32_t AZALIA_ENDPOINT_REG_INDEX;
uint32_t AZALIA_ENDPOINT_REG_DATA;


@@ -125,7 +125,7 @@ struct dce_audio {
struct audio base;
const struct dce_audio_registers *regs;
const struct dce_audio_shift *shifts;
const struct dce_aduio_mask *masks;
const struct dce_audio_mask *masks;
};

struct audio *dce_audio_create(

@@ -133,7 +133,7 @@ struct audio *dce_audio_create(
unsigned int inst,
const struct dce_audio_registers *reg,
const struct dce_audio_shift *shifts,
const struct dce_aduio_mask *masks);
const struct dce_audio_mask *masks);

void dce_aud_destroy(struct audio **audio);


@@ -1061,7 +1061,8 @@ static bool dcn20_program_pix_clk(
static const struct clock_source_funcs dcn20_clk_src_funcs = {
.cs_power_down = dce110_clock_source_power_down,
.program_pix_clk = dcn20_program_pix_clk,
.get_pix_clk_dividers = dce112_get_pix_clk_dividers
.get_pix_clk_dividers = dce112_get_pix_clk_dividers,
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
#endif


@@ -1234,37 +1235,36 @@ static bool calc_pll_max_vco_construct(
struct calc_pll_clock_source_init_data *init_data)
{
uint32_t i;
struct dc_firmware_info fw_info = { { 0 } };
struct dc_firmware_info *fw_info;
if (calc_pll_cs == NULL ||
init_data == NULL ||
init_data->bp == NULL)
return false;

if (init_data->bp->funcs->get_firmware_info(
init_data->bp,
&fw_info) != BP_RESULT_OK)
if (!init_data->bp->fw_info_valid)
return false;

fw_info = &init_data->bp->fw_info;
calc_pll_cs->ctx = init_data->ctx;
calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
calc_pll_cs->ref_freq_khz = fw_info->pll_info.crystal_frequency;
calc_pll_cs->min_vco_khz =
fw_info.pll_info.min_output_pxl_clk_pll_frequency;
fw_info->pll_info.min_output_pxl_clk_pll_frequency;
calc_pll_cs->max_vco_khz =
fw_info.pll_info.max_output_pxl_clk_pll_frequency;
fw_info->pll_info.max_output_pxl_clk_pll_frequency;

if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
calc_pll_cs->max_pll_input_freq_khz =
init_data->max_override_input_pxl_clk_pll_freq_khz;
else
calc_pll_cs->max_pll_input_freq_khz =
fw_info.pll_info.max_input_pxl_clk_pll_frequency;
fw_info->pll_info.max_input_pxl_clk_pll_frequency;

if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
calc_pll_cs->min_pll_input_freq_khz =
init_data->min_override_input_pxl_clk_pll_freq_khz;
else
calc_pll_cs->min_pll_input_freq_khz =
fw_info.pll_info.min_input_pxl_clk_pll_frequency;
fw_info->pll_info.min_input_pxl_clk_pll_frequency;

calc_pll_cs->min_pix_clock_pll_post_divider =
init_data->min_pix_clk_pll_post_divider;

@@ -1316,7 +1316,6 @@ bool dce110_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask)
{
struct dc_firmware_info fw_info = { { 0 } };
struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
struct calc_pll_clock_source_init_data calc_pll_cs_init_data;


@@ -1329,14 +1328,12 @@ bool dce110_clk_src_construct(
clk_src->cs_shift = cs_shift;
clk_src->cs_mask = cs_mask;

if (clk_src->bios->funcs->get_firmware_info(
clk_src->bios, &fw_info) != BP_RESULT_OK) {
if (!clk_src->bios->fw_info_valid) {
ASSERT_CRITICAL(false);
goto unexpected_failure;
}

clk_src->ext_clk_khz =
fw_info.external_clock_source_frequency_for_dp;
clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp;

/* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
calc_pll_cs_init_data.bp = bios;

@@ -1376,7 +1373,7 @@ bool dce110_clk_src_construct(
FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
calc_pll_cs_init_data_hdmi.ctx = ctx;

clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
clk_src->ref_freq_khz = clk_src->bios->fw_info.pll_info.crystal_frequency;

if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
return true;

@@ -1419,8 +1416,6 @@ bool dce112_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask)
{
struct dc_firmware_info fw_info = { { 0 } };

clk_src->base.ctx = ctx;
clk_src->bios = bios;
clk_src->base.id = id;

@@ -1430,13 +1425,12 @@ bool dce112_clk_src_construct(
clk_src->cs_shift = cs_shift;
clk_src->cs_mask = cs_mask;

if (clk_src->bios->funcs->get_firmware_info(
clk_src->bios, &fw_info) != BP_RESULT_OK) {
if (!clk_src->bios->fw_info_valid) {
ASSERT_CRITICAL(false);
return false;
}

clk_src->ext_clk_khz = fw_info.external_clock_source_frequency_for_dp;
clk_src->ext_clk_khz = clk_src->bios->fw_info.external_clock_source_frequency_for_dp;

return true;
}

@@ -62,6 +62,10 @@
SRII(BLND_CONTROL, BLND, 4), \
SRII(BLND_CONTROL, BLND, 5)

#define HSWEQ_DCN_PIXEL_RATE_REG_LIST(blk, inst) \
SRII(PIXEL_RATE_CNTL, blk, inst), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, inst)

#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
SRII(PIXEL_RATE_CNTL, blk, 0), \
SRII(PIXEL_RATE_CNTL, blk, 1), \

@@ -151,7 +155,10 @@
SR(DCCG_GATE_DISABLE_CNTL2), \
SR(DCFCLK_CNTL),\
SR(DCFCLK_CNTL), \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)


#define MMHUB_DCN_REG_LIST()\
/* todo: get these from GVM instead of reading registers ourselves */\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\

@@ -166,10 +173,14 @@
MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)


#define HWSEQ_DCN1_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
HWSEQ_PHYPLL_REG_LIST(OTG), \
MMHUB_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 2), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 3), \
SR(DCHUBBUB_SDPIF_FB_BASE),\
SR(DCHUBBUB_SDPIF_FB_OFFSET),\
SR(DCHUBBUB_SDPIF_AGP_BASE),\

@@ -202,8 +213,12 @@
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
HWSEQ_PHYPLL_REG_LIST(OTG), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 2), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 3), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 4), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 5), \
SR(MICROSECOND_TIME_BASE_DIV), \
SR(MILLISECOND_TIME_BASE_DIV), \
SR(DISPCLK_FREQ_CHANGE_CNTL), \

@@ -401,36 +416,34 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)

#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)

#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)

#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)

#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)

#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)

#define HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\

@@ -438,18 +451,15 @@ struct dce_hwseq_registers {
SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh)

#define HWSEQ_DCE12_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE0_DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)

#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\

@@ -512,10 +522,7 @@ struct dce_hwseq_registers {
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\

@@ -576,8 +583,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
#endif

#define HWSEQ_REG_FIELD_LIST(type) \

@@ -612,9 +618,9 @@ struct dce_hwseq_registers {
type ENABLE_L1_TLB;\
type SYSTEM_ACCESS_MODE;\
type LVTMA_BLON;\
type LVTMA_PWRSEQ_TARGET_STATE_R;\
type LVTMA_DIGON;\
type LVTMA_DIGON_OVRD;
type LVTMA_DIGON_OVRD;\
type LVTMA_PWRSEQ_TARGET_STATE_R;

#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \

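Editor's note: the HWSEQ_REG_FIELD_LIST(type) macro touched above follows the X-macro pattern: one list of field names is instantiated with different types to declare matching shift and mask structs. A standalone illustration of that pattern; the two fields and their values below are arbitrary examples, not real register layouts:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_LIST(type) \
	type LVTMA_BLON; \
	type LVTMA_DIGON;

struct demo_shift { DEMO_FIELD_LIST(uint8_t) };
struct demo_mask  { DEMO_FIELD_LIST(uint32_t) };

int main(void)
{
	struct demo_shift s = { .LVTMA_BLON = 24, .LVTMA_DIGON = 16 };
	struct demo_mask  m = { .LVTMA_BLON = 0x01000000,
				.LVTMA_DIGON = 0x00010000 };

	uint32_t reg = 0;
	reg |= (1u << s.LVTMA_BLON) & m.LVTMA_BLON;  /* set the BLON field */
	printf("reg = 0x%08x\n", reg);               /* prints 0x01000000 */
	return 0;
}
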
@@ -25,6 +25,7 @@

#include <linux/delay.h>

#include "resource.h"
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
#include "reg_helper.h"

@@ -99,17 +100,6 @@ static uint32_t get_hw_buffer_available_size(
dce_i2c_hw->buffer_used_bytes;
}

uint32_t get_reference_clock(
struct dc_bios *bios)
{
struct dc_firmware_info info = { { 0 } };

if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
return 0;

return info.pll_info.crystal_frequency;
}

static uint32_t get_speed(
const struct dce_i2c_hw *dce_i2c_hw)
{

@@ -401,7 +391,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (ddc->hw_info.hw_supported) {
enum gpio_ddc_line line = dal_ddc_get_line(ddc);

if (line < pool->pipe_count)
if (line < pool->res_cap->num_ddc)
dce_i2c_hw = pool->hw_i2cs[line];
}

@@ -632,7 +622,7 @@ void dce_i2c_hw_construct(
{
dce_i2c_hw->ctx = ctx;
dce_i2c_hw->engine_id = engine_id;
dce_i2c_hw->reference_frequency = get_reference_clock(ctx->dc_bios) >> 1;
dce_i2c_hw->reference_frequency = (ctx->dc_bios->fw_info.pll_info.crystal_frequency) >> 1;
dce_i2c_hw->regs = regs;
dce_i2c_hw->shifts = shifts;
dce_i2c_hw->masks = masks;

@@ -1038,6 +1038,24 @@ static void dce110_stream_encoder_set_avmute(
}


static void dce110_reset_hdmi_stream_attribute(
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN)
REG_UPDATE_5(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0,
HDMI_DATA_SCRAMBLE_EN, 0,
HDMI_CLOCK_CHANNEL_RATE, 0);
else
REG_UPDATE_3(HDMI_CONTROL,
HDMI_PACKET_GEN_VERSION, 1,
HDMI_KEEPOUT_MODE, 1,
HDMI_DEEP_COLOR_ENABLE, 0);
}

#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1


@@ -1584,6 +1602,17 @@ static void dig_connect_to_otg(
REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst);
}

static unsigned int dig_source_otg(
struct stream_encoder *enc)
{
uint32_t tg_inst = 0;
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);

REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst);

return tg_inst;
}

static const struct stream_encoder_funcs dce110_str_enc_funcs = {
.dp_set_stream_attribute =
dce110_stream_encoder_dp_set_stream_attribute,

@@ -1618,6 +1647,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
.setup_stereo_sync = setup_stereo_sync,
.set_avmute = dce110_stream_encoder_set_avmute,
.dig_connect_to_otg = dig_connect_to_otg,
.hdmi_reset_stream_attribute = dce110_reset_hdmi_stream_attribute,
.dig_source_otg = dig_source_otg,
};

void dce110_stream_encoder_construct(

@@ -304,7 +304,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};

@@ -910,7 +910,6 @@ static bool construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;

ctx->dc_bios->regs = &bios_regs;

@@ -921,8 +920,7 @@ static bool construct(

bp = ctx->dc_bios;

if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
info.external_clock_source_frequency_for_dp != 0) {
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);


@@ -731,7 +731,7 @@ static enum bp_result link_transmitter_control(
* @brief
* eDP only.
*/
void hwss_edp_wait_for_hpd_ready(
void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up)
{

@@ -799,7 +799,7 @@ void hwss_edp_wait_for_hpd_ready(
}
}

void hwss_edp_power_control(
void dce110_edp_power_control(
struct dc_link *link,
bool power_up)
{

@@ -881,7 +881,7 @@ void hwss_edp_power_control(
* @brief
* eDP only. Control the backlight of the eDP panel
*/
void hwss_edp_backlight_control(
void dce110_edp_backlight_control(
struct dc_link *link,
bool enable)
{

@@ -981,7 +981,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
}
}

void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
struct pp_smu_funcs *pp_smu = NULL;

@@ -1004,24 +1004,13 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
if (dc->res_pool->pp_smu)
pp_smu = dc->res_pool->pp_smu;

if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only)
/*only disalbe az_endpoint if power down or free*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
pipe_ctx->stream_res.stream_enc);
/*don't free audio if it is from retrain or internal disable stream*/
if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
/*we have to dynamic arbitrate the audio endpoints*/
/*we free the resource, need reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}

if (clk_mgr->funcs->enable_pme_wa)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
clk_mgr->funcs->enable_pme_wa(clk_mgr);

@@ -1034,21 +1023,24 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
}
}

void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;

if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
pipe_ctx->stream_res.stream_enc->funcs->hdmi_reset_stream_attribute(
pipe_ctx->stream_res.stream_enc);
}

if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc);

dc->hwss.disable_audio_stream(pipe_ctx, option);
dc->hwss.disable_audio_stream(pipe_ctx);

link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,

@@ -1338,7 +1330,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct drr_params params = {0};
unsigned int event_triggers = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
#endif

if (dc->hwss.disable_stream_gating) {

@@ -1406,7 +1398,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&stream->bit_depth_params,
&stream->clamping);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (odm_pipe) {
while (odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(
odm_pipe->stream_res.opp,
COLOR_SPACE_YCBCR601,

@@ -1417,6 +1409,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
odm_pipe->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping);
odm_pipe = odm_pipe->next_odm_pipe;
}
#endif

@@ -1726,7 +1719,8 @@ void dce110_set_safe_displaymarks(
******************************************************************************/

static void set_drr(struct pipe_ctx **pipe_ctx,
int num_pipes, int vmin, int vmax)
int num_pipes, unsigned int vmin, unsigned int vmax,
unsigned int vmid, unsigned int vmid_frame_number)
{
int i = 0;
struct drr_params params = {0};

@@ -1910,8 +1904,25 @@ static void dce110_reset_hw_ctx_wrap(
/* Disable if new stream is null. O/w, if stream is
* disabled already, no need to disable again.
*/
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
core_link_disable_stream(pipe_ctx_old);

/* free acquired resources*/
if (pipe_ctx_old->stream_res.audio) {
/*disable az_endpoint*/
pipe_ctx_old->stream_res.audio->funcs->
az_disable(pipe_ctx_old->stream_res.audio);

/*free audio*/
if (dc->caps.dynamic_audio == true) {
/*we have to dynamic arbitrate the audio endpoints*/
/*we free the resource, need reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
pipe_ctx_old->stream_res.audio, false);
pipe_ctx_old->stream_res.audio = NULL;
}
}
}

pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) {

@@ -2076,7 +2087,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;

if (pipe_ctx->top_pipe)
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;

status = apply_single_controller_ctx_to_hw(

@@ -2755,9 +2766,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
.set_cursor_attribute = dce110_set_cursor_attribute
};

@@ -42,7 +42,7 @@ enum dc_status dce110_apply_ctx_to_hw(

void dce110_enable_stream(struct pipe_ctx *pipe_ctx);

void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void dce110_disable_stream(struct pipe_ctx *pipe_ctx);

void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);

@@ -50,7 +50,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
void dce110_blank_stream(struct pipe_ctx *pipe_ctx);

void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx);

void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);


@@ -73,15 +73,15 @@ void dce110_optimize_bandwidth(

void dp_receiver_power_ctrl(struct dc_link *link, bool on);

void hwss_edp_power_control(
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);

void hwss_edp_backlight_control(
void dce110_edp_backlight_control(
struct dc_link *link,
bool enable);

void hwss_edp_wait_for_hpd_ready(
void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);

@@ -331,7 +331,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};

@@ -1274,7 +1274,6 @@ static bool construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;

ctx->dc_bios->regs = &bios_regs;

@@ -1300,8 +1299,7 @@ static bool construct(

bp = ctx->dc_bios;

if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
info.external_clock_source_frequency_for_dp != 0) {
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);


@@ -337,7 +337,7 @@ static const struct dce_audio_shift audio_shift = {
AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
static const struct dce_audio_mask audio_mask = {
AUD_COMMON_MASK_SH_LIST(_MASK)
};
