Merge tag 'amd-drm-next-5.7-2020-03-10' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.7-2020-03-10: amdgpu: - SR-IOV fixes - Fix up fallout from drm load/unload callback removal - Navi, renoir power management watermark fixes - Refactor smu parameter handling - Display FEC fixes - Display DCC fixes - HDCP fixes - Add support for USB-C PD firmware updates - Pollock detection fix - Rework compute ring priority handling - RAS fixes - Misc cleanups amdkfd: - Consolidate more gfx config details in amdgpu - Consolidate bo alloc flags - Improve code comments - SDMA MQD fixes - Misc cleanups gpu scheduler: - Add suport for modifying the sched list uapi: - Clarify comments about GEM_CREATE flags that are not used by userspace. The kernel driver has always prevented userspace from using these. They are only used internally in the kernel driver. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Alex Deucher <alexdeucher@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/20200310212748.4519-1-alexander.deucher@amd.com
This commit is contained in:
Коммит
69ddce0970
|
@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
|
|||
/* invalidate hdp read cache */
|
||||
void (*invalidate_hdp)(struct amdgpu_device *adev,
|
||||
struct amdgpu_ring *ring);
|
||||
void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
|
||||
/* check if the asic needs a full reset of if soft reset will work */
|
||||
bool (*need_full_reset)(struct amdgpu_device *adev);
|
||||
/* initialize doorbell layout for specific asic*/
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include "amdgpu_xgmi.h"
|
||||
#include <uapi/linux/kfd_ioctl.h>
|
||||
|
||||
static const unsigned int compute_vmid_bitmap = 0xFF00;
|
||||
|
||||
|
@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
|
|||
|
||||
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
|
||||
void **mem_obj, uint64_t *gpu_addr,
|
||||
void **cpu_ptr, bool mqd_gfx9)
|
||||
void **cpu_ptr, bool cp_mqd_gfx9)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
struct amdgpu_bo *bo = NULL;
|
||||
|
@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
|
|||
bp.type = ttm_bo_type_kernel;
|
||||
bp.resv = NULL;
|
||||
|
||||
if (mqd_gfx9)
|
||||
bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
|
||||
if (cp_mqd_gfx9)
|
||||
bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
|
||||
|
||||
r = amdgpu_bo_create(adev, &bp, &bo);
|
||||
if (r) {
|
||||
|
@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
|
|||
metadata_size, &metadata_flags);
|
||||
if (flags) {
|
||||
*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
|
||||
ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
|
||||
KFD_IOC_ALLOC_MEM_FLAGS_VRAM
|
||||
: KFD_IOC_ALLOC_MEM_FLAGS_GTT;
|
||||
|
||||
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
|
||||
*flags |= ALLOC_MEM_FLAGS_PUBLIC;
|
||||
*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
|
||||
}
|
||||
|
||||
out_put:
|
||||
|
|
|
@ -242,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
|
|||
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
|
||||
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
|
||||
|
||||
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config);
|
||||
|
||||
/* KGD2KFD callbacks */
|
||||
int kgd2kfd_init(void);
|
||||
void kgd2kfd_exit(void);
|
||||
|
|
|
@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
|
|||
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
|
||||
.get_atc_vmid_pasid_mapping_info =
|
||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
||||
.get_tile_config = kgd_gfx_v9_get_tile_config,
|
||||
.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
|
||||
.get_hive_id = amdgpu_amdkfd_get_hive_id,
|
||||
};
|
||||
|
|
|
@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
|
|||
SAVE_WAVES
|
||||
};
|
||||
|
||||
/* Because of REG_GET_FIELD() being used, we put this function in the
|
||||
* asic specific file.
|
||||
*/
|
||||
static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
|
||||
config->gb_addr_config = adev->gfx.config.gb_addr_config;
|
||||
#if 0
|
||||
/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
|
||||
* MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
|
||||
* changes commented out related code, doing the same here for now but
|
||||
* need to sync with Ken et al
|
||||
*/
|
||||
config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFBANK);
|
||||
config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFRANKS);
|
||||
#endif
|
||||
|
||||
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
|
||||
config->num_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
|
||||
config->macro_tile_config_ptr =
|
||||
adev->gfx.config.macrotile_mode_array;
|
||||
config->num_macro_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
|
||||
{
|
||||
return (struct amdgpu_device *)kgd;
|
||||
|
@ -805,7 +773,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
|
|||
.address_watch_get_offset = kgd_address_watch_get_offset,
|
||||
.get_atc_vmid_pasid_mapping_info =
|
||||
get_atc_vmid_pasid_mapping_info,
|
||||
.get_tile_config = amdgpu_amdkfd_get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
.get_hive_id = amdgpu_amdkfd_get_hive_id,
|
||||
.get_unique_id = amdgpu_amdkfd_get_unique_id,
|
||||
|
|
|
@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
|
|||
float f32All;
|
||||
};
|
||||
|
||||
/* Because of REG_GET_FIELD() being used, we put this function in the
|
||||
* asic specific file.
|
||||
*/
|
||||
static int get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
|
||||
config->gb_addr_config = adev->gfx.config.gb_addr_config;
|
||||
config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFBANK);
|
||||
config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFRANKS);
|
||||
|
||||
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
|
||||
config->num_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
|
||||
config->macro_tile_config_ptr =
|
||||
adev->gfx.config.macrotile_mode_array;
|
||||
config->num_macro_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
|
||||
{
|
||||
return (struct amdgpu_device *)kgd;
|
||||
|
@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
|
|||
.address_watch_get_offset = kgd_address_watch_get_offset,
|
||||
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
|
||||
.set_scratch_backing_va = set_scratch_backing_va,
|
||||
.get_tile_config = get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
|
||||
};
|
||||
|
|
|
@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
|
|||
RESET_WAVES
|
||||
};
|
||||
|
||||
/* Because of REG_GET_FIELD() being used, we put this function in the
|
||||
* asic specific file.
|
||||
*/
|
||||
static int get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
|
||||
config->gb_addr_config = adev->gfx.config.gb_addr_config;
|
||||
config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFBANK);
|
||||
config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
|
||||
MC_ARB_RAMCFG, NOOFRANKS);
|
||||
|
||||
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
|
||||
config->num_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
|
||||
config->macro_tile_config_ptr =
|
||||
adev->gfx.config.macrotile_mode_array;
|
||||
config->num_macro_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
|
||||
{
|
||||
return (struct amdgpu_device *)kgd;
|
||||
|
@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
|
|||
.get_atc_vmid_pasid_mapping_info =
|
||||
get_atc_vmid_pasid_mapping_info,
|
||||
.set_scratch_backing_va = set_scratch_backing_va,
|
||||
.get_tile_config = get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
};
|
||||
|
|
|
@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
|
|||
RESET_WAVES
|
||||
};
|
||||
|
||||
|
||||
/* Because of REG_GET_FIELD() being used, we put this function in the
|
||||
* asic specific file.
|
||||
*/
|
||||
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
|
||||
config->gb_addr_config = adev->gfx.config.gb_addr_config;
|
||||
|
||||
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
|
||||
config->num_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
|
||||
config->macro_tile_config_ptr =
|
||||
adev->gfx.config.macrotile_mode_array;
|
||||
config->num_macro_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
|
||||
{
|
||||
return (struct amdgpu_device *)kgd;
|
||||
|
@ -736,7 +714,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
|
|||
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
|
||||
.get_atc_vmid_pasid_mapping_info =
|
||||
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
|
||||
.get_tile_config = kgd_gfx_v9_get_tile_config,
|
||||
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
|
||||
.get_hive_id = amdgpu_amdkfd_get_hive_id,
|
||||
.get_unique_id = amdgpu_amdkfd_get_unique_id,
|
||||
|
|
|
@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
|
|||
|
||||
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
|
||||
uint8_t vmid, uint16_t *p_pasid);
|
||||
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config);
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "amdgpu_vm.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_dma_buf.h"
|
||||
#include <uapi/linux/kfd_ioctl.h>
|
||||
|
||||
/* BO flag to indicate a KFD userptr BO */
|
||||
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
|
||||
|
@ -400,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
|
|||
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
|
||||
{
|
||||
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
|
||||
bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
|
||||
bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
|
||||
uint32_t mapping_flags;
|
||||
|
||||
mapping_flags = AMDGPU_VM_PAGE_READABLE;
|
||||
if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
|
||||
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
|
||||
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
|
||||
if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
|
||||
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
|
||||
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
|
||||
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_ARCTURUS:
|
||||
if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
|
||||
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
|
||||
if (bo_adev == adev)
|
||||
mapping_flags |= coherent ?
|
||||
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
|
||||
|
@ -1160,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
|
|||
/*
|
||||
* Check on which domain to allocate BO
|
||||
*/
|
||||
if (flags & ALLOC_MEM_FLAGS_VRAM) {
|
||||
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
|
||||
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
|
||||
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
|
||||
alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
|
||||
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
|
||||
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
|
||||
} else if (flags & ALLOC_MEM_FLAGS_GTT) {
|
||||
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
|
||||
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
|
||||
alloc_flags = 0;
|
||||
} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
|
||||
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
|
||||
domain = AMDGPU_GEM_DOMAIN_GTT;
|
||||
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
|
||||
alloc_flags = 0;
|
||||
if (!offset || !*offset)
|
||||
return -EINVAL;
|
||||
user_addr = untagged_addr(*offset);
|
||||
} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
|
||||
ALLOC_MEM_FLAGS_MMIO_REMAP)) {
|
||||
} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
|
||||
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
|
||||
domain = AMDGPU_GEM_DOMAIN_GTT;
|
||||
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
|
||||
bo_type = ttm_bo_type_sg;
|
||||
|
@ -1198,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
|
|||
}
|
||||
INIT_LIST_HEAD(&(*mem)->bo_va_list);
|
||||
mutex_init(&(*mem)->lock);
|
||||
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
|
||||
(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
|
||||
|
||||
/* Workaround for AQL queue wraparound bug. Map the same
|
||||
* memory twice. That means we only actually allocate half
|
||||
|
@ -1680,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
|
|||
|
||||
INIT_LIST_HEAD(&(*mem)->bo_va_list);
|
||||
mutex_init(&(*mem)->lock);
|
||||
|
||||
(*mem)->alloc_flags =
|
||||
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
|
||||
ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
|
||||
ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
|
||||
KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
|
||||
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
|
||||
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
|
||||
|
||||
(*mem)->bo = amdgpu_bo_ref(bo);
|
||||
(*mem)->va = va;
|
||||
|
@ -2242,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
|
|||
kfree(mem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Returns GPU-specific tiling mode information */
|
||||
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
|
||||
|
||||
config->gb_addr_config = adev->gfx.config.gb_addr_config;
|
||||
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
|
||||
config->num_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
|
||||
config->macro_tile_config_ptr =
|
||||
adev->gfx.config.macrotile_mode_array;
|
||||
config->num_macro_tile_configs =
|
||||
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
|
||||
|
||||
/* Those values are not set from GFX9 onwards */
|
||||
config->num_banks = adev->gfx.config.num_banks;
|
||||
config->num_ranks = adev->gfx.config.num_ranks;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1208,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
|||
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
|
||||
struct drm_sched_entity *entity = p->entity;
|
||||
enum drm_sched_priority priority;
|
||||
struct amdgpu_ring *ring;
|
||||
struct amdgpu_bo_list_entry *e;
|
||||
struct amdgpu_job *job;
|
||||
uint64_t seq;
|
||||
|
@ -1261,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
|||
priority = job->base.s_priority;
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
|
||||
ring = to_amdgpu_ring(entity->rq->sched);
|
||||
amdgpu_ring_priority_get(ring, priority);
|
||||
|
||||
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
|
||||
|
||||
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
|
||||
|
|
|
@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
|
|||
return -EACCES;
|
||||
}
|
||||
|
||||
static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
|
||||
{
|
||||
switch (prio) {
|
||||
case DRM_SCHED_PRIORITY_HIGH_HW:
|
||||
case DRM_SCHED_PRIORITY_KERNEL:
|
||||
return AMDGPU_GFX_PIPE_PRIO_HIGH;
|
||||
default:
|
||||
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->adev;
|
||||
struct amdgpu_ctx_entity *entity;
|
||||
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
|
||||
unsigned num_scheds = 0;
|
||||
enum gfx_pipe_priority hw_prio;
|
||||
enum drm_sched_priority priority;
|
||||
int r;
|
||||
|
||||
|
@ -85,8 +97,9 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
|
|||
num_scheds = 1;
|
||||
break;
|
||||
case AMDGPU_HW_IP_COMPUTE:
|
||||
scheds = adev->gfx.compute_sched;
|
||||
num_scheds = adev->gfx.num_compute_sched;
|
||||
hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
|
||||
scheds = adev->gfx.compute_prio_sched[hw_prio];
|
||||
num_scheds = adev->gfx.num_compute_sched[hw_prio];
|
||||
break;
|
||||
case AMDGPU_HW_IP_DMA:
|
||||
scheds = adev->sdma.sdma_sched;
|
||||
|
@ -502,6 +515,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
|
|||
return fence;
|
||||
}
|
||||
|
||||
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
|
||||
struct amdgpu_ctx_entity *aentity,
|
||||
int hw_ip,
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
struct amdgpu_device *adev = ctx->adev;
|
||||
enum gfx_pipe_priority hw_prio;
|
||||
struct drm_gpu_scheduler **scheds = NULL;
|
||||
unsigned num_scheds;
|
||||
|
||||
/* set sw priority */
|
||||
drm_sched_entity_set_priority(&aentity->entity, priority);
|
||||
|
||||
/* set hw priority */
|
||||
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
|
||||
hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
|
||||
scheds = adev->gfx.compute_prio_sched[hw_prio];
|
||||
num_scheds = adev->gfx.num_compute_sched[hw_prio];
|
||||
drm_sched_entity_modify_sched(&aentity->entity, scheds,
|
||||
num_scheds);
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
|
@ -514,13 +550,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
|
|||
ctx->init_priority : ctx->override_priority;
|
||||
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
|
||||
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
|
||||
struct drm_sched_entity *entity;
|
||||
|
||||
if (!ctx->entities[i][j])
|
||||
continue;
|
||||
|
||||
entity = &ctx->entities[i][j]->entity;
|
||||
drm_sched_entity_set_priority(entity, ctx_prio);
|
||||
amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
|
||||
i, ctx_prio);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -628,20 +662,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
|
|||
mutex_destroy(&mgr->lock);
|
||||
}
|
||||
|
||||
|
||||
static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
|
||||
{
|
||||
int num_compute_sched_normal = 0;
|
||||
int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
|
||||
int i;
|
||||
|
||||
/* use one drm sched array, gfx.compute_sched to store both high and
|
||||
* normal priority drm compute schedulers */
|
||||
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
||||
if (!adev->gfx.compute_ring[i].has_high_prio)
|
||||
adev->gfx.compute_sched[num_compute_sched_normal++] =
|
||||
&adev->gfx.compute_ring[i].sched;
|
||||
else
|
||||
adev->gfx.compute_sched[num_compute_sched_high--] =
|
||||
&adev->gfx.compute_ring[i].sched;
|
||||
}
|
||||
|
||||
/* compute ring only has two priority for now */
|
||||
i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
|
||||
adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
|
||||
adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
|
||||
|
||||
i = AMDGPU_GFX_PIPE_PRIO_HIGH;
|
||||
if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
|
||||
/* When compute has no high priority rings then use */
|
||||
/* normal priority sched array */
|
||||
adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
|
||||
adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
|
||||
} else {
|
||||
adev->gfx.compute_prio_sched[i] =
|
||||
&adev->gfx.compute_sched[num_compute_sched_high - 1];
|
||||
adev->gfx.num_compute_sched[i] =
|
||||
adev->gfx.num_compute_rings - num_compute_sched_normal;
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
amdgpu_ctx_init_compute_sched(adev);
|
||||
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
|
||||
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
|
||||
adev->gfx.num_gfx_sched++;
|
||||
}
|
||||
|
||||
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
||||
adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
|
||||
adev->gfx.num_compute_sched++;
|
||||
}
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
|
||||
adev->sdma.num_sdma_sched++;
|
||||
|
|
|
@ -992,18 +992,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
|
||||
if (adev->debugfs_regs[i]) {
|
||||
debugfs_remove(adev->debugfs_regs[i]);
|
||||
adev->debugfs_regs[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
|
@ -1269,9 +1257,44 @@ failure:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_debugfs_sclk_set(void *data, u64 val)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t max_freq, min_freq;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)data;
|
||||
|
||||
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
|
||||
return -EINVAL;
|
||||
|
||||
ret = pm_runtime_get_sync(adev->ddev->dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
|
||||
if (ret || val > max_freq || val < min_freq)
|
||||
return -EINVAL;
|
||||
ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(adev->ddev->dev);
|
||||
pm_runtime_put_autosuspend(adev->ddev->dev);
|
||||
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
|
||||
amdgpu_debugfs_ib_preempt, "%llu\n");
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
|
||||
amdgpu_debugfs_sclk_set, "%llu\n");
|
||||
|
||||
extern void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
|
||||
int amdgpu_debugfs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r, i;
|
||||
|
@ -1285,6 +1308,15 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
adev->smu.debugfs_sclk =
|
||||
debugfs_create_file("amdgpu_force_sclk", 0200,
|
||||
adev->ddev->primary->debugfs_root, adev,
|
||||
&fops_sclk_set);
|
||||
if (!(adev->smu.debugfs_sclk)) {
|
||||
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Register debugfs entries for amdgpu_ttm */
|
||||
r = amdgpu_ttm_debugfs_init(adev);
|
||||
if (r) {
|
||||
|
@ -1335,35 +1367,19 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
|
|||
}
|
||||
}
|
||||
|
||||
amdgpu_ras_debugfs_create_all(adev);
|
||||
|
||||
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
|
||||
ARRAY_SIZE(amdgpu_debugfs_list));
|
||||
}
|
||||
|
||||
void amdgpu_debugfs_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
struct amdgpu_ring *ring = adev->rings[i];
|
||||
|
||||
if (!ring)
|
||||
continue;
|
||||
|
||||
amdgpu_debugfs_ring_fini(ring);
|
||||
}
|
||||
amdgpu_ttm_debugfs_fini(adev);
|
||||
debugfs_remove(adev->debugfs_preempt);
|
||||
}
|
||||
|
||||
#else
|
||||
int amdgpu_debugfs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
void amdgpu_debugfs_fini(struct amdgpu_device *adev) { }
|
||||
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
|
||||
#endif
|
||||
|
|
|
@ -32,7 +32,6 @@ struct amdgpu_debugfs {
|
|||
};
|
||||
|
||||
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
|
||||
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
|
||||
int amdgpu_debugfs_init(struct amdgpu_device *adev);
|
||||
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
|
||||
|
|
|
@ -3193,6 +3193,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
|||
flush_delayed_work(&adev->delayed_init_work);
|
||||
adev->shutdown = true;
|
||||
|
||||
/* make sure IB test finished before entering exclusive mode
|
||||
* to avoid preemption on IB test
|
||||
* */
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_request_full_gpu(adev, false);
|
||||
|
||||
/* disable all interrupts */
|
||||
amdgpu_irq_disable_all(adev);
|
||||
if (adev->mode_info.mode_config_initialized){
|
||||
|
@ -3235,7 +3241,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
|||
adev->rmmio = NULL;
|
||||
amdgpu_device_doorbell_fini(adev);
|
||||
|
||||
amdgpu_debugfs_regs_cleanup(adev);
|
||||
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
|
||||
if (adev->ucode_sysfs_en)
|
||||
amdgpu_ucode_sysfs_fini(adev);
|
||||
|
|
|
@ -1121,18 +1121,16 @@ static void
|
|||
amdgpu_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
|
||||
#ifdef MODULE
|
||||
if (THIS_MODULE->state != MODULE_STATE_GOING)
|
||||
#endif
|
||||
DRM_ERROR("Hotplug removal is not supported\n");
|
||||
drm_dev_unplug(dev);
|
||||
drm_dev_put(dev);
|
||||
amdgpu_debugfs_fini(adev);
|
||||
amdgpu_driver_unload_kms(dev);
|
||||
pci_disable_device(pdev);
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
drm_dev_put(dev);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1301,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
|
|||
{
|
||||
struct drm_device *drm_dev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
|
||||
int ret = 1;
|
||||
|
||||
if (!adev->runpm) {
|
||||
pm_runtime_forbid(dev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
|
||||
if (crtc->enabled) {
|
||||
DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
|
||||
return -EBUSY;
|
||||
if (amdgpu_device_has_dc_support(adev)) {
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
drm_modeset_lock_all(drm_dev);
|
||||
|
||||
drm_for_each_crtc(crtc, drm_dev) {
|
||||
if (crtc->state->active) {
|
||||
ret = -EBUSY;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
drm_modeset_unlock_all(drm_dev);
|
||||
|
||||
} else {
|
||||
struct drm_connector *list_connector;
|
||||
struct drm_connector_list_iter iter;
|
||||
|
||||
mutex_lock(&drm_dev->mode_config.mutex);
|
||||
drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
|
||||
|
||||
drm_connector_list_iter_begin(drm_dev, &iter);
|
||||
drm_for_each_connector_iter(list_connector, &iter) {
|
||||
if (list_connector->dpms == DRM_MODE_DPMS_ON) {
|
||||
ret = -EBUSY;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
drm_connector_list_iter_end(&iter);
|
||||
|
||||
drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
|
||||
mutex_unlock(&drm_dev->mode_config.mutex);
|
||||
}
|
||||
|
||||
if (ret == -EBUSY)
|
||||
DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
|
||||
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_autosuspend(dev);
|
||||
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
|
||||
return 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
long amdgpu_drm_ioctl(struct file *filp,
|
||||
|
|
|
@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
|
|||
return adev->gfx.mec.num_mec > 1;
|
||||
}
|
||||
|
||||
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
|
||||
int queue)
|
||||
{
|
||||
/* Policy: make queue 0 of each pipe as high priority compute queue */
|
||||
return (queue == 0);
|
||||
|
||||
}
|
||||
|
||||
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, queue, pipe, mec;
|
||||
|
@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
|
|||
int r;
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "gfx_err_count",
|
||||
.debugfs_name = "gfx_err_inject",
|
||||
};
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = amdgpu_gfx_process_ras_data_cb,
|
||||
|
|
|
@ -41,6 +41,15 @@
|
|||
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
|
||||
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
|
||||
|
||||
enum gfx_pipe_priority {
|
||||
AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
|
||||
AMDGPU_GFX_PIPE_PRIO_HIGH,
|
||||
AMDGPU_GFX_PIPE_PRIO_MAX
|
||||
};
|
||||
|
||||
#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
|
||||
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
|
||||
|
||||
struct amdgpu_mec {
|
||||
struct amdgpu_bo *hpd_eop_obj;
|
||||
u64 hpd_eop_gpu_addr;
|
||||
|
@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
|
|||
unsigned num_gpus;
|
||||
unsigned multi_gpu_tile_size;
|
||||
unsigned mc_arb_ramcfg;
|
||||
unsigned num_banks;
|
||||
unsigned num_ranks;
|
||||
unsigned gb_addr_config;
|
||||
unsigned num_rbs;
|
||||
unsigned gs_vgt_table_depth;
|
||||
|
@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
|
|||
u32 queue, u32 vmid);
|
||||
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
|
||||
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
|
||||
void (*reset_ras_error_count) (struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
struct sq_work {
|
||||
|
@ -278,8 +290,9 @@ struct amdgpu_gfx {
|
|||
uint32_t num_gfx_sched;
|
||||
unsigned num_gfx_rings;
|
||||
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
|
||||
struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
|
||||
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
|
||||
uint32_t num_compute_sched;
|
||||
uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
|
||||
unsigned num_compute_rings;
|
||||
struct amdgpu_irq_src eop_irq;
|
||||
struct amdgpu_irq_src priv_reg_irq;
|
||||
|
@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
|
|||
int *mec, int *pipe, int *queue);
|
||||
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
|
||||
int pipe, int queue);
|
||||
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
|
||||
int queue);
|
||||
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
|
||||
int pipe, int queue);
|
||||
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
|
||||
|
|
|
@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
|
|||
|
||||
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
|
||||
{
|
||||
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
|
||||
struct amdgpu_job *job = to_amdgpu_job(s_job);
|
||||
|
||||
drm_sched_job_cleanup(s_job);
|
||||
|
||||
amdgpu_ring_priority_put(ring, s_job->s_priority);
|
||||
dma_fence_put(job->fence);
|
||||
amdgpu_sync_free(&job->sync);
|
||||
amdgpu_sync_free(&job->sched_sync);
|
||||
|
@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
|
|||
void *owner, struct dma_fence **f)
|
||||
{
|
||||
enum drm_sched_priority priority;
|
||||
struct amdgpu_ring *ring;
|
||||
int r;
|
||||
|
||||
if (!f)
|
||||
|
@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
|
|||
priority = job->base.s_priority;
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
|
||||
ring = to_amdgpu_ring(entity->rq->sched);
|
||||
amdgpu_ring_priority_get(ring, priority);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
|
|||
if (adev->rmmio == NULL)
|
||||
goto done_free;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_request_full_gpu(adev, false);
|
||||
|
||||
if (adev->runpm) {
|
||||
pm_runtime_get_sync(dev->dev);
|
||||
pm_runtime_forbid(dev->dev);
|
||||
|
@ -175,6 +172,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
|||
else if (amdgpu_device_supports_baco(dev) &&
|
||||
(amdgpu_runtime_pm != 0) &&
|
||||
(adev->asic_type >= CHIP_TOPAZ) &&
|
||||
(adev->asic_type != CHIP_VEGA10) &&
|
||||
(adev->asic_type != CHIP_VEGA20) &&
|
||||
(adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
|
||||
adev->runpm = true;
|
||||
|
|
|
@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
|
|||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "mmhub_err_count",
|
||||
.debugfs_name = "mmhub_err_inject",
|
||||
};
|
||||
|
||||
if (!adev->mmhub.ras_if) {
|
||||
|
|
|
@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
|
|||
int (*ras_late_init)(struct amdgpu_device *adev);
|
||||
void (*query_ras_error_count)(struct amdgpu_device *adev,
|
||||
void *ras_error_status);
|
||||
void (*reset_ras_error_count)(struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
struct amdgpu_mmhub {
|
||||
|
|
|
@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
|
|||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "pcie_bif_err_count",
|
||||
.debugfs_name = "pcie_bif_err_inject",
|
||||
};
|
||||
|
||||
if (!adev->nbio.ras_if) {
|
||||
|
|
|
@ -1319,7 +1319,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
|
|||
amdgpu_amdkfd_unreserve_memory_limit(abo);
|
||||
|
||||
/* We only remove the fence if the resv has individualized. */
|
||||
WARN_ON_ONCE(bo->base.resv != &bo->base._resv);
|
||||
WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
|
||||
&& bo->base.resv != &bo->base._resv);
|
||||
if (bo->base.resv == &bo->base._resv)
|
||||
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_psp.h"
|
||||
|
@ -38,6 +39,9 @@
|
|||
|
||||
static void psp_set_funcs(struct amdgpu_device *adev);
|
||||
|
||||
static int psp_sysfs_init(struct amdgpu_device *adev);
|
||||
static void psp_sysfs_fini(struct amdgpu_device *adev);
|
||||
|
||||
/*
|
||||
* Due to DF Cstate management centralized to PMFW, the firmware
|
||||
* loading sequence will be updated as below:
|
||||
|
@ -136,6 +140,13 @@ static int psp_sw_init(void *handle)
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (adev->asic_type == CHIP_NAVI10) {
|
||||
ret= psp_sysfs_init(adev);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -152,6 +163,10 @@ static int psp_sw_fini(void *handle)
|
|||
release_firmware(adev->psp.ta_fw);
|
||||
adev->psp.ta_fw = NULL;
|
||||
}
|
||||
|
||||
if (adev->asic_type == CHIP_NAVI10)
|
||||
psp_sysfs_fini(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1816,6 +1831,97 @@ static int psp_set_powergating_state(void *handle,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
uint32_t fw_ver;
|
||||
int ret;
|
||||
|
||||
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
|
||||
DRM_INFO("PSP block is not ready yet.");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
mutex_lock(&adev->psp.mutex);
|
||||
ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
|
||||
mutex_unlock(&adev->psp.mutex);
|
||||
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
|
||||
}
|
||||
|
||||
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
void *cpu_addr;
|
||||
dma_addr_t dma_addr;
|
||||
int ret;
|
||||
char fw_name[100];
|
||||
const struct firmware *usbc_pd_fw;
|
||||
|
||||
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
|
||||
DRM_INFO("PSP block is not ready yet.");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
|
||||
ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/* We need contiguous physical mem to place the FW for psp to access */
|
||||
cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
|
||||
|
||||
ret = dma_mapping_error(adev->dev, dma_addr);
|
||||
if (ret)
|
||||
goto rel_buf;
|
||||
|
||||
memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
|
||||
|
||||
/*
|
||||
* x86 specific workaround.
|
||||
* Without it the buffer is invisible in PSP.
|
||||
*
|
||||
* TODO Remove once PSP starts snooping CPU cache
|
||||
*/
|
||||
#ifdef CONFIG_X86
|
||||
clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
|
||||
#endif
|
||||
|
||||
mutex_lock(&adev->psp.mutex);
|
||||
ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
|
||||
mutex_unlock(&adev->psp.mutex);
|
||||
|
||||
rel_buf:
|
||||
dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
|
||||
release_firmware(usbc_pd_fw);
|
||||
|
||||
fail:
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
|
||||
psp_usbc_pd_fw_sysfs_read,
|
||||
psp_usbc_pd_fw_sysfs_write);
|
||||
|
||||
|
||||
|
||||
const struct amd_ip_funcs psp_ip_funcs = {
|
||||
.name = "psp",
|
||||
.early_init = psp_early_init,
|
||||
|
@ -1834,6 +1940,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
|
|||
.set_powergating_state = psp_set_powergating_state,
|
||||
};
|
||||
|
||||
static int psp_sysfs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
|
||||
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to create USBC PD FW control file!");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void psp_sysfs_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
|
||||
}
|
||||
|
||||
static const struct amdgpu_psp_funcs psp_funcs = {
|
||||
.check_fw_loading_status = psp_check_fw_loading_status,
|
||||
};
|
||||
|
|
|
@ -114,6 +114,8 @@ struct psp_funcs
|
|||
int (*mem_training)(struct psp_context *psp, uint32_t ops);
|
||||
uint32_t (*ring_get_wptr)(struct psp_context *psp);
|
||||
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
|
||||
int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
|
||||
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
|
||||
};
|
||||
|
||||
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
|
||||
|
@ -351,6 +353,14 @@ struct amdgpu_psp_funcs {
|
|||
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
|
||||
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
|
||||
|
||||
#define psp_load_usbc_pd_fw(psp, dma_addr) \
|
||||
((psp)->funcs->load_usbc_pd_fw ? \
|
||||
(psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
|
||||
|
||||
#define psp_read_usbc_pd_fw(psp, fw_ver) \
|
||||
((psp)->funcs->read_usbc_pd_fw ? \
|
||||
(psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
|
||||
|
||||
extern const struct amd_ip_funcs psp_ip_funcs;
|
||||
|
||||
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
|
||||
|
|
|
@ -721,6 +721,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
|
|||
if (adev->nbio.funcs->query_ras_error_count)
|
||||
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
|
||||
break;
|
||||
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
|
||||
amdgpu_xgmi_query_ras_error_count(adev, &err_data);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1110,6 +1113,35 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
|
|||
&amdgpu_ras_debugfs_ops);
|
||||
}
|
||||
|
||||
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
struct ras_manager *obj, *tmp;
|
||||
struct ras_fs_if fs_info;
|
||||
|
||||
/*
|
||||
* it won't be called in resume path, no need to check
|
||||
* suspend and gpu reset status
|
||||
*/
|
||||
if (!con)
|
||||
return;
|
||||
|
||||
amdgpu_ras_debugfs_create_ctrl_node(adev);
|
||||
|
||||
list_for_each_entry_safe(obj, tmp, &con->head, node) {
|
||||
if (!obj)
|
||||
continue;
|
||||
|
||||
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
|
||||
(obj->attr_inuse == 1)) {
|
||||
sprintf(fs_info.debugfs_name, "%s_err_inject",
|
||||
ras_block_str(obj->head.block));
|
||||
fs_info.head = obj->head;
|
||||
amdgpu_ras_debugfs_create(adev, &fs_info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head)
|
||||
{
|
||||
|
@ -1142,7 +1174,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
|
|||
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_ras_sysfs_create_feature_node(adev);
|
||||
amdgpu_ras_debugfs_create_ctrl_node(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1846,8 +1877,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
|
|||
goto interrupt;
|
||||
}
|
||||
|
||||
amdgpu_ras_debugfs_create(adev, fs_info);
|
||||
|
||||
r = amdgpu_ras_sysfs_create(adev, fs_info);
|
||||
if (r)
|
||||
goto sysfs;
|
||||
|
@ -1856,7 +1885,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
|
|||
cleanup:
|
||||
amdgpu_ras_sysfs_remove(adev, ras_block);
|
||||
sysfs:
|
||||
amdgpu_ras_debugfs_remove(adev, ras_block);
|
||||
if (ih_info->cb)
|
||||
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
|
||||
interrupt:
|
||||
|
@ -1873,7 +1901,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
|
|||
return;
|
||||
|
||||
amdgpu_ras_sysfs_remove(adev, ras_block);
|
||||
amdgpu_ras_debugfs_remove(adev, ras_block);
|
||||
if (ih_info->cb)
|
||||
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
|
||||
amdgpu_ras_feature_enable(adev, ras_block, 0);
|
||||
|
|
|
@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
|
|||
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
|
||||
struct ras_fs_if *head);
|
||||
|
||||
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head);
|
||||
|
||||
|
|
|
@ -150,76 +150,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
|
|||
ring->funcs->end_use(ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ring_priority_put - restore a ring's priority
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding the information
|
||||
* @priority: target priority
|
||||
*
|
||||
* Release a request for executing at @priority
|
||||
*/
|
||||
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!ring->funcs->set_priority)
|
||||
return;
|
||||
|
||||
if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
|
||||
return;
|
||||
|
||||
/* no need to restore if the job is already at the lowest priority */
|
||||
if (priority == DRM_SCHED_PRIORITY_NORMAL)
|
||||
return;
|
||||
|
||||
mutex_lock(&ring->priority_mutex);
|
||||
/* something higher prio is executing, no need to decay */
|
||||
if (ring->priority > priority)
|
||||
goto out_unlock;
|
||||
|
||||
/* decay priority to the next level with a job available */
|
||||
for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
|
||||
if (i == DRM_SCHED_PRIORITY_NORMAL
|
||||
|| atomic_read(&ring->num_jobs[i])) {
|
||||
ring->priority = i;
|
||||
ring->funcs->set_priority(ring, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&ring->priority_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ring_priority_get - change the ring's priority
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding the information
|
||||
* @priority: target priority
|
||||
*
|
||||
* Request a ring's priority to be raised to @priority (refcounted).
|
||||
*/
|
||||
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
if (!ring->funcs->set_priority)
|
||||
return;
|
||||
|
||||
if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
|
||||
return;
|
||||
|
||||
mutex_lock(&ring->priority_mutex);
|
||||
if (priority <= ring->priority)
|
||||
goto out_unlock;
|
||||
|
||||
ring->priority = priority;
|
||||
ring->funcs->set_priority(ring, priority);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&ring->priority_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ring_init - init driver ring struct.
|
||||
*
|
||||
|
@ -499,13 +429,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
debugfs_remove(ring->ent);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_ring_test_helper - tests ring and set sched readiness status
|
||||
*
|
||||
|
|
|
@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
|
|||
uint32_t reg0, uint32_t reg1,
|
||||
uint32_t ref, uint32_t mask);
|
||||
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
|
||||
/* priority functions */
|
||||
void (*set_priority) (struct amdgpu_ring *ring,
|
||||
enum drm_sched_priority priority);
|
||||
/* Try to soft recover the ring to make the fence signal */
|
||||
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
|
||||
int (*preempt_ib)(struct amdgpu_ring *ring);
|
||||
|
@ -222,6 +219,7 @@ struct amdgpu_ring {
|
|||
struct mutex priority_mutex;
|
||||
/* protected by priority_mutex */
|
||||
int priority;
|
||||
bool has_high_prio;
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
struct dentry *ent;
|
||||
|
@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
|
|||
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
|
||||
void amdgpu_ring_commit(struct amdgpu_ring *ring);
|
||||
void amdgpu_ring_undo(struct amdgpu_ring *ring);
|
||||
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
|
||||
enum drm_sched_priority priority);
|
||||
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
enum drm_sched_priority priority);
|
||||
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
unsigned ring_size, struct amdgpu_irq_src *irq_src,
|
||||
unsigned irq_type);
|
||||
|
|
|
@ -126,6 +126,7 @@ struct amdgpu_rlc_funcs {
|
|||
void (*stop)(struct amdgpu_device *adev);
|
||||
void (*reset)(struct amdgpu_device *adev);
|
||||
void (*start)(struct amdgpu_device *adev);
|
||||
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
|
||||
};
|
||||
|
||||
struct amdgpu_rlc {
|
||||
|
|
|
@ -93,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
|
|||
struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "sdma_err_count",
|
||||
.debugfs_name = "sdma_err_inject",
|
||||
};
|
||||
|
||||
if (!ih_info)
|
||||
|
|
|
@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
|
|||
void (*ras_fini)(struct amdgpu_device *adev);
|
||||
int (*query_ras_error_count)(struct amdgpu_device *adev,
|
||||
uint32_t instance, void *ras_error_status);
|
||||
void (*reset_ras_error_count)(struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
struct amdgpu_sdma {
|
||||
|
|
|
@ -1028,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
|
|||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
int r;
|
||||
|
||||
if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
|
||||
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
|
||||
uint64_t page_idx = 1;
|
||||
|
||||
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
|
||||
|
@ -1036,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
|
|||
if (r)
|
||||
goto gart_bind_fail;
|
||||
|
||||
/* Patch mtype of the second part BO */
|
||||
/* The memory type of the first page defaults to UC. Now
|
||||
* modify the memory type to NC from the second page of
|
||||
* the BO onward.
|
||||
*/
|
||||
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
|
||||
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
|
||||
|
||||
|
@ -2565,13 +2568,3 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
|
||||
debugfs_remove(adev->mman.debugfs_entries[i]);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -134,6 +134,5 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
|
|||
struct ttm_mem_reg *mem);
|
||||
|
||||
int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
|
||||
void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
|
|||
int r;
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "umc_err_count",
|
||||
.debugfs_name = "umc_err_inject",
|
||||
};
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = amdgpu_umc_process_ras_data_cb,
|
||||
|
|
|
@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
|
|||
|
||||
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct dma_fence *fence;
|
||||
long r;
|
||||
|
||||
/* temporarily disable ib test for sriov */
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return 0;
|
||||
|
||||
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
|
||||
if (r)
|
||||
goto error;
|
||||
|
@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
|
|||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return 0;
|
||||
|
||||
r = amdgpu_ring_alloc(ring, 16);
|
||||
if (r)
|
||||
return r;
|
||||
|
@ -656,15 +654,10 @@ err:
|
|||
|
||||
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct dma_fence *fence = NULL;
|
||||
struct amdgpu_bo *bo = NULL;
|
||||
long r;
|
||||
|
||||
/* temporarily disable ib test for sriov */
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return 0;
|
||||
|
||||
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&bo, NULL, NULL);
|
||||
|
|
|
@ -1080,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
|
|||
struct dma_fence *fence = NULL;
|
||||
bool pasid_mapping_needed = false;
|
||||
unsigned patch_offset = 0;
|
||||
bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
|
||||
int r;
|
||||
|
||||
if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
|
||||
adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
|
||||
|
||||
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
|
||||
gds_switch_needed = true;
|
||||
vm_flush_needed = true;
|
||||
|
@ -3209,6 +3213,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
union drm_amdgpu_vm *args = data;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
long timeout = msecs_to_jiffies(2000);
|
||||
int r;
|
||||
|
||||
switch (args->in.op) {
|
||||
|
@ -3220,6 +3225,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
return r;
|
||||
break;
|
||||
case AMDGPU_VM_OP_UNRESERVE_VMID:
|
||||
if (amdgpu_sriov_runtime(adev))
|
||||
timeout = 8 * timeout;
|
||||
|
||||
/* Wait vm idle to make sure the vmid set in SPM_VMID is
|
||||
* not referenced anymore.
|
||||
*/
|
||||
r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
|
||||
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
|
||||
break;
|
||||
default:
|
||||
|
|
|
@@ -26,7 +26,12 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;

static const int xgmi_pcs_err_status_reg_vg20[] = {
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
    smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
    smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
    smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
    smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
    smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
    {"XGMI PCS DataLossErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
    {"XGMI PCS TrainingErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
    {"XGMI PCS CRCErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
    {"XGMI PCS BERExceededErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
    {"XGMI PCS TxMetaDataErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
    {"XGMI PCS ReplayBufParityErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
    {"XGMI PCS DataParityErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
    {"XGMI PCS ReplayFifoOverflowErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
    {"XGMI PCS ReplayFifoUnderflowErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
    {"XGMI PCS ElasticFifoOverflowErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
    {"XGMI PCS DeskewErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
    {"XGMI PCS DataStartupLimitErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
    {"XGMI PCS FCInitTimeoutErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
    {"XGMI PCS RecoveryTimeoutErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
    {"XGMI PCS ReadySerialTimeoutErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
    {"XGMI PCS ReadySerialAttemptErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
    {"XGMI PCS RecoveryAttemptErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
    {"XGMI PCS RecoveryRelockAttemptErr",
     SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
    {"WAFL PCS DataLossErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
    {"WAFL PCS TrainingErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
    {"WAFL PCS CRCErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
    {"WAFL PCS BERExceededErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
    {"WAFL PCS TxMetaDataErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
    {"WAFL PCS ReplayBufParityErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
    {"WAFL PCS DataParityErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
    {"WAFL PCS ReplayFifoOverflowErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
    {"WAFL PCS ReplayFifoUnderflowErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
    {"WAFL PCS ElasticFifoOverflowErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
    {"WAFL PCS DeskewErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
    {"WAFL PCS DataStartupLimitErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
    {"WAFL PCS FCInitTimeoutErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
    {"WAFL PCS RecoveryTimeoutErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
    {"WAFL PCS ReadySerialTimeoutErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
    {"WAFL PCS ReadySerialAttemptErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
    {"WAFL PCS RecoveryAttemptErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
    {"WAFL PCS RecoveryRelockAttemptErr",
     SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
    return &hive->device_list;

@@ -490,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
    };
    struct ras_fs_if fs_info = {
        .sysfs_name = "xgmi_wafl_err_count",
        .debugfs_name = "xgmi_wafl_err_inject",
    };

    if (!adev->gmc.xgmi.supported ||

@@ -560,3 +667,99 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,

    return addr + dram_base_addr;
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
                                              uint32_t value,
                                              uint32_t *ue_count,
                                              uint32_t *ce_count,
                                              bool is_xgmi_pcs)
{
    int i;
    int ue_cnt;

    if (is_xgmi_pcs) {
        /* query xgmi pcs error status,
         * only ue is supported */
        for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
            ue_cnt = (value &
                      xgmi_pcs_ras_fields[i].pcs_err_mask) >>
                     xgmi_pcs_ras_fields[i].pcs_err_shift;
            if (ue_cnt) {
                dev_info(adev->dev, "%s detected\n",
                         xgmi_pcs_ras_fields[i].err_name);
                *ue_count += ue_cnt;
            }
        }
    } else {
        /* query wafl pcs error status,
         * only ue is supported */
        for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
            ue_cnt = (value &
                      wafl_pcs_ras_fields[i].pcs_err_mask) >>
                     wafl_pcs_ras_fields[i].pcs_err_shift;
            if (ue_cnt) {
                dev_info(adev->dev, "%s detected\n",
                         wafl_pcs_ras_fields[i].err_name);
                *ue_count += ue_cnt;
            }
        }
    }

    return 0;
}
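The tables above drive a generic decode: SOC15_REG_FIELD expands to each field's mask and shift, and the per-field error count is simply (status & mask) >> shift. The same pattern in a standalone sketch, with hypothetical field layouts rather than the real register definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pcs_field { const char *name; uint32_t mask; uint32_t shift; };

/* Hypothetical 4-bit counters packed into one status word. */
static const struct pcs_field fields[] = {
    { "DataLossErr", 0x0000000f, 0 },
    { "TrainingErr", 0x000000f0, 4 },
};

/* Sum the non-zero per-field counts, reporting each as it is found. */
static uint32_t decode_ue(uint32_t status)
{
    uint32_t total = 0;
    size_t i;

    for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
        uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;
        if (cnt) {
            printf("%s: %u\n", fields[i].name, cnt);
            total += cnt;
        }
    }
    return total;
}
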
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                      void *ras_error_status)
{
    struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
    int i;
    uint32_t data;
    uint32_t ue_cnt = 0, ce_cnt = 0;

    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
        return -EINVAL;

    err_data->ue_count = 0;
    err_data->ce_count = 0;

    switch (adev->asic_type) {
    case CHIP_ARCTURUS:
        /* check xgmi pcs error */
        for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
            data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
            if (data)
                amdgpu_xgmi_query_pcs_error_status(adev,
                        data, &ue_cnt, &ce_cnt, true);
        }
        /* check wafl pcs error */
        for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
            data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
            if (data)
                amdgpu_xgmi_query_pcs_error_status(adev,
                        data, &ue_cnt, &ce_cnt, false);
        }
        break;
    case CHIP_VEGA20:
    default:
        /* check xgmi pcs error */
        for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
            data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
            if (data)
                amdgpu_xgmi_query_pcs_error_status(adev,
                        data, &ue_cnt, &ce_cnt, true);
        }
        /* check wafl pcs error */
        for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
            data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
            if (data)
                amdgpu_xgmi_query_pcs_error_status(adev,
                        data, &ue_cnt, &ce_cnt, false);
        }
        break;
    }

    err_data->ue_count += ue_cnt;
    err_data->ce_count += ce_cnt;

    return 0;
}

@@ -37,6 +37,12 @@ struct amdgpu_hive_info {
    struct task_barrier tb;
};

struct amdgpu_pcs_ras_field {
    const char *err_name;
    uint32_t pcs_err_mask;
    uint32_t pcs_err_shift;
};

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);

@@ -48,6 +54,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr);
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                      void *ras_error_status);

static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                                         struct amdgpu_device *bo_adev)

@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
        cjiffies = jiffies;
        if (time_after(cjiffies, ctx->last_jump_jiffies)) {
            cjiffies -= ctx->last_jump_jiffies;
            if ((jiffies_to_msecs(cjiffies) > 5000)) {
                DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
            if ((jiffies_to_msecs(cjiffies) > 10000)) {
                DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
                ctx->abort = true;
            }
        } else {

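The change above only raises the AtomBIOS loop watchdog from 5 to 10 seconds; the mechanism itself is the usual jiffies-based budget check. A kernel-style sketch of that idiom, with a hypothetical condition callback and budget (jiffies wraps, which is why elapsed time goes through jiffies arithmetic rather than raw comparison):

#include <linux/jiffies.h>

/* Sketch only: poll `cond` until it becomes true or `budget_ms` elapses. */
static bool poll_with_watchdog(bool (*cond)(void), unsigned int budget_ms)
{
    unsigned long start = jiffies;

    while (!cond()) {
        if (jiffies_to_msecs(jiffies - start) > budget_ms)
            return false;   /* ran past the budget, abort like atom_op_jump() */
    }
    return true;
}
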
@@ -35,6 +35,8 @@

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

@@ -500,29 +502,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
    struct amdgpu_device *adev = ring->adev;
    struct amdgpu_ib ib;
    struct dma_fence *f = NULL;
    uint32_t scratch;
    uint32_t tmp = 0;
    unsigned index;
    uint64_t gpu_addr;
    uint32_t tmp;
    long r;

    r = amdgpu_gfx_scratch_get(adev, &scratch);
    if (r) {
        DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
    r = amdgpu_device_wb_get(adev, &index);
    if (r)
        return r;
    }

    WREG32(scratch, 0xCAFEDEAD);

    gpu_addr = adev->wb.gpu_addr + (index * 4);
    adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
    memset(&ib, 0, sizeof(ib));
    r = amdgpu_ib_get(adev, NULL, 256, &ib);
    if (r) {
        DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
    r = amdgpu_ib_get(adev, NULL, 16, &ib);
    if (r)
        goto err1;
    }

    ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
    ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
    ib.ptr[2] = 0xDEADBEEF;
    ib.length_dw = 3;
    ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
    ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
    ib.ptr[2] = lower_32_bits(gpu_addr);
    ib.ptr[3] = upper_32_bits(gpu_addr);
    ib.ptr[4] = 0xDEADBEEF;
    ib.length_dw = 5;

    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
    if (r)

@@ -530,15 +531,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)

    r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0) {
        DRM_ERROR("amdgpu: IB test timed out.\n");
        r = -ETIMEDOUT;
        goto err2;
    } else if (r < 0) {
        DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        goto err2;
    }

    tmp = RREG32(scratch);
    tmp = adev->wb.wb[index];
    if (tmp == 0xDEADBEEF)
        r = 0;
    else

@@ -547,8 +546,7 @@ err2:
    amdgpu_ib_free(adev, &ib, NULL);
    dma_fence_put(f);
err1:
    amdgpu_gfx_scratch_free(adev, scratch);
    amdgpu_device_wb_free(adev, index);
    return r;
}

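The rewritten test swaps a scratch register for a writeback (WB) slot: the IB now carries a PACKET3_WRITE_DATA that lands 0xDEADBEEF in GPU-visible system memory, which also works where scratch registers are scarce or blocked (for example under SR-IOV). For reference, an illustrative sketch of how those five dwords are assembled, mirroring the layout in the code above (WRITE_DATA_DST_SEL(5) selects memory as the destination):

/* Illustrative packet assembly into a generic dword buffer. */
static void emit_write_data(uint32_t *buf, uint64_t gpu_addr, uint32_t payload)
{
    buf[0] = PACKET3(PACKET3_WRITE_DATA, 3);        /* opcode + 3 extra dwords */
    buf[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;    /* dst: memory, write-confirmed */
    buf[2] = lower_32_bits(gpu_addr);               /* destination address, low */
    buf[3] = upper_32_bits(gpu_addr);               /* destination address, high */
    buf[4] = payload;                               /* value to write */
}
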
@@ -1016,6 +1014,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
        return r;
    }

    /* init spm vmid with 0xf */
    if (adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

    return 0;
}

@@ -1783,11 +1785,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
    adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

    /* csib */
    WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
    WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
                 adev->gfx.rlc.clear_state_gpu_addr >> 32);
    WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
    WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
                 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
    WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
    WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

    return 0;
}

@@ -2395,7 +2397,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
            adev->gfx.gfx_ring[i].sched.ready = false;
    }
    WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
    WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);

    for (i = 0; i < adev->usec_timeout; i++) {
        if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)

@@ -3211,6 +3213,22 @@ done:
    return r;
}

static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
        if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
            mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
            ring->has_high_prio = true;
            mqd->cp_hqd_queue_priority =
                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
        } else {
            ring->has_high_prio = false;
        }
    }
}

static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

@@ -3336,6 +3354,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
    tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
    mqd->cp_hqd_ib_control = tmp;

    /* set static priority for a compute queue/ring */
    gfx_v10_0_compute_mqd_set_priority(ring, mqd);

    /* the map_queues packet doesn't need to activate the queue,
     * so only the KIQ needs to set this field.
     */

@@ -3925,9 +3946,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)

    amdgpu_gfx_off_ctrl(adev, false);
    mutex_lock(&adev->gfx.gpu_clock_mutex);
    WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
    clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
            ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
    clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
            ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
    mutex_unlock(&adev->gfx.gpu_clock_mutex);
    amdgpu_gfx_off_ctrl(adev, true);
    return clock;

@@ -4215,6 +4235,18 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
    return 0;
}

static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
    u32 data;

    data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);

    data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
    data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

    WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
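update_spm_vmid above is a standard read-modify-write of a single register field: read the register, clear the field through its mask, or-in the new value at the field's shift, write it back. The same idiom in isolation, with hypothetical mask and shift values standing in for the real register layout:

#include <stdint.h>

#define SPM_VMID_MASK  0x000000f0u      /* hypothetical 4-bit field */
#define SPM_VMID_SHIFT 4

/* Replace only the VMID field, leaving the other bits of `reg` intact. */
static uint32_t set_spm_vmid(uint32_t reg, uint32_t vmid)
{
    reg &= ~SPM_VMID_MASK;
    reg |= (vmid << SPM_VMID_SHIFT) & SPM_VMID_MASK;
    return reg;
}
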
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
    .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
    .set_safe_mode = gfx_v10_0_set_safe_mode,

@@ -4225,7 +4257,8 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
    .resume = gfx_v10_0_rlc_resume,
    .stop = gfx_v10_0_rlc_stop,
    .reset = gfx_v10_0_rlc_reset,
    .start = gfx_v10_0_rlc_start
    .start = gfx_v10_0_rlc_start,
    .update_spm_vmid = gfx_v10_0_update_spm_vmid
};

static int gfx_v10_0_set_powergating_state(void *handle,

@@ -4420,7 +4453,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
    if (flags & AMDGPU_IB_PREEMPTED)
        control |= INDIRECT_BUFFER_PRE_RESUME(1);

    if (!(ib->flags & AMDGPU_IB_FLAG_CE))
    if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
        gfx_v10_0_ring_emit_de_meta(ring,
            (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}

@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
        return r;
    }

    /* init spm vmid with 0xf */
    if (adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

    return 0;
}

@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
    return 0;
}

static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
    u32 data;

    data = RREG32(mmRLC_SPM_VMID);

    data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
    data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;

    WREG32(mmRLC_SPM_VMID, data);
}

static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
    u32 data, orig, tmp, tmp2;

@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
    .resume = gfx_v7_0_rlc_resume,
    .stop = gfx_v7_0_rlc_stop,
    .reset = gfx_v7_0_rlc_reset,
    .start = gfx_v7_0_rlc_start
    .start = gfx_v7_0_rlc_start,
    .update_spm_vmid = gfx_v7_0_update_spm_vmid
};

static int gfx_v7_0_early_init(void *handle)

@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
    adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
    mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

    adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
                                               MC_ARB_RAMCFG, NOOFBANK);
    adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
                                               MC_ARB_RAMCFG, NOOFRANKS);

    adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
    adev->gfx.config.mem_max_burst_length_bytes = 256;
    if (adev->flags & AMD_IS_APU) {

@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
        return r;
    }

    /* init spm vmid with 0xf */
    if (adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

    return 0;
}

@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
    adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
    mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

    adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
                                               MC_ARB_RAMCFG, NOOFBANK);
    adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
                                               MC_ARB_RAMCFG, NOOFRANKS);

    adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
    adev->gfx.config.mem_max_burst_length_bytes = 256;
    if (adev->flags & AMD_IS_APU) {

@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
    return r;
}

static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
        if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
            mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
            ring->has_high_prio = true;
            mqd->cp_hqd_queue_priority =
                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
        } else {
            ring->has_high_prio = false;
        }
    }
}

static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
    /* defaults */
    mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
    mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
    mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
    mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
    mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
    mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
    mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
    mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);

@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
    mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
    mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);

    /* set static priority for a queue/ring */
    gfx_v8_0_mqd_set_priority(ring, mqd);
    mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);

    /* the map_queues packet doesn't need to activate the queue,
     * so only the KIQ needs to set this field.
     */

@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
    }
}

static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
    u32 data;

    data = RREG32(mmRLC_SPM_VMID);

    data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
    data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;

    WREG32(mmRLC_SPM_VMID, data);
}

static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
    .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
    .set_safe_mode = gfx_v8_0_set_safe_mode,

@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
    .resume = gfx_v8_0_rlc_resume,
    .stop = gfx_v8_0_rlc_stop,
    .reset = gfx_v8_0_rlc_reset,
    .start = gfx_v8_0_rlc_start
    .start = gfx_v8_0_rlc_start,
    .update_spm_vmid = gfx_v8_0_update_spm_vmid
};

static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,

@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
    if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
        control |= INDIRECT_BUFFER_PRE_ENB(1);

        if (!(ib->flags & AMDGPU_IB_FLAG_CE))
        if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
            gfx_v8_0_ring_emit_de_meta(ring);
    }

@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
    WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}

static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
                                           bool acquire)
{
    struct amdgpu_device *adev = ring->adev;
    int pipe_num, tmp, reg;
    int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

    pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

    /* first me only has 2 entries, GFX and HP3D */
    if (ring->me > 0)
        pipe_num -= 2;

    reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
    tmp = RREG32(reg);
    tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
    WREG32(reg, tmp);
}

static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
                                            struct amdgpu_ring *ring,
                                            bool acquire)
{
    int i, pipe;
    bool reserve;
    struct amdgpu_ring *iring;

    mutex_lock(&adev->gfx.pipe_reserve_mutex);
    pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
    if (acquire)
        set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
    else
        clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

    if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
        /* Clear all reservations - everyone reacquires all resources */
        for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
            gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
                                           true);

        for (i = 0; i < adev->gfx.num_compute_rings; ++i)
            gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
                                           true);
    } else {
        /* Lower all pipes without a current reservation */
        for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
            iring = &adev->gfx.gfx_ring[i];
            pipe = amdgpu_gfx_mec_queue_to_bit(adev,
                                               iring->me,
                                               iring->pipe,
                                               0);
            reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
            gfx_v8_0_ring_set_pipe_percent(iring, reserve);
        }

        for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
            iring = &adev->gfx.compute_ring[i];
            pipe = amdgpu_gfx_mec_queue_to_bit(adev,
                                               iring->me,
                                               iring->pipe,
                                               0);
            reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
            gfx_v8_0_ring_set_pipe_percent(iring, reserve);
        }
    }

    mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}

static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
                                      struct amdgpu_ring *ring,
                                      bool acquire)
{
    uint32_t pipe_priority = acquire ? 0x2 : 0x0;
    uint32_t queue_priority = acquire ? 0xf : 0x0;

    mutex_lock(&adev->srbm_mutex);
    vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

    WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
    WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);

    vi_srbm_select(adev, 0, 0, 0, 0);
    mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
                                               enum drm_sched_priority priority)
{
    struct amdgpu_device *adev = ring->adev;
    bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

    if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
        return;

    gfx_v8_0_hqd_set_priority(adev, ring, acquire);
    gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
}

static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
                                             u64 addr, u64 seq,
                                             unsigned flags)

@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
    .test_ib = gfx_v8_0_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .set_priority = gfx_v8_0_ring_set_priority_compute,
    .emit_wreg = gfx_v8_0_ring_emit_wreg,
};

@@ -738,9 +738,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                       uint64_t queue_mask)

@@ -1847,6 +1847,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
        break;
    }

    /* init spm vmid with 0xf */
    if (adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

    return 0;
}

@@ -1993,7 +1997,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
    .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
    .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
    .ras_error_inject = &gfx_v9_0_ras_error_inject,
    .query_ras_error_count = &gfx_v9_0_query_ras_error_count
    .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
    .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};

static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {

@@ -2004,7 +2009,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
    .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
    .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
    .ras_error_inject = &gfx_v9_4_ras_error_inject,
    .query_ras_error_count = &gfx_v9_4_query_ras_error_count
    .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
    .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
};

static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)

@@ -3310,6 +3316,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
    WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
        if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
            mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
            ring->has_high_prio = true;
            mqd->cp_hqd_queue_priority =
                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
        } else {
            ring->has_high_prio = false;
        }
    }
}

static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

@@ -3446,6 +3468,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
    tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
    mqd->cp_hqd_ib_control = tmp;

    /* set static priority for a queue/ring */
    gfx_v9_0_mqd_set_priority(ring, mqd);
    mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);

    /* the map_queues packet doesn't need to activate the queue,
     * so only the KIQ needs to set this field.
     */

@@ -3964,6 +3990,63 @@ static int gfx_v9_0_soft_reset(void *handle)
    return 0;
}

static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
    signed long r, cnt = 0;
    unsigned long flags;
    uint32_t seq;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *ring = &kiq->ring;

    BUG_ON(!ring->funcs->emit_rreg);

    spin_lock_irqsave(&kiq->ring_lock, flags);
    amdgpu_ring_alloc(ring, 32);
    amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
    amdgpu_ring_write(ring, 9 |     /* src: register */
                      (5 << 8) |    /* dst: memory */
                      (1 << 16) |   /* count sel */
                      (1 << 20));   /* write confirm */
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
                      kiq->reg_val_offs * 4));
    amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
                      kiq->reg_val_offs * 4));
    amdgpu_fence_emit_polling(ring, &seq);
    amdgpu_ring_commit(ring);
    spin_unlock_irqrestore(&kiq->ring_lock, flags);

    r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

    /* Don't keep waiting in the GPU reset case, because doing so can
     * block the gpu_recover() routine forever: this virt KIQ read is
     * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
     * never return while we kept waiting here, hanging gpu_recover().
     *
     * Also don't keep waiting when called from IRQ context.
     */
    if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
        goto failed_kiq_read;

    might_sleep();
    while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
        msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
    }

    if (cnt > MAX_KIQ_REG_TRY)
        goto failed_kiq_read;

    return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
           (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;

failed_kiq_read:
    pr_err("failed to read gpu clock\n");
    return ~0;
}
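gfx_v9_0_kiq_read_clock shows the usual KIQ register-read protocol: emit a COPY_DATA from register space into a writeback slot on the KIQ ring, then poll the fence with a bounded number of sleep-and-retry rounds, bailing out early during GPU reset or from IRQ context where sleeping is not allowed. Reduced to its polling skeleton (a sketch only; the helper name and parameters are placeholders, not driver API):

/* Sketch: wait for fence `seq` with up to `max_try` sleep-and-retry rounds. */
static long wait_seq_bounded(struct amdgpu_ring *ring, uint32_t seq,
                             long timeout, int max_try, int interval_ms)
{
    long r = amdgpu_fence_wait_polling(ring, seq, timeout);
    int cnt = 0;

    might_sleep();
    while (r < 1 && cnt++ < max_try) {
        msleep(interval_ms);
        r = amdgpu_fence_wait_polling(ring, seq, timeout);
    }
    return (cnt > max_try) ? -ETIMEDOUT : r;
}
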
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
    uint64_t clock;

@@ -3971,16 +4054,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
    amdgpu_gfx_off_ctrl(adev, false);
    mutex_lock(&adev->gfx.gpu_clock_mutex);
    if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
        uint32_t tmp, lsb, msb, i = 0;
        do {
            if (i != 0)
                udelay(1);
            tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
            lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
            msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
            i++;
        } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
        clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
        clock = gfx_v9_0_kiq_read_clock(adev);
    } else {
        WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
        clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |

@@ -4142,7 +4216,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
    { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};

static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)

@@ -4343,18 +4416,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
        goto fail;
    }

    switch (adev->asic_type)
    {
    case CHIP_VEGA20:
        gfx_v9_0_clear_ras_edc_counter(adev);
        break;
    case CHIP_ARCTURUS:
        gfx_v9_4_clear_ras_edc_counter(adev);
        break;
    default:
        break;
    }

fail:
    amdgpu_ib_free(adev, &ib, NULL);
    dma_fence_put(f);

@@ -4402,6 +4463,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
    if (r)
        return r;

    if (adev->gfx.funcs &&
        adev->gfx.funcs->reset_ras_error_count)
        adev->gfx.funcs->reset_ras_error_count(adev);

    r = amdgpu_gfx_ras_late_init(adev);
    if (r)
        return r;

@@ -4706,6 +4771,18 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
    return 0;
}

static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
    u32 data;

    data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);

    data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
    data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

    WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
    .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
    .set_safe_mode = gfx_v9_0_set_safe_mode,

@@ -4717,7 +4794,8 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
    .resume = gfx_v9_0_rlc_resume,
    .stop = gfx_v9_0_rlc_stop,
    .reset = gfx_v9_0_rlc_reset,
    .start = gfx_v9_0_rlc_start
    .start = gfx_v9_0_rlc_start,
    .update_spm_vmid = gfx_v9_0_update_spm_vmid
};

static int gfx_v9_0_set_powergating_state(void *handle,

@@ -4920,7 +4998,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
    if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
        control |= INDIRECT_BUFFER_PRE_ENB(1);

        if (!(ib->flags & AMDGPU_IB_FLAG_CE))
        if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
            gfx_v9_0_ring_emit_de_meta(ring);
    }

@@ -5045,105 +5123,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
    return wptr;
}

static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
                                           bool acquire)
{
    struct amdgpu_device *adev = ring->adev;
    int pipe_num, tmp, reg;
    int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

    pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

    /* first me only has 2 entries, GFX and HP3D */
    if (ring->me > 0)
        pipe_num -= 2;

    reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
    tmp = RREG32(reg);
    tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
    WREG32(reg, tmp);
}

static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
                                            struct amdgpu_ring *ring,
                                            bool acquire)
{
    int i, pipe;
    bool reserve;
    struct amdgpu_ring *iring;

    mutex_lock(&adev->gfx.pipe_reserve_mutex);
    pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
    if (acquire)
        set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
    else
        clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

    if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
        /* Clear all reservations - everyone reacquires all resources */
        for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
            gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
                                           true);

        for (i = 0; i < adev->gfx.num_compute_rings; ++i)
            gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
                                           true);
    } else {
        /* Lower all pipes without a current reservation */
        for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
            iring = &adev->gfx.gfx_ring[i];
            pipe = amdgpu_gfx_mec_queue_to_bit(adev,
                                               iring->me,
                                               iring->pipe,
                                               0);
            reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
            gfx_v9_0_ring_set_pipe_percent(iring, reserve);
        }

        for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
            iring = &adev->gfx.compute_ring[i];
            pipe = amdgpu_gfx_mec_queue_to_bit(adev,
                                               iring->me,
                                               iring->pipe,
                                               0);
            reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
            gfx_v9_0_ring_set_pipe_percent(iring, reserve);
        }
    }

    mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}

static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
                                      struct amdgpu_ring *ring,
                                      bool acquire)
{
    uint32_t pipe_priority = acquire ? 0x2 : 0x0;
    uint32_t queue_priority = acquire ? 0xf : 0x0;

    mutex_lock(&adev->srbm_mutex);
    soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

    WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
    WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);

    soc15_grbm_select(adev, 0, 0, 0, 0);
    mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
                                               enum drm_sched_priority priority)
{
    struct amdgpu_device *adev = ring->adev;
    bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

    if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
        return;

    gfx_v9_0_hqd_set_priority(adev, ring, acquire);
    gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

@@ -6323,7 +6302,7 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
    return 0;
}

static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
    int i, j, k;

@@ -6514,7 +6493,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
    .test_ib = gfx_v9_0_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .set_priority = gfx_v9_0_ring_set_priority_compute,
    .emit_wreg = gfx_v9_0_ring_emit_wreg,
    .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
    .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,

@@ -893,7 +893,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
    return 0;
}

void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
    int i, j, k;

@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
                              void *inject_if);

void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);

#endif /* __GFX_V9_4_H__ */

@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
    uint64_t value;

    /* Disable AGP. */
    WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
    WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
    WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
    if (!amdgpu_sriov_vf(adev)) {
        /*
         * The new L1 policy blocks the SRIOV guest from writing
         * these regs; they are programmed at the host instead,
         * so skip programming them here.
         */
        /* Disable AGP. */
        WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
        WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
        WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);

        /* Program the system aperture low logical page number. */
        WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     adev->gmc.vram_start >> 18);
        WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                     adev->gmc.vram_end >> 18);
    /* Program the system aperture low logical page number. */
    WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                 adev->gmc.vram_start >> 18);
    WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                 adev->gmc.vram_end >> 18);

    /* Set default page address. */
    value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
        + adev->vm_manager.vram_base_offset;
    WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                 (u32)(value >> 12));
    WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                 (u32)(value >> 44));
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
            + adev->vm_manager.vram_base_offset;
        WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                     (u32)(value >> 12));
        WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                     (u32)(value >> 44));
    }

    /* Program "protection fault". */
    WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,

@@ -260,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)

int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
    if (amdgpu_sriov_vf(adev)) {
        /*
         * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
         * are VF copy registers, so the vbios post doesn't program
         * them; the SRIOV driver needs to program them itself.
         */
        WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
                     adev->gmc.vram_start >> 24);
        WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
                     adev->gmc.vram_end >> 24);
    }

    /* GART Enable. */
    gfxhub_v2_0_init_gart_aperture_regs(adev);
    gfxhub_v2_0_init_system_aperture_regs(adev);

@@ -948,6 +948,9 @@ static int gmc_v9_0_late_init(void *handle)
        }
    }

    if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
        adev->mmhub.funcs->reset_ras_error_count(adev);

    r = amdgpu_gmc_ras_late_init(adev);
    if (r)
        return r;

@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
    err_data->ue_count += ded_count;
}

static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
{
    uint32_t i;

    /* read back edc counter registers to reset the counters to 0 */
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
        for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
            RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
    }
}

const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
    .ras_late_init = amdgpu_mmhub_ras_late_init,
    .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
    .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};

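Both mmhub reset helpers rely on the EDC counter registers being read-to-clear: a plain read returns the current count and zeroes the hardware counter as a side effect, so "reset" is just reading every counter once and discarding the values. The idiom in a generic, standalone form (sketch only; the register list and read callback are placeholders):

#include <stddef.h>
#include <stdint.h>

/* Reading each read-to-clear counter register resets it to 0;
 * the returned values are deliberately discarded. */
static void reset_rtc_counters(const uint32_t *regs, size_t n,
                               uint32_t (*read_reg)(uint32_t))
{
    size_t i;

    for (i = 0; i < n; i++)
        (void)read_reg(regs[i]);
}
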
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
    WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
    WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);

    /* Program the system aperture low logical page number. */
    WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                 adev->gmc.vram_start >> 18);
    WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                 adev->gmc.vram_end >> 18);
    if (!amdgpu_sriov_vf(adev)) {
        /*
         * The new L1 policy blocks the SRIOV guest from writing
         * these regs; they are programmed at the host instead,
         * so skip programming them here.
         */
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     adev->gmc.vram_start >> 18);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                     adev->gmc.vram_end >> 18);
    }

    /* Set default page address. */
    value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +

@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)

int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
    if (amdgpu_sriov_vf(adev)) {
        /*
         * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
         * are VF copy registers, so the vbios post doesn't program
         * them; the SRIOV driver needs to program them itself.
         */
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
                     adev->gmc.vram_start >> 24);
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
                     adev->gmc.vram_end >> 24);
    }

    /* GART Enable. */
    mmhub_v2_0_init_gart_aperture_regs(adev);
    mmhub_v2_0_init_system_aperture_regs(adev);

@@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
    err_data->ue_count += ded_count;
}

static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
    uint32_t i;

    /* read back edc counter registers to reset the counters to 0 */
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
        for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
            RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
    }
}

const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
    .ras_late_init = amdgpu_mmhub_ras_late_init,
    .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
    .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
};

@ -0,0 +1,338 @@
|
|||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __MMSCH_V2_0_H__
|
||||
#define __MMSCH_V2_0_H__
|
||||
|
||||
// addressBlock: uvd0_mmsch_dec
|
||||
// base address: 0x1e000
|
||||
#define mmMMSCH_UCODE_ADDR 0x0000
|
||||
#define mmMMSCH_UCODE_ADDR_BASE_IDX 0
|
||||
#define mmMMSCH_UCODE_DATA 0x0001
|
||||
#define mmMMSCH_UCODE_DATA_BASE_IDX 0
|
||||
#define mmMMSCH_SRAM_ADDR 0x0002
|
||||
#define mmMMSCH_SRAM_ADDR_BASE_IDX 0
|
||||
#define mmMMSCH_SRAM_DATA 0x0003
|
||||
#define mmMMSCH_SRAM_DATA_BASE_IDX 0
|
||||
#define mmMMSCH_VF_SRAM_OFFSET 0x0004
|
||||
#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0
|
||||
#define mmMMSCH_DB_SRAM_OFFSET 0x0005
|
||||
#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0
|
||||
#define mmMMSCH_CTX_SRAM_OFFSET 0x0006
|
||||
#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0
|
||||
#define mmMMSCH_CTL 0x0007
|
||||
#define mmMMSCH_CTL_BASE_IDX 0
|
||||
#define mmMMSCH_INTR 0x0008
|
||||
#define mmMMSCH_INTR_BASE_IDX 0
|
||||
#define mmMMSCH_INTR_ACK 0x0009
|
||||
#define mmMMSCH_INTR_ACK_BASE_IDX 0
|
||||
#define mmMMSCH_INTR_STATUS 0x000a
|
||||
#define mmMMSCH_INTR_STATUS_BASE_IDX 0
|
||||
#define mmMMSCH_VF_VMID 0x000b
|
||||
#define mmMMSCH_VF_VMID_BASE_IDX 0
|
||||
#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
|
||||
#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
|
||||
#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
|
||||
#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
|
||||
#define mmMMSCH_VF_CTX_SIZE 0x000e
|
||||
#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
|
||||
#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f
|
||||
#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0
|
||||
#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010
|
||||
#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0
|
||||
#define mmMMSCH_VF_GPCOM_SIZE 0x0011
|
||||
#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_HOST 0x0012
|
||||
#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_RESP 0x0013
|
||||
#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_0 0x0014
|
||||
#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015
|
||||
#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_1 0x0016
|
||||
#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0
|
||||
#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017
|
||||
#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0
|
||||
#define mmMMSCH_CNTL 0x001c
|
||||
#define mmMMSCH_CNTL_BASE_IDX 0
|
||||
#define mmMMSCH_NONCACHE_OFFSET0 0x001d
|
||||
#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0
|
||||
#define mmMMSCH_NONCACHE_SIZE0 0x001e
|
||||
#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0
|
||||
#define mmMMSCH_NONCACHE_OFFSET1 0x001f
|
||||
#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0
|
||||
#define mmMMSCH_NONCACHE_SIZE1 0x0020
|
||||
#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0
|
||||
#define mmMMSCH_PDEBUG_STATUS 0x0021
|
||||
#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0
|
||||
#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022
|
||||
#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0
|
||||
#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023
|
||||
#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0
|
||||
#define mmMMSCH_PDEBUG_EPC 0x0024
|
||||
#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0
|
||||
#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025
|
||||
#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0
|
||||
#define mmMMSCH_PROC_STATE1 0x0026
|
||||
#define mmMMSCH_PROC_STATE1_BASE_IDX 0
|
||||
#define mmMMSCH_LAST_MC_ADDR 0x0027
|
||||
#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0
|
||||
#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028
|
||||
#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0
|
||||
#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029
|
||||
#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0
|
||||
#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a
|
||||
#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0
|
||||
#define mmMMSCH_SCRATCH_0 0x002b
|
||||
#define mmMMSCH_SCRATCH_0_BASE_IDX 0
|
||||
#define mmMMSCH_SCRATCH_1 0x002c
|
||||
#define mmMMSCH_SCRATCH_1_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d
|
||||
#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e
|
||||
#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f
|
||||
#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030
|
||||
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031
|
||||
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032
|
||||
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_DW6_0 0x0033
|
||||
#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_DW7_0 0x0034
|
||||
#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_DW8_0 0x0035
|
||||
#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036
|
||||
#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037
|
||||
#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0
|
||||
#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038
#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW6_1 0x003c
#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW7_1 0x003d
#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW8_1 0x003e
#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_CNTXT 0x003f
#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0
#define mmMMSCH_SCRATCH_2 0x0040
#define mmMMSCH_SCRATCH_2_BASE_IDX 0
#define mmMMSCH_SCRATCH_3 0x0041
#define mmMMSCH_SCRATCH_3_BASE_IDX 0
#define mmMMSCH_SCRATCH_4 0x0042
#define mmMMSCH_SCRATCH_4_BASE_IDX 0
#define mmMMSCH_SCRATCH_5 0x0043
#define mmMMSCH_SCRATCH_5_BASE_IDX 0
#define mmMMSCH_SCRATCH_6 0x0044
#define mmMMSCH_SCRATCH_6_BASE_IDX 0
#define mmMMSCH_SCRATCH_7 0x0045
#define mmMMSCH_SCRATCH_7_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046
#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047
#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048
#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049
#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0
#define mmMMSCH_NACK_STATUS 0x004a
#define mmMMSCH_NACK_STATUS_BASE_IDX 0
#define mmMMSCH_VF_MAILBOX0_DATA 0x004b
#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0
#define mmMMSCH_VF_MAILBOX1_DATA 0x004c
#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0
#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053
#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0
#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054
#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055
#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056
#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057
#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058
#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW6_2 0x005a
#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW7_2 0x005b
#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_DW8_2 0x005c
#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d
#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e
#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f
#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060
#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0
#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061
#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0
#define mmMMSCH_VM_BUSY_STATUS_0 0x0062
#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0
#define mmMMSCH_VM_BUSY_STATUS_1 0x0063
#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0
#define mmMMSCH_VM_BUSY_STATUS_2 0x0064
#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0

#define MMSCH_VERSION_MAJOR 2
#define MMSCH_VERSION_MINOR 0
#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)

enum mmsch_v2_0_command_type {
    MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
    MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
    MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
    MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
    MMSCH_COMMAND__END = 0xf
};

struct mmsch_v2_0_init_header {
    uint32_t version;
    uint32_t header_size;
    uint32_t vcn_init_status;
    uint32_t vcn_table_offset;
    uint32_t vcn_table_size;
};

struct mmsch_v2_0_cmd_direct_reg_header {
    uint32_t reg_offset   : 28;
    uint32_t command_type : 4;
};

struct mmsch_v2_0_cmd_indirect_reg_header {
    uint32_t reg_offset    : 20;
    uint32_t reg_idx_space : 8;
    uint32_t command_type  : 4;
};

struct mmsch_v2_0_cmd_direct_write {
    struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
    uint32_t reg_value;
};

struct mmsch_v2_0_cmd_direct_read_modify_write {
    struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
    uint32_t write_data;
    uint32_t mask_value;
};

struct mmsch_v2_0_cmd_direct_polling {
    struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
    uint32_t mask_value;
    uint32_t wait_value;
};

struct mmsch_v2_0_cmd_end {
    struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
};

struct mmsch_v2_0_cmd_indirect_write {
    struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
    uint32_t reg_value;
};

static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
                                               uint32_t *init_table,
                                               uint32_t reg_offset,
                                               uint32_t value)
{
    direct_wt->cmd_header.reg_offset = reg_offset;
    direct_wt->reg_value = value;
    memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
}

static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
                                                      uint32_t *init_table,
                                                      uint32_t reg_offset,
                                                      uint32_t mask, uint32_t data)
{
    direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
    direct_rd_mod_wt->mask_value = mask;
    direct_rd_mod_wt->write_data = data;
    memcpy((void *)init_table, direct_rd_mod_wt,
           sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
}

static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
                                                 uint32_t *init_table,
                                                 uint32_t reg_offset,
                                                 uint32_t mask, uint32_t wait)
{
    direct_poll->cmd_header.reg_offset = reg_offset;
    direct_poll->mask_value = mask;
    direct_poll->wait_value = wait;
    memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
}

#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
    mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
                                       init_table, (reg), \
                                       (mask), (data)); \
    init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
    table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
}

#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
    mmsch_v2_0_insert_direct_wt(&direct_wt, \
                                init_table, (reg), \
                                (value)); \
    init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
    table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
}

#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
    mmsch_v2_0_insert_direct_poll(&direct_poll, \
                                  init_table, (reg), \
                                  (mask), (wait)); \
    init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
    table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
}

#endif
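Note that the insert macros above rely on the caller declaring locals named init_table, table_size and the matching command structs; they advance init_table and grow table_size by the command size in dwords. A minimal sketch of that contract (the register offset and value below are placeholders, not taken from this patch):

static uint32_t example_build_init_table(uint32_t *init_table)
{
    uint32_t table_size = 0;
    struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };

    direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
    /* 0x0040 and 0x1 are placeholder offset/value for illustration only */
    MMSCH_V2_0_INSERT_DIRECT_WT(0x0040, 0x1);

    return table_size; /* dword count; the caller stores it in the init header */
}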
@@ -32,6 +32,7 @@
#include "soc15_common.h"
#include "navi10_ih.h"

#define MAX_REARM_RETRY 10

static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);

@@ -283,6 +284,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
    ih->rptr += 32;
}

/**
 * navi10_ih_irq_rearm - rearm IRQ if lost
 *
 * @adev: amdgpu_device pointer
 * @ih: IH ring buffer to rearm
 *
 */
static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
                                struct amdgpu_ih_ring *ih)
{
    uint32_t reg_rptr = 0;
    uint32_t v = 0;
    uint32_t i = 0;

    if (ih == &adev->irq.ih)
        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
    else if (ih == &adev->irq.ih1)
        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
    else if (ih == &adev->irq.ih2)
        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
    else
        return;

    /* Rearm IRQ / re-write doorbell if doorbell write is lost */
    for (i = 0; i < MAX_REARM_RETRY; i++) {
        v = RREG32_NO_KIQ(reg_rptr);
        if ((v < ih->ring_size) && (v != ih->rptr))
            WDOORBELL32(ih->doorbell_index, ih->rptr);
        else
            break;
    }
}

/**
 * navi10_ih_set_rptr - set the IH ring buffer rptr
 *
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
        /* XXX check if swapping is necessary on BE */
        *ih->rptr_cpu = ih->rptr;
        WDOORBELL32(ih->doorbell_index, ih->rptr);

        if (amdgpu_sriov_vf(adev))
            navi10_ih_irq_rearm(adev, ih);
    } else
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
}

@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
            !amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
        amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
        if (!amdgpu_sriov_vf(adev))
            amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
        break;
    default:
        return -EINVAL;

@@ -31,6 +31,9 @@
#define GFX_CMD_RESERVED_MASK 0x7FF00000
#define GFX_CMD_RESPONSE_MASK 0x80000000

/* USBC PD FW version retrieval command */
#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000

/* TEE Gfx Command IDs for the register interface.
 * Command ID must be between 0x00010000 and 0x000F0000.
 */

@@ -65,6 +65,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000

/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240

static int psp_v11_0_init_microcode(struct psp_context *psp)
{
    struct amdgpu_device *adev = psp->adev;

@@ -1109,6 +1112,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}

static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
{
    struct amdgpu_device *adev = psp->adev;
    uint32_t reg_status;
    int ret, i = 0;

    /* Write lower 32-bit address of the PD Controller FW */
    WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
    ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
                       0x80000000, 0x80000000, false);
    if (ret)
        return ret;

    /* Fire up the interrupt so PSP can pick up the lower address */
    WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
    ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
                       0x80000000, 0x80000000, false);
    if (ret)
        return ret;

    reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);

    if ((reg_status & 0xFFFF) != 0) {
        DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
                  reg_status & 0xFFFF);
        return -EIO;
    }

    /* Write upper 32-bit address of the PD Controller FW */
    WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));

    ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
                       0x80000000, 0x80000000, false);
    if (ret)
        return ret;

    /* Fire up the interrupt so PSP can pick up the upper address */
    WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);

    /* The FW load can take a very long time */
    do {
        msleep(1000);
        reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);

        if (reg_status & 0x80000000)
            goto done;

    } while (++i < USBC_PD_POLLING_LIMIT_S);

    return -ETIME;
done:
    if ((reg_status & 0xFFFF) != 0) {
        DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
                  reg_status & 0xFFFF);
        return -EIO;
    }

    return 0;
}
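For context, a hypothetical caller (not part of this patch) would stage the firmware image in a DMA-visible buffer and hand its bus address to the function above; a minimal sketch, assuming the usual DMA-mapping helpers are available in this file:

static int example_flash_usbc_pd_fw(struct psp_context *psp,
                                    const void *image, size_t size)
{
    struct amdgpu_device *adev = psp->adev;
    dma_addr_t dma_addr;
    void *cpu_addr;
    int ret;

    /* PSP fetches the image over DMA, so use a coherent buffer */
    cpu_addr = dma_alloc_coherent(adev->dev, size, &dma_addr, GFP_KERNEL);
    if (!cpu_addr)
        return -ENOMEM;

    memcpy(cpu_addr, image, size);
    ret = psp_v11_0_load_usbc_pd_fw(psp, dma_addr);

    dma_free_coherent(adev->dev, size, cpu_addr, dma_addr);
    return ret;
}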
static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
    struct amdgpu_device *adev = psp->adev;
    int ret;

    WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);

    ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
                       0x80000000, 0x80000000, false);
    if (!ret)
        *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);

    return ret;
}

static const struct psp_funcs psp_v11_0_funcs = {
    .init_microcode = psp_v11_0_init_microcode,
    .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,

@@ -1133,6 +1212,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
    .mem_training = psp_v11_0_memory_training,
    .ring_get_wptr = psp_v11_0_ring_get_wptr,
    .ring_set_wptr = psp_v11_0_ring_set_wptr,
    .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
    .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
};

void psp_v11_0_set_psp_funcs(struct psp_context *psp)

@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
    struct ras_ih_if ih_info = {
        .cb = sdma_v4_0_process_ras_data_cb,
    };
    int i;

    /* read back edc counter registers to clear the counters */
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
        for (i = 0; i < adev->sdma.num_instances; i++)
            RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
    }
    if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
        adev->sdma.funcs->reset_ras_error_count(adev);

    if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
        return adev->sdma.funcs->ras_late_init(adev, &ih_info);

@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
    return 0;
};

static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
    int i;

    /* read back edc counter registers to clear the counters */
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
        for (i = 0; i < adev->sdma.num_instances; i++)
            RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
    }
}

static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
    .ras_late_init = amdgpu_sdma_ras_late_init,
    .ras_fini = amdgpu_sdma_ras_fini,
    .query_ras_error_count = sdma_v4_0_query_ras_error_count,
    .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};

static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)

@@ -89,6 +89,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0

/* for Vega20/arcturus register offset change */
#define mmROM_INDEX_VG20 0x00e4
#define mmROM_INDEX_VG20_BASE_IDX 0
#define mmROM_DATA_VG20 0x00e5
#define mmROM_DATA_VG20_BASE_IDX 0

/*
 * Indirect registers accessor
 */

@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
    u32 *dw_ptr;
    u32 i, length_dw;
    uint32_t rom_index_offset;
    uint32_t rom_data_offset;

    if (bios == NULL)
        return false;

@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
    dw_ptr = (u32 *)bios;
    length_dw = ALIGN(length_bytes, 4) / 4;

    switch (adev->asic_type) {
    case CHIP_VEGA20:
    case CHIP_ARCTURUS:
        rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
        rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
        break;
    default:
        rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
        rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
        break;
    }

    /* set rom index to 0 */
    WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
    WREG32(rom_index_offset, 0);
    /* read out the rom data */
    for (i = 0; i < length_dw; i++)
        dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
        dw_ptr[i] = RREG32(rom_data_offset);

    return true;
}

@@ -831,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
    /* change this when we implement soft reset */
    return true;
}

static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
{
    if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
        return;
    /* read back hdp ras counter to reset it to 0 */
    RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
                                 uint64_t *count1)
{

@@ -998,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
    .get_config_memsize = &soc15_get_config_memsize,
    .flush_hdp = &soc15_flush_hdp,
    .invalidate_hdp = &soc15_invalidate_hdp,
    .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
    .need_full_reset = &soc15_need_full_reset,
    .init_doorbell_index = &vega20_doorbell_index_init,
    .get_pcie_usage = &vega20_get_pcie_usage,

@@ -1243,6 +1274,10 @@ static int soc15_common_late_init(void *handle)
    if (amdgpu_sriov_vf(adev))
        xgpu_ai_mailbox_get_irq(adev);

    if (adev->asic_funcs &&
        adev->asic_funcs->reset_hdp_ras_error_count)
        adev->asic_funcs->reset_hdp_ras_error_count(adev);

    if (adev->nbio.funcs->ras_late_init)
        r = adev->nbio.funcs->ras_late_init(adev);

@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
        DRM_WARN("Failed to disable DF-Cstate\n");

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                                              umc_inst,

@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
                                              &(err_data->ue_count));
    }

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
        DRM_WARN("Failed to enable DF-Cstate\n");

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);
}

@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
            SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
    }

    /* skip error address process if -ENOMEM */
    mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

    if (mc_umc_status == 0)
        return;

    if (!err_data->err_addr) {
        /* clear umc status */
        WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
    }

    err_rec = &err_data->err_addr[err_data->err_addr_cnt];
    mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

    /* calculate error address if ue/ce error is detected */
    if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&

@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
    if (rsmu_umc_index_state)
        umc_v6_1_disable_umc_index_mode(adev);

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
        DRM_WARN("Failed to disable DF-Cstate\n");

    LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
        umc_reg_offset = get_umc_6_reg_offset(adev,
                                              umc_inst,

@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
                                     umc_inst);
    }

    if ((adev->asic_type == CHIP_ARCTURUS) &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
        DRM_WARN("Failed to enable DF-Cstate\n");

    if (rsmu_umc_index_state)
        umc_v6_1_enable_umc_index_mode(adev);
}

@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"

@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
                enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
                int inst_idx, struct dpg_pause_state *new_state);

static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
 * vcn_v2_0_early_init - set function pointers
 *

@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    adev->vcn.num_vcn_inst = 1;
    adev->vcn.num_enc_rings = 2;
    if (amdgpu_sriov_vf(adev))
        adev->vcn.num_enc_rings = 1;
    else
        adev->vcn.num_enc_rings = 2;

    vcn_v2_0_set_dec_ring_funcs(adev);
    vcn_v2_0_set_enc_ring_funcs(adev);

@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
    for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
        ring = &adev->vcn.inst->ring_enc[i];
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
        if (!amdgpu_sriov_vf(adev))
            ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
        else
            ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
        sprintf(ring->name, "vcn_enc%d", i);
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)

@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)

    adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;

    r = amdgpu_virt_alloc_mm_table(adev);
    if (r)
        return r;

    return 0;
}

@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    amdgpu_virt_free_mm_table(adev);

    r = amdgpu_vcn_suspend(adev);
    if (r)
        return r;

@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
    adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                         ring->doorbell_index, 0);

    if (amdgpu_sriov_vf(adev))
        vcn_v2_0_start_sriov(adev);

    r = amdgpu_ring_test_helper(ring);
    if (r)
        goto done;

@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
    uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
    uint32_t offset;

    if (amdgpu_sriov_vf(adev))
        return;

    /* cache window 0: fw */
    if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,

@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
    uint32_t data;

    if (amdgpu_sriov_vf(adev))
        return;

    /* UVD disable CGC */
    data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)

@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
    uint32_t data = 0;

    if (amdgpu_sriov_vf(adev))
        return;

    /* enable UVD CGC */
    data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
    if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)

@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
    uint32_t data = 0;
    int ret;

    if (amdgpu_sriov_vf(adev))
        return;

    if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
        data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
            | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT

@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
    uint32_t data = 0;
    int ret;

    if (amdgpu_sriov_vf(adev))
        return;

    if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
        /* Before power off, this indicator has to be turned on */
        data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);

@@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_CG_STATE_GATE);

    if (amdgpu_sriov_vf(adev))
        return 0;

    if (enable) {
        /* wait for STATUS to clear */
        if (vcn_v2_0_is_idle(handle))

@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
    unsigned i;
    int r;

    if (amdgpu_sriov_vf(adev))
        return 0;

    WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
    r = amdgpu_ring_alloc(ring, 4);
    if (r)

@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
    int ret;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (amdgpu_sriov_vf(adev)) {
        adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
        return 0;
    }

    if (state == adev->vcn.cur_state)
        return 0;

@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
    return ret;
}

static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
                                struct amdgpu_mm_table *table)
{
    uint32_t data = 0, loop;
    uint64_t addr = table->gpu_addr;
    struct mmsch_v2_0_init_header *header;
    uint32_t size;
    int i;

    header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
    size = header->header_size + header->vcn_table_size;

    /* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
     * of memory descriptor location
     */
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

    /* 2, update vmid of descriptor */
    data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
    data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
    /* use domain0 for MM scheduler */
    data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

    /* 3, notify mmsch about the size of this descriptor */
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

    /* 4, set resp to zero */
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

    adev->vcn.inst->ring_dec.wptr = 0;
    adev->vcn.inst->ring_dec.wptr_old = 0;
    vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

    for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
        adev->vcn.inst->ring_enc[i].wptr = 0;
        adev->vcn.inst->ring_enc[i].wptr_old = 0;
        vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
    }

    /* 5, kick off the initialization and wait until
     * MMSCH_VF_MAILBOX_RESP becomes non-zero
     */
    WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

    data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
    loop = 1000;
    while ((data & 0x10000002) != 0x10000002) {
        udelay(10);
        data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
        loop--;
        if (!loop)
            break;
    }

    if (!loop) {
        DRM_ERROR("failed to init MMSCH, "
                  "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
        return -EBUSY;
    }

    return 0;
}

static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
    int r;
    uint32_t tmp;
    struct amdgpu_ring *ring;
    uint32_t offset, size;
    uint32_t table_size = 0;
    struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
    struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
    struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
    struct mmsch_v2_0_cmd_end end = { {0} };
    struct mmsch_v2_0_init_header *header;
    uint32_t *init_table = adev->virt.mm_table.cpu_addr;
    uint8_t i = 0;

    header = (struct mmsch_v2_0_init_header *)init_table;
    direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
    direct_rd_mod_wt.cmd_header.command_type =
        MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
    direct_poll.cmd_header.command_type =
        MMSCH_COMMAND__DIRECT_REG_POLLING;
    end.cmd_header.command_type = MMSCH_COMMAND__END;

    if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
        header->version = MMSCH_VERSION;
        header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

        header->vcn_table_offset = header->header_size;

        init_table += header->vcn_table_offset;

        size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

        MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
            0xFFFFFFFF, 0x00000004);

        /* mc resume */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
            tmp = AMDGPU_UCODE_ID_VCN;
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i,
                    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                adev->firmware.ucode[tmp].tmr_mc_addr_lo);
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i,
                    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                adev->firmware.ucode[tmp].tmr_mc_addr_hi);
            offset = 0;
        } else {
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i,
                    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                lower_32_bits(adev->vcn.inst->gpu_addr));
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i,
                    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                upper_32_bits(adev->vcn.inst->gpu_addr));
            offset = size;
        }

        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
            0);
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
            size);

        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
            lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
            upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
            0);
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
            AMDGPU_VCN_STACK_SIZE);

        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
            lower_32_bits(adev->vcn.inst->gpu_addr + offset +
                AMDGPU_VCN_STACK_SIZE));
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
            upper_32_bits(adev->vcn.inst->gpu_addr + offset +
                AMDGPU_VCN_STACK_SIZE));
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
            0);
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
            AMDGPU_VCN_CONTEXT_SIZE);

        for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
            ring = &adev->vcn.inst->ring_enc[r];
            ring->wptr = 0;
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
                lower_32_bits(ring->gpu_addr));
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
                upper_32_bits(ring->gpu_addr));
            MMSCH_V2_0_INSERT_DIRECT_WT(
                SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
                ring->ring_size / 4);
        }

        ring = &adev->vcn.inst->ring_dec;
        ring->wptr = 0;
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
            lower_32_bits(ring->gpu_addr));
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i,
                mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
            upper_32_bits(ring->gpu_addr));
        /* force RBC into idle state */
        tmp = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        MMSCH_V2_0_INSERT_DIRECT_WT(
            SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

        /* add end packet */
        tmp = sizeof(struct mmsch_v2_0_cmd_end);
        memcpy((void *)init_table, &end, tmp);
        table_size += (tmp / 4);
        header->vcn_table_size = table_size;

    }
    return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
    .name = "vcn_v2_0",
    .early_init = vcn_v2_0_early_init,

@@ -1169,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
    if (!dev)
        return -EINVAL;

    dev->kfd2kgd->get_tile_config(dev->kgd, &config);
    amdgpu_amdkfd_get_tile_config(dev->kgd, &config);

    args->gb_addr_config = config.gb_addr_config;
    args->num_banks = config.num_banks;

@@ -1734,7 +1734,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)

    retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
        &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
        (void *)&(mem_obj->cpu_ptr), true);
        (void *)&(mem_obj->cpu_ptr), false);

    return retval;
}

@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
    if (p->signal_mapped_size &&
        p->signal_event_count == p->signal_mapped_size / 8) {
        if (!p->signal_event_limit_reached) {
            pr_warn("Signal event wasn't created because limit was reached\n");
            pr_debug("Signal event wasn't created because limit was reached\n");
            p->signal_event_limit_reached = true;
        }
        return -ENOSPC;

@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
    int retval;
    struct kfd_mem_obj *mqd_mem_obj = NULL;

    /* From V9, for CWSR, the control stack is located on the next page
     * boundary after the mqd, we will use the gtt allocation function
     * instead of sub-allocation function.
    /* For V9 only, due to a HW bug, the control stack of a user mode
     * compute queue needs to be allocated just behind the page boundary
     * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
     * the first page of the buffer serves the regular MQD buffer
     * purpose and the remainder is for the control stack. Although the
     * two parts are in the same buffer object, they need different
     * memory types: the MQD part needs UC (uncached) as usual, while
     * the control stack needs NC (non-coherent), which is different
     * from the UC type used when the control stack is allocated in
     * user space.
     *
     * Because of all this, we use the gtt allocation function instead
     * of the sub-allocation function for this enlarged MQD buffer.
     * Moreover, in order to achieve two memory types in a single
     * buffer object, we pass the special bo flag
     * AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct the amdgpu memory
     * functions to do so.
     */
    if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
        mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
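A sketch of the enlarged allocation the comment above describes; the sizing expressions are illustrative assumptions (the v9 MQD type and the queue's ctl_stack_size field are not shown in this hunk), only the final boolean maps directly to the cp_mqd_gfx9 flag:

        /* Hypothetical sizing: one page-aligned region for the MQD,
         * immediately followed by the control stack, in one GTT BO.
         */
        retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                PAGE_ALIGN(sizeof(struct v9_mqd)) +
                PAGE_ALIGN(q->ctl_stack_size),
                &(mqd_mem_obj->gtt_mem),
                &(mqd_mem_obj->gpu_addr),
                (void *)&(mqd_mem_obj->cpu_ptr),
                true); /* cp_mqd_gfx9: sets AMDGPU_GEM_CREATE_CP_MQD_GFX9 */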
@@ -327,10 +327,10 @@ err_alloc_mem:
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
    struct qcm_process_device *qpd = &pdd->qpd;
    uint32_t flags = ALLOC_MEM_FLAGS_GTT |
             ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
             ALLOC_MEM_FLAGS_WRITABLE |
             ALLOC_MEM_FLAGS_EXECUTABLE;
    uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
             KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
             KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
             KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
    void *kaddr;
    int ret;

@@ -641,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,

    /* Indicate to other users that MM is no longer valid */
    p->mm = NULL;
    /* Signal the eviction fence after user mode queues are
     * destroyed. This allows any BOs to be freed without
     * triggering pointless evictions or waiting for fences.
     */
    dma_fence_signal(p->ef);

    mutex_unlock(&p->mutex);

@@ -692,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
    struct kfd_dev *dev = pdd->dev;
    struct qcm_process_device *qpd = &pdd->qpd;
    uint32_t flags = ALLOC_MEM_FLAGS_GTT |
        ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
    uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
            | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
            | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
    void *kaddr;
    int ret;

@@ -490,7 +490,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                  dev->node_props.num_sdma_queues_per_engine);
    sysfs_show_32bit_prop(buffer, "num_cp_queues",
                  dev->node_props.num_cp_queues);
    sysfs_show_64bit_prop(buffer, "unique_id",
    sysfs_show_64bit_prop(buffer, "unique_id",
                  dev->node_props.unique_id);

    if (dev->gpu) {

@@ -902,7 +902,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)

    init_data.asic_id.chip_family = adev->family;

    init_data.asic_id.pci_revision_id = adev->rev_id;
    init_data.asic_id.pci_revision_id = adev->pdev->revision;
    init_data.asic_id.hw_internal_rev = adev->external_rev_id;

    init_data.asic_id.vram_width = adev->gmc.vram_width;

@@ -412,6 +412,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
    link->dig_be = config->link_enc_inst;
    link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
    link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
    link->dp.mst_supported = config->mst_supported;
    display->adjust.disable = 1;
    link->adjust.auth_delay = 2;

@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        DRM_ERROR("Failed to found connector for link!");
        return false;
        DRM_ERROR("Failed to find connector for link!");
        return false;
    }

    if (boot) {

@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        DRM_ERROR("Failed to found connector for link!");
        return;
        DRM_ERROR("Failed to find connector for link!");
        return;
    }

    DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",

@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        DRM_ERROR("Failed to found connector for link!");
        DRM_ERROR("Failed to find connector for link!");
        return false;
    }

@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        DRM_ERROR("Failed to found connector for link!");
        DRM_ERROR("Failed to find connector for link!");
        return false;
    }

@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
    bool result;

    if (!aconnector) {
        DRM_ERROR("Failed to found connector for link!");
        DRM_ERROR("Failed to find connector for link!");
        return false;
    }

@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        BUG_ON("Failed to found connector for link!");
        BUG_ON("Failed to find connector for link!");
        return true;
    }

@@ -207,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto

    if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                   dsc_caps, NULL,
                   &dc_sink->sink_dsc_caps.dsc_dec_caps))
                   &dc_sink->dsc_caps.dsc_dec_caps))
        return false;

    return true;

@@ -262,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)

#if defined(CONFIG_DRM_AMD_DC_DCN)
            if (!validate_dsc_caps_on_connector(aconnector))
                memset(&aconnector->dc_sink->sink_dsc_caps,
                       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
                memset(&aconnector->dc_sink->dsc_caps,
                       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
        }
    }

@@ -537,7 +537,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
        memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
        if (vars[i].dsc_enabled && dc_dsc_compute_config(
                    params[i].sink->ctx->dc->res_pool->dscs[0],
                    &params[i].sink->sink_dsc_caps.dsc_dec_caps,
                    &params[i].sink->dsc_caps.dsc_dec_caps,
                    params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
                    0,
                    params[i].timing,

@@ -558,7 +558,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
    kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
    dc_dsc_compute_config(
            param.sink->ctx->dc->res_pool->dscs[0],
            &param.sink->sink_dsc_caps.dsc_dec_caps,
            &param.sink->dsc_caps.dsc_dec_caps,
            param.sink->ctx->dc->debug.dsc_min_slice_height_override,
            (int) kbps, param.timing, &dsc_config);

@@ -755,14 +755,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        params[count].sink = stream->sink;
        aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
        params[count].port = aconnector->port;
        params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
        params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
        dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
        if (!dc_dsc_compute_bandwidth_range(
                stream->sink->ctx->dc->res_pool->dscs[0],
                stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
                dsc_policy.min_target_bpp,
                dsc_policy.max_target_bpp,
                &stream->sink->sink_dsc_caps.dsc_dec_caps,
                &stream->sink->dsc_caps.dsc_dec_caps,
                &stream->timing, &params[count].bw_range))
            params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

@@ -844,7 +844,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
        if (!aconnector || !aconnector->dc_sink)
            continue;

        if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
        if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
            continue;

        if (computed_streams[i])

@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
            bw_fixed_to_int(bw_mul(data->
                stutter_exit_watermark[9], bw_int_to_fixed(1000)));

        calcs_output->stutter_entry_wm_ns[0].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[4], bw_int_to_fixed(1000)));
        calcs_output->stutter_entry_wm_ns[1].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[5], bw_int_to_fixed(1000)));
        calcs_output->stutter_entry_wm_ns[2].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[6], bw_int_to_fixed(1000)));
        if (ctx->dc->caps.max_slave_planes) {
            calcs_output->stutter_entry_wm_ns[3].b_mark =
        calcs_output->stutter_entry_wm_ns[0].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[0], bw_int_to_fixed(1000)));
            calcs_output->stutter_entry_wm_ns[4].b_mark =
                stutter_entry_watermark[4], bw_int_to_fixed(1000)));
        calcs_output->stutter_entry_wm_ns[1].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[1], bw_int_to_fixed(1000)));
        } else {
            calcs_output->stutter_entry_wm_ns[3].b_mark =
                stutter_entry_watermark[5], bw_int_to_fixed(1000)));
        calcs_output->stutter_entry_wm_ns[2].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[7], bw_int_to_fixed(1000)));
            calcs_output->stutter_entry_wm_ns[4].b_mark =
                stutter_entry_watermark[6], bw_int_to_fixed(1000)));
        if (ctx->dc->caps.max_slave_planes) {
            calcs_output->stutter_entry_wm_ns[3].b_mark =
                bw_fixed_to_int(bw_mul(data->
                    stutter_entry_watermark[0], bw_int_to_fixed(1000)));
            calcs_output->stutter_entry_wm_ns[4].b_mark =
                bw_fixed_to_int(bw_mul(data->
                    stutter_entry_watermark[1], bw_int_to_fixed(1000)));
        } else {
            calcs_output->stutter_entry_wm_ns[3].b_mark =
                bw_fixed_to_int(bw_mul(data->
                    stutter_entry_watermark[7], bw_int_to_fixed(1000)));
            calcs_output->stutter_entry_wm_ns[4].b_mark =
                bw_fixed_to_int(bw_mul(data->
                    stutter_entry_watermark[8], bw_int_to_fixed(1000)));
        }
        calcs_output->stutter_entry_wm_ns[5].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[8], bw_int_to_fixed(1000)));
        }
        calcs_output->stutter_entry_wm_ns[5].b_mark =
            bw_fixed_to_int(bw_mul(data->
                stutter_entry_watermark[9], bw_int_to_fixed(1000)));
            stutter_entry_watermark[9], bw_int_to_fixed(1000)));

        calcs_output->urgent_wm_ns[0].b_mark =
            bw_fixed_to_int(bw_mul(data->

@@ -712,6 +712,11 @@ unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_
        case PRID_DALI_DF:
        case PRID_DALI_E3:
        case PRID_DALI_E4:
        case PRID_POLLOCK_94:
        case PRID_POLLOCK_95:
        case PRID_POLLOCK_E9:
        case PRID_POLLOCK_EA:
        case PRID_POLLOCK_EB:
            return 0;
        default:
            break;

@@ -46,6 +46,7 @@
/* Constants */

#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */

/* Macros */

@@ -720,6 +721,13 @@ void rn_clk_mgr_construct(
    } else {
        struct clk_log_info log_info = {0};

        clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);

        /* SMU Version 55.51.0 and up no longer have an issue
         * that needs to limit minimum dispclk
         */
        if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
            debug->min_disp_clk_khz = 0;

        /* TODO: Check we get what we expect during bringup */
        clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);

@@ -1378,6 +1378,10 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
    }

    dc->hwss.optimize_bandwidth(dc, context);

    dc->clk_optimized_required = false;
    dc->wm_optimized_required = false;

    return true;
}

@@ -585,14 +585,14 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
        LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}

static bool detect_dp(
    struct dc_link *link,
    struct display_sink_capability *sink_caps,
    bool *converter_disable_audio,
    struct audio_support *audio_support,
    enum dc_detect_reason reason)
static bool detect_dp(struct dc_link *link,
              struct display_sink_capability *sink_caps,
              bool *converter_disable_audio,
              struct audio_support *audio_support,
              enum dc_detect_reason reason)
{
    bool boot = false;

    sink_caps->signal = link_detect_sink(link, reason);
    sink_caps->transaction_type =
        get_ddc_transaction_type(sink_caps->signal);

@ -609,9 +609,8 @@ static bool detect_dp(
|
|||
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
|
||||
link->type = dc_connection_mst_branch;
|
||||
|
||||
dal_ddc_service_set_transaction_type(
|
||||
link->ddc,
|
||||
sink_caps->transaction_type);
|
||||
dal_ddc_service_set_transaction_type(link->ddc,
|
||||
sink_caps->transaction_type);
|
||||
|
||||
/*
|
||||
* This call will initiate MST topology discovery. Which
|
||||
|
@@ -640,13 +639,10 @@ static bool detect_dp(struct dc_link *link,
        if (reason == DETECT_REASON_BOOT)
            boot = true;

        dm_helpers_dp_update_branch_info(
            link->ctx,
            link);
        dm_helpers_dp_update_branch_info(link->ctx, link);

        if (!dm_helpers_dp_mst_start_top_mgr(
            link->ctx,
            link, boot)) {
        if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
                             link, boot)) {
            /* MST not supported */
            link->type = dc_connection_single;
            sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;

@@ -654,7 +650,7 @@ static bool detect_dp(struct dc_link *link,
    }

    if (link->type != dc_connection_mst_branch &&
        is_dp_active_dongle(link)) {
        is_dp_active_dongle(link)) {
        /* DP active dongles */
        link->type = dc_connection_active_dongle;
        if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {

@@ -665,14 +661,15 @@ static bool detect_dp(struct dc_link *link,
            return true;
        }

        if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
        if (link->dpcd_caps.dongle_type !=
            DISPLAY_DONGLE_DP_HDMI_CONVERTER)
            *converter_disable_audio = true;
    }
    } else {
        /* DP passive dongles */
        sink_caps->signal = dp_passive_dongle_detection(link->ddc,
                                sink_caps,
                                audio_support);
                                sink_caps,
                                audio_support);
    }

    return true;

@@ -973,6 +970,9 @@ static bool dc_link_detect_helper(struct dc_link *link,
            break;
        }

        if (link->local_sink->edid_caps.panel_patch.disable_fec)
            link->ctx->dc->debug.disable_fec = true;

        // Check if edid is the same
        if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
            same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);

@@ -1498,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
    }
}

static enum dc_status enable_link_dp(
        struct dc_state *state,
        struct pipe_ctx *pipe_ctx)
static enum dc_status enable_link_dp(struct dc_state *state,
                     struct pipe_ctx *pipe_ctx)
{
    struct dc_stream_state *stream = pipe_ctx->stream;
    enum dc_status status;

@@ -1532,7 +1531,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
    pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
        link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
    if (state->clk_mgr && !apply_seamless_boot_optimization)
        state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
        state->clk_mgr->funcs->update_clocks(state->clk_mgr,
                             state, false);

    // during mode switch we do DP_SET_POWER off then on, and OUI is lost
    dpcd_set_source_specific_data(link);

@@ -1540,21 +1540,20 @@ static enum dc_status enable_link_dp(struct dc_state *state,
    skip_video_pattern = true;

    if (link_settings.link_rate == LINK_RATE_LOW)
        skip_video_pattern = false;
        skip_video_pattern = false;

    if (perform_link_training_with_retries(
            &link_settings,
            skip_video_pattern,
            LINK_TRAINING_ATTEMPTS,
            pipe_ctx,
            pipe_ctx->stream->signal)) {
    if (perform_link_training_with_retries(&link_settings,
                           skip_video_pattern,
                           LINK_TRAINING_ATTEMPTS,
                           pipe_ctx,
                           pipe_ctx->stream->signal)) {
        link->cur_link_settings = link_settings;
        status = DC_OK;
    }
    else
    } else {
        status = DC_FAIL_DP_LINK_TRAINING;
    }

    if (link->preferred_training_settings.fec_enable != NULL)
    if (link->preferred_training_settings.fec_enable)
        fec_enable = *link->preferred_training_settings.fec_enable;
    else
        fec_enable = true;

@@ -1766,8 +1765,7 @@ static void write_i2c_retimer_setting(
        slave_address, buffer[0], buffer[1], i2c_success?1:0);

    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
     * needs to be set to 1 on every 0xA-0xC write.

@@ -1785,8 +1783,7 @@ static void write_i2c_retimer_setting(
                pipe_ctx->stream->link->ddc,
                slave_address, &offset, 1, &value, 1);
            if (!i2c_success)
                /* Write failure */
                ASSERT(i2c_success);
                goto i2c_write_fail;
        }

        buffer[0] = offset;

@@ -1798,8 +1795,7 @@ static void write_i2c_retimer_setting(
            offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
            slave_address, buffer[0], buffer[1], i2c_success?1:0);
        if (!i2c_success)
            /* Write failure */
            ASSERT(i2c_success);
            goto i2c_write_fail;
    }
}

@@ -1819,8 +1815,7 @@ static void write_i2c_retimer_setting(
        slave_address, buffer[0], buffer[1], i2c_success?1:0);

    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
     * needs to be set to 1 on every 0xA-0xC write.

@@ -1838,8 +1833,7 @@ static void write_i2c_retimer_setting(
            pipe_ctx->stream->link->ddc,
            slave_address, &offset, 1, &value, 1);
        if (!i2c_success)
            /* Write failure */
            ASSERT(i2c_success);
            goto i2c_write_fail;
    }

    buffer[0] = offset;

@@ -1851,8 +1845,7 @@ static void write_i2c_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;
    }
}

@@ -1870,8 +1863,7 @@ static void write_i2c_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x00 to 0x23 */
    buffer[0] = 0x00;

@@ -1882,8 +1874,7 @@ static void write_i2c_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0xff to 0x00 */
    buffer[0] = 0xff;

@@ -1894,10 +1885,14 @@ static void write_i2c_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    }

    return;

i2c_write_fail:
    DC_LOG_DEBUG("Set retimer failed");
}

static void write_i2c_default_retimer_setting(

@@ -1922,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x0A to 0x17 */
    buffer[0] = 0x0A;

@@ -1934,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x0B to 0xDA or 0xD8 */
    buffer[0] = 0x0B;

@@ -1946,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x0A to 0x17 */
    buffer[0] = 0x0A;

@@ -1958,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x0C to 0x1D or 0x91 */
    buffer[0] = 0x0C;

@@ -1970,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;

    /* Write offset 0x0A to 0x17 */
    buffer[0] = 0x0A;

@@ -1982,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
        offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
        slave_address, buffer[0], buffer[1], i2c_success?1:0);
    if (!i2c_success)
        /* Write failure */
        ASSERT(i2c_success);
        goto i2c_write_fail;


    if (is_vga_mode) {

@ -1998,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
|
|||
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
|
||||
slave_address, buffer[0], buffer[1], i2c_success?1:0);
|
||||
if (!i2c_success)
|
||||
/* Write failure */
|
||||
ASSERT(i2c_success);
|
||||
goto i2c_write_fail;
|
||||
|
||||
/* Write offset 0x00 to 0x23 */
|
||||
buffer[0] = 0x00;
|
||||
|
@ -2010,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
|
|||
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
|
||||
slave_address, buffer[0], buffer[1], i2c_success?1:0);
|
||||
if (!i2c_success)
|
||||
/* Write failure */
|
||||
ASSERT(i2c_success);
|
||||
goto i2c_write_fail;
|
||||
|
||||
/* Write offset 0xff to 0x00 */
|
||||
buffer[0] = 0xff;
|
||||
|
@ -2022,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
|
|||
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
|
||||
slave_address, buffer[0], buffer[1], i2c_success?1:0);
|
||||
if (!i2c_success)
|
||||
/* Write failure */
|
||||
ASSERT(i2c_success);
|
||||
goto i2c_write_fail;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
i2c_write_fail:
|
||||
DC_LOG_DEBUG("Set default retimer failed");
|
||||
}
|
||||
|
||||
static void write_i2c_redriver_setting(
|
||||
|
@ -2053,8 +2044,7 @@ static void write_i2c_redriver_setting(
|
|||
slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
|
||||
|
||||
if (!i2c_success)
|
||||
/* Write failure */
|
||||
ASSERT(i2c_success);
|
||||
DC_LOG_DEBUG("Set redriver failed");
|
||||
}
|
||||
|
||||
static void disable_link(struct dc_link *link, enum signal_type signal)
|
||||
|
@ -2960,6 +2950,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
|
|||
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
|
||||
config.dpms_off = dpms_off;
|
||||
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
|
||||
config.mst_supported = (pipe_ctx->stream->signal ==
|
||||
SIGNAL_TYPE_DISPLAY_PORT_MST);
|
||||
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
|
||||
}
|
||||
}
|
||||
|
@ -3077,9 +3069,14 @@ void core_link_enable_stream(
|
|||
|
||||
if (pipe_ctx->stream->timing.flags.DSC) {
|
||||
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal))
|
||||
dp_set_dsc_enable(pipe_ctx, true);
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal)) {
|
||||
/* Here we only need to enable DSC on RX. DSC HW programming
|
||||
* was done earlier, as part of timing programming.
|
||||
*/
|
||||
dp_set_dsc_on_rx(pipe_ctx, true);
|
||||
}
|
||||
}
|
||||
|
||||
dc->hwss.enable_stream(pipe_ctx);
|
||||
|
||||
/* Set DPS PPS SDP (AKA "info frames") */
|
||||
|
@ -3106,7 +3103,7 @@ void core_link_enable_stream(
|
|||
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
|
||||
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal))
|
||||
dp_set_dsc_enable(pipe_ctx, true);
|
||||
dp_set_dsc_on_rx(pipe_ctx, true);
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -3410,7 +3407,7 @@ uint32_t dc_link_bandwidth_kbps(
|
|||
link_bw_kbps *= 8; /* 8 bits per byte*/
|
||||
link_bw_kbps *= link_setting->lane_count;
|
||||
|
||||
if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
|
||||
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
|
||||
/* Account for FEC overhead.
|
||||
* We have to do it based on caps,
|
||||
* and not based on FEC being set ready,
|
||||
|
@ -3454,3 +3451,11 @@ void dc_link_overwrite_extended_receiver_cap(
|
|||
dp_overwrite_extended_receiver_cap(link);
|
||||
}
|
||||
|
||||
bool dc_link_is_fec_supported(const struct dc_link *link)
|
||||
{
|
||||
return (dc_is_dp_signal(link->connector_signal) &&
|
||||
link->link_enc->features.fec_supported &&
|
||||
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
|
||||
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
|
||||
}
|
||||
|
||||
|
|
|
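Note: dc_link_is_fec_supported() (added above) folds four previously scattered conditions — DP signal type, link-encoder FEC capability, DPCD FEC capability, and the FPGA/Maximus exclusion — into one predicate. A minimal sketch of the call shape the later dp_set_fec_ready()/dp_set_fec_enable() hunks adopt (the guard line is copied from those hunks; the body here is elided and illustrative):

	void dp_set_fec_ready(struct dc_link *link, bool ready)
	{
		/* capability checks are centralized; the user-facing
		 * debug kill switch stays a separate, explicit override */
		if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
			return;

		/* ... write DP_FEC_CONFIGURATION and arm the link encoder ... */
	}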
@@ -1446,11 +1446,15 @@ enum link_training_result dc_link_dp_perform_link_training(
 			&link->preferred_training_settings,
 			&lt_settings);

-	/* 1. set link rate, lane count and spread. */
-	dpcd_set_link_settings(link, &lt_settings);
+	/* Configure lttpr mode */
+	if (!link->is_lttpr_mode_transparent)
+		configure_lttpr_mode(link);
+
+	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+	else
+		dpcd_set_link_settings(link, &lt_settings);

 	if (link->preferred_training_settings.fec_enable != NULL)
 		fec_enable = *link->preferred_training_settings.fec_enable;

@@ -1460,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training(
 	dp_set_fec_ready(link, fec_enable);

 	if (!link->is_lttpr_mode_transparent) {
-		/* Configure lttpr mode */
-		configure_lttpr_mode(link);

 		/* 2. perform link training (set link training done
 		 * to false is done as well)

@@ -1669,11 +1671,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	dp_set_panel_mode(link, panel_mode);

-	/* Attempt to train with given link training settings */
-	/* Set link rate, lane count and spread. */
-	dpcd_set_link_settings(link, &lt_settings);
+	/* Set link rate, lane count and spread. */
+	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+	else
+		dpcd_set_link_settings(link, &lt_settings);

 	/* 2. perform link training (set link training done
 	 * to false is done as well)
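Both training entry points now make exactly one of the two possible first DPCD writes. A compact sketch of the ordering they converge on, with identifiers as in the hunks above (dc_link_dp_sync_lt_attempt gets the same if/else but without the LTTPR step):

	/* LTTPR mode is configured before any link-settings write; the early
	 * clock-recovery-pattern workaround replaces — rather than precedes —
	 * the normal dpcd_set_link_settings() call.
	 */
	if (!link->is_lttpr_mode_transparent)
		configure_lttpr_mode(link);

	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
	else
		dpcd_set_link_settings(link, &lt_settings);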
@@ -3720,7 +3722,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
 		struct pipe_ctx *odm_pipe;
 		enum controller_dp_color_space controller_color_space;
 		int opp_cnt = 1;
-		uint16_t count = 0;
+		int offset = 0;
+		int dpg_width = width;

 		switch (test_pattern_color_space) {
 		case DP_TEST_PATTERN_COLOR_SPACE_RGB:

@@ -3742,33 +3745,30 @@ static void set_crtc_test_pattern(struct dc_link *link,

 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 			opp_cnt++;
+		dpg_width = width / opp_cnt;
+		offset = dpg_width;

-		width /= opp_cnt;
+		opp->funcs->opp_set_disp_pattern_generator(opp,
+			controller_test_pattern,
+			controller_color_space,
+			color_depth,
+			NULL,
+			dpg_width,
+			height,
+			0);

 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 			struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;

 			odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
 			odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
 				controller_test_pattern,
 				controller_color_space,
 				color_depth,
 				NULL,
-				width,
-				height);
-		}
-		opp->funcs->opp_set_disp_pattern_generator(opp,
-			controller_test_pattern,
-			controller_color_space,
-			color_depth,
-			NULL,
-			width,
-			height);
-		/* wait for dpg to blank pixel data with test pattern */
-		for (count = 0; count < 1000; count++) {
-			if (opp->funcs->dpg_is_blanked(opp))
-				break;
-			udelay(100);
+				dpg_width,
+				height,
+				offset);
+			offset += offset;
 		}
 	}
 }
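With ODM combine, each OPP now draws only its own horizontal slice of the pattern instead of the full line. A worked example for the arithmetic above, assuming a 3840-wide timing split across two OPPs (numbers are illustrative, not from the patch):

	/* width = 3840, opp_cnt = 2 */
	dpg_width = width / opp_cnt;	/* 1920: each DPG renders half the line */
	offset = dpg_width;		/* second slice starts at x = 1920 */

	/* master OPP draws columns [0, 1920) with offset 0;
	 * the chained ODM OPP draws [1920, 3840) with offset 1920;
	 * offset += offset advances the start column for any further pipe.
	 */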
@@ -3786,11 +3786,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
 	else if (opp->funcs->opp_set_disp_pattern_generator) {
 		struct pipe_ctx *odm_pipe;
 		int opp_cnt = 1;
+		int dpg_width = width;

 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 			opp_cnt++;

-		width /= opp_cnt;
+		dpg_width = width / opp_cnt;
 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 			struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;

@@ -3800,16 +3801,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
 				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 				color_depth,
 				NULL,
-				width,
-				height);
+				dpg_width,
+				height,
+				0);
 		}
 		opp->funcs->opp_set_disp_pattern_generator(opp,
 			CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 			CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 			color_depth,
 			NULL,
-			width,
-			height);
+			dpg_width,
+			height,
+			0);
 	}
 }
 break;

@@ -3987,6 +3990,11 @@ bool dc_link_dp_set_test_pattern(
 		default:
 			break;
 		}

+		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+			pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+					pipe_ctx->stream_res.tg);
+		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+		/* update MSA to requested color space */
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
 				&pipe_ctx->stream->timing,

@@ -3994,9 +4002,27 @@ bool dc_link_dp_set_test_pattern(
 				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
 				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);

+		if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+			else
+				pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+			resource_build_info_frame(pipe_ctx);
+			link->dc->hwss.update_info_frame(pipe_ctx);
+		}
+
 		/* CRTC Patterns */
 		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);

+		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VACTIVE);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VBLANK);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VACTIVE);
+		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+			pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+					pipe_ctx->stream_res.tg);
 		/* Set Test Pattern state */
 		link->test_pattern_enabled = true;
 	}

@@ -4126,8 +4152,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
 	struct link_encoder *link_enc = link->link_enc;
 	uint8_t fec_config = 0;

-	if (link->dc->debug.disable_fec ||
-			IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;

 	if (link_enc->funcs->fec_set_ready &&

@@ -4162,8 +4187,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
 	struct link_encoder *link_enc = link->link_enc;

-	if (link->dc->debug.disable_fec ||
-			IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;

 	if (link_enc->funcs->fec_set_enable &&
@@ -394,7 +394,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
 	DC_LOG_DSC("\tslice_width %d", config->slice_width);
 }

-static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
 {
 	struct dc *dc = pipe_ctx->stream->ctx->dc;
 	struct dc_stream_state *stream = pipe_ctx->stream;

@@ -431,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

@@ -535,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;

 		DC_LOG_DSC(" ");
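Un-static'ing dp_set_dsc_on_rx() lets DSC bring-up be split across the sequence: the hardware side is programmed together with the timing, and only the sink's DPCD enable is deferred to stream enable (see the dcn20_enable_stream_timing hunk further down). A rough, abridged sketch of the resulting order, using only names from these hunks:

	/* at timing programming: size the DIG BE bandwidth for DSC up front */
	if (pipe_ctx->stream->timing.flags.DSC)
		dp_set_dsc_on_stream(pipe_ctx, true);

	/* ... later, when the stream is lit up: only flip DP_DSC_ENABLE
	 * on the receiver — the HW blocks were configured already */
	if (pipe_ctx->stream->timing.flags.DSC)
		dp_set_dsc_on_rx(pipe_ctx, true);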
@@ -2171,10 +2171,10 @@ enum dc_status dc_validate_global_state(
 			if (pipe_ctx->stream != stream)
 				continue;

-			if (dc->res_pool->funcs->get_default_swizzle_mode &&
+			if (dc->res_pool->funcs->patch_unknown_plane_state &&
 					pipe_ctx->plane_state &&
 					pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
-				result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+				result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
 				if (result != DC_OK)
 					return result;
 			}
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"

-#define DC_VER "3.2.74"
+#define DC_VER "3.2.76"

 #define MAX_SURFACES 3
 #define MAX_PLANES 6

@@ -230,6 +230,7 @@ struct dc_config {
 	bool forced_clocks;
 	bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
 	bool multi_mon_pp_mclk_switch;
+	bool psr_on_dmub;
 };

 enum visual_confirm {

@@ -389,6 +390,7 @@ struct dc_debug_options {
 	int always_scale;
 	bool disable_pplib_clock_request;
 	bool disable_clock_gate;
+	bool disable_mem_low_power;
 	bool disable_dmcu;
 	bool disable_psr;
 	bool force_abm_enable;

@@ -410,7 +412,6 @@ struct dc_debug_options {
 	bool dmub_offload_enabled;
 	bool dmcub_emulation;
 	bool dmub_command_table; /* for testing only */
-	bool psr_on_dmub;
 	struct dc_bw_validation_profile bw_val_profile;
 	bool disable_fec;
 	bool disable_48mhz_pwrdwn;

@@ -1024,6 +1025,11 @@ struct dc_sink_dsc_caps {
 	struct dsc_dec_dpcd_caps dsc_dec_caps;
 };

+struct dc_sink_fec_caps {
+	bool is_rx_fec_supported;
+	bool is_topology_fec_supported;
+};
+
 /*
  * The sink structure contains EDID and other display device properties
  */

@@ -1037,7 +1043,8 @@ struct dc_sink {
 	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
 	bool converter_disable_audio;

-	struct dc_sink_dsc_caps sink_dsc_caps;
+	struct dc_sink_dsc_caps dsc_caps;
+	struct dc_sink_fec_caps fec_caps;

 	/* private to DC core */
 	struct dc_link *link;
@@ -333,4 +333,7 @@ bool dc_submit_i2c_oem(

 uint32_t dc_bandwidth_in_kbps_from_timing(
 	const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
 #endif /* DC_LINK_H_ */

@@ -231,6 +231,7 @@ struct dc_panel_patch {
 	unsigned int extra_t7_ms;
 	unsigned int skip_scdc_overwrite;
 	unsigned int delay_ignore_msa;
+	unsigned int disable_fec;
 };

 struct dc_edid_caps {
@@ -144,7 +144,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 		}
 	}

-	if (!pipe_ctx || !&pipe_ctx->plane_res || !&pipe_ctx->stream_res)
+	if (!pipe_ctx)
 		return false;

 	// First, set the psr version

@@ -235,6 +235,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
  */
 void dmub_psr_destroy(struct dmub_psr **dmub)
 {
-	kfree(dmub);
+	kfree(*dmub);
 	*dmub = NULL;
 }
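Two small correctness fixes land here: `!&pipe_ctx->plane_res` takes the address of a member and can never be NULL, so the old copy_settings check was dead code; and dmub_psr_destroy() freed the double pointer itself rather than the object it refers to. The generic shape of the corrected destroy, as an illustrative sketch (hypothetical type name):

	void thing_destroy(struct thing **pp)
	{
		kfree(*pp);	/* free the heap object; kfree(pp) targeted the wrong address */
		*pp = NULL;	/* clear the caller's handle so it cannot dangle */
	}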
@@ -2717,30 +2717,20 @@ void dcn10_optimize_bandwidth(
 		hws->funcs.verify_allow_pstate_change_high(dc);

-	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-		if (context->stream_count == 0) {
-			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
-
-			dc->clk_mgr->funcs->update_clocks(
-					dc->clk_mgr,
-					context,
-					true);
-		} else if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-			dc->clk_mgr->funcs->update_clocks(
-					dc->clk_mgr,
-					context,
-					true);
-		}
-	}
-
-	if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		hubbub->funcs->program_watermarks(hubbub,
-				&context->bw_ctx.bw.dcn.watermarks,
-				dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
-				true);
-	}
-
-	dc->clk_optimized_required = false;
-	dc->wm_optimized_required = false;
+	if (context->stream_count == 0)
+		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
+
+	dc->clk_mgr->funcs->update_clocks(
+			dc->clk_mgr,
+			context,
+			true);
+
+	hubbub->funcs->program_watermarks(hubbub,
+			&context->bw_ctx.bw.dcn.watermarks,
+			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+			true);

 	dcn10_stereo_hw_frame_pack_wa(dc, context);

 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
 	uint32_t asic_blank_end;
 	uint32_t v_init;
 	uint32_t v_fp2 = 0;
-	int32_t vertical_line_start;

 	struct optc *optc1 = DCN10TG_FROM_TG(optc);

@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
 			patched_crtc_timing.v_border_top;

 	/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
-	vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
-	if (vertical_line_start < 0)
-		v_fp2 = -vertical_line_start;
+	if (optc1->vstartup_start > asic_blank_end)
+		v_fp2 = optc1->vstartup_start - asic_blank_end;

 	/* Interlace */
 	if (REG(OTG_INTERLACE_CONTROL)) {

@@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
 		REG_UPDATE_3(OTG_STEREO_CONTROL,
 			OTG_STEREO_EN, stereo_en,
 			OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
-			OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+			OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);

 		if (flags->PROGRAM_POLARITY)
 			REG_UPDATE(OTG_STEREO_CONTROL,
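The VTG change drops the signed intermediate and computes v_fp2 by direct comparison. A worked check with hypothetical values vstartup_start = 100, asic_blank_end = 96:

	/* old: vertical_line_start = 96 - 100 + 1 = -3  ->  v_fp2 = 3
	 * new: 100 > 96                                 ->  v_fp2 = 100 - 96 = 4
	 *
	 * i.e. besides removing the int32_t round-trip on unsigned inputs,
	 * the new form counts the full vstartup-past-blank-end distance
	 * rather than that distance minus one.
	 */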
@@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context)
 	return DC_OK;
 }

-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
 	enum dc_status result = DC_OK;

@@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
 	.validate_plane = dcn10_validate_plane,
 	.validate_global = dcn10_validate_global,
 	.add_stream_to_ctx = dcn10_add_stream_to_ctx,
-	.get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
 };
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
 	}
 }

-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
-		struct dpp *dpp,
-		struct scaler_data *scl_data,
-		const struct scaling_taps *in_taps)
-{
-	/* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
-	if (scl_data->viewport.width != scl_data->h_active &&
-		scl_data->viewport.height != scl_data->v_active &&
-		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-		scl_data->format == PIXEL_FORMAT_FP16)
-		return false;
-
-	if (scl_data->viewport.width > scl_data->h_active &&
-		dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
-		scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
-		return false;
-
-	/* TODO: add lb check */
-
-	/* No support for programming ratio of 8, drop to 7.99999.. */
-	if (scl_data->ratios.horz.value == (8ll << 32))
-		scl_data->ratios.horz.value--;
-	if (scl_data->ratios.vert.value == (8ll << 32))
-		scl_data->ratios.vert.value--;
-	if (scl_data->ratios.horz_c.value == (8ll << 32))
-		scl_data->ratios.horz_c.value--;
-	if (scl_data->ratios.vert_c.value == (8ll << 32))
-		scl_data->ratios.vert_c.value--;
-
-	/* Set default taps if none are provided */
-	if (in_taps->h_taps == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
-			scl_data->taps.h_taps = 8;
-		else
-			scl_data->taps.h_taps = 4;
-	} else
-		scl_data->taps.h_taps = in_taps->h_taps;
-	if (in_taps->v_taps == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
-			scl_data->taps.v_taps = 8;
-		else
-			scl_data->taps.v_taps = 4;
-	} else
-		scl_data->taps.v_taps = in_taps->v_taps;
-	if (in_taps->v_taps_c == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
-			scl_data->taps.v_taps_c = 4;
-		else
-			scl_data->taps.v_taps_c = 2;
-	} else
-		scl_data->taps.v_taps_c = in_taps->v_taps_c;
-	if (in_taps->h_taps_c == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
-			scl_data->taps.h_taps_c = 4;
-		else
-			scl_data->taps.h_taps_c = 2;
-	} else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
-		/* Only 1 and even h_taps_c are supported by hw */
-		scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
-	else
-		scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
-	if (!dpp->ctx->dc->debug.always_scale) {
-		if (IDENTITY_RATIO(scl_data->ratios.horz))
-			scl_data->taps.h_taps = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.vert))
-			scl_data->taps.v_taps = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.horz_c))
-			scl_data->taps.h_taps_c = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.vert_c))
-			scl_data->taps.v_taps_c = 1;
-	}
-
-	return true;
-}
-
 void oppn20_dummy_program_regamma_pwl(
 	struct dpp *dpp,
 	const struct pwl_params *params,
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
 	dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
 	dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
 	dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+	dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;

 	// TODO: in addition to validating slice height (pic height must be divisible by slice height),
 	// see what happens when the same condition doesn't apply for slice_width/pic_width.

@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
 		reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;

 	reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
-	reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
 }

 static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
@@ -307,7 +307,8 @@ void dcn20_init_blank(
 			COLOR_DEPTH_UNDEFINED,
 			&black_color,
 			otg_active_width,
-			otg_active_height);
+			otg_active_height,
+			0);

 	if (num_opps == 2) {
 		bottom_opp->funcs->opp_set_disp_pattern_generator(

@@ -317,7 +318,8 @@ void dcn20_init_blank(
 				COLOR_DEPTH_UNDEFINED,
 				&black_color,
 				otg_active_width,
-				otg_active_height);
+				otg_active_height,
+				0);
 	}

 	hws->funcs.wait_for_blank_complete(opp);

@@ -621,6 +623,13 @@ enum dc_status dcn20_enable_stream_timing(

 	/* TODO check if timing_changed, disable stream if timing changed */

+	/* Have to setup DSC here to make sure the bandwidth sent to DIG BE won't be bigger than
+	 * what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag will be automatically
+	 * set at a later time when the video is enabled (DP_VID_STREAM_EN = 1).
+	 */
+	if (pipe_ctx->stream->timing.flags.DSC)
+		dp_set_dsc_on_stream(pipe_ctx, true);
+
 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
 		opp_cnt++;

@@ -974,7 +983,8 @@ void dcn20_blank_pixel_data(
 			stream->timing.display_color_depth,
 			&black_color,
 			width,
-			height);
+			height,
+			0);

 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 		odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(

@@ -985,7 +995,8 @@ void dcn20_blank_pixel_data(
 				stream->timing.display_color_depth,
 				&black_color,
 				width,
-				height);
+				height,
+				0);
 	}

 	if (!blank)

@@ -1656,22 +1667,16 @@ void dcn20_optimize_bandwidth(
 {
 	struct hubbub *hubbub = dc->res_pool->hubbub;

-	if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		/* program dchubbub watermarks */
-		hubbub->funcs->program_watermarks(hubbub,
-					&context->bw_ctx.bw.dcn.watermarks,
-					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
-					true);
-		dc->wm_optimized_required = false;
-	}
+	/* program dchubbub watermarks */
+	hubbub->funcs->program_watermarks(hubbub,
+				&context->bw_ctx.bw.dcn.watermarks,
+				dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
+				true);

-	if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) {
-		dc->clk_mgr->funcs->update_clocks(
-				dc->clk_mgr,
-				context,
-				true);
-		dc->wm_optimized_required = false;
-	}
+	dc->clk_mgr->funcs->update_clocks(
+			dc->clk_mgr,
+			context,
+			true);
 }

 bool dcn20_update_bandwidth(
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
 		enum dc_color_depth color_depth,
 		const struct tg_color *solid_color,
 		int width,
-		int height)
+		int height,
+		int offset)
 {
 	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
 	enum test_pattern_color_format bit_depth;

@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
 			DPG_ACTIVE_WIDTH, width,
 			DPG_ACTIVE_HEIGHT, height);

+	/* set DPG offset */
+	REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+			DPG_X_OFFSET, offset,
+			DPG_SEGMENT_WIDTH, 0);
+
 	switch (test_pattern) {
 	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
 	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
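The new offset argument lands in the per-instance DPG_OFFSET_SEGMENT register, which is what lets each pattern generator in an ODM pair start its output at the right X position. A sketch of a caller driving opp_cnt pipes with it — the opps[] array is a hypothetical flattening of the odm_pipe walk used in the earlier hunks:

	int dpg_width = width / opp_cnt;	/* horizontal slice per OPP */
	int offset = 0;
	int i;

	for (i = 0; i < opp_cnt; i++) {
		/* each DPG blanks/draws only its own slice, shifted by DPG_X_OFFSET */
		opps[i]->funcs->opp_set_disp_pattern_generator(opps[i],
				test_pattern, color_space, color_depth,
				NULL, dpg_width, height, offset);
		offset += dpg_width;
	}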
@@ -36,6 +36,7 @@
 #define OPP_DPG_REG_LIST(id) \
 	SRI(DPG_CONTROL, DPG, id), \
 	SRI(DPG_DIMENSIONS, DPG, id), \
+	SRI(DPG_OFFSET_SEGMENT, DPG, id), \
 	SRI(DPG_COLOUR_B_CB, DPG, id), \
 	SRI(DPG_COLOUR_G_Y, DPG, id), \
 	SRI(DPG_COLOUR_R_CR, DPG, id), \

@@ -53,6 +54,7 @@
 	uint32_t FMT_422_CONTROL; \
 	uint32_t DPG_CONTROL; \
 	uint32_t DPG_DIMENSIONS; \
+	uint32_t DPG_OFFSET_SEGMENT; \
 	uint32_t DPG_COLOUR_B_CB; \
 	uint32_t DPG_COLOUR_G_Y; \
 	uint32_t DPG_COLOUR_R_CR; \

@@ -68,6 +70,8 @@
 	OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
 	OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
 	OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+	OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+	OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \

@@ -97,6 +101,8 @@
 	type DPG_HRES; \
 	type DPG_ACTIVE_WIDTH; \
 	type DPG_ACTIVE_HEIGHT; \
+	type DPG_X_OFFSET; \
+	type DPG_SEGMENT_WIDTH; \
 	type DPG_COLOUR0_R_CR; \
 	type DPG_COLOUR1_R_CR; \
 	type DPG_COLOUR0_B_CB; \

@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
 		enum dc_color_depth color_depth,
 		const struct tg_color *solid_color,
 		int width,
-		int height);
+		int height,
+		int offset);

 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 	.xfc_supported = true,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
+	.number_of_cursors = 1,
 };

 struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {

@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
 	.xfc_supported = true,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
-	.ptoi_supported = 0
+	.ptoi_supported = 0,
+	.number_of_cursors = 1,
 };

 struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {

@@ -335,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
 	.use_urgent_burst_bw = 0
 };

+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+	.clock_limits = {
+			{
+				.state = 0,
+				.dcfclk_mhz = 560.0,
+				.fabricclk_mhz = 560.0,
+				.dispclk_mhz = 513.0,
+				.dppclk_mhz = 513.0,
+				.phyclk_mhz = 540.0,
+				.socclk_mhz = 560.0,
+				.dscclk_mhz = 171.0,
+				.dram_speed_mts = 8960.0,
+			},
+			{
+				.state = 1,
+				.dcfclk_mhz = 694.0,
+				.fabricclk_mhz = 694.0,
+				.dispclk_mhz = 642.0,
+				.dppclk_mhz = 642.0,
+				.phyclk_mhz = 600.0,
+				.socclk_mhz = 694.0,
+				.dscclk_mhz = 214.0,
+				.dram_speed_mts = 11104.0,
+			},
+			{
+				.state = 2,
+				.dcfclk_mhz = 875.0,
+				.fabricclk_mhz = 875.0,
+				.dispclk_mhz = 734.0,
+				.dppclk_mhz = 734.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 875.0,
+				.dscclk_mhz = 245.0,
+				.dram_speed_mts = 14000.0,
+			},
+			{
+				.state = 3,
+				.dcfclk_mhz = 1000.0,
+				.fabricclk_mhz = 1000.0,
+				.dispclk_mhz = 1100.0,
+				.dppclk_mhz = 1100.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1000.0,
+				.dscclk_mhz = 367.0,
+				.dram_speed_mts = 16000.0,
+			},
+			{
+				.state = 4,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+			/*Extra state, no dispclk ramping*/
+			{
+				.state = 5,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+		},
+	.num_states = 5,
+	.sr_exit_time_us = 8.6,
+	.sr_enter_plus_exit_time_us = 10.9,
+	.urgent_latency_us = 4.0,
+	.urgent_latency_pixel_data_only_us = 4.0,
+	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+	.urgent_latency_vm_data_only_us = 4.0,
+	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+	.max_avg_sdp_bw_use_normal_percent = 40.0,
+	.max_avg_dram_bw_use_normal_percent = 40.0,
+	.writeback_latency_us = 12.0,
+	.ideal_dram_bw_after_urgent_percent = 40.0,
+	.max_request_size_bytes = 256,
+	.dram_channel_width_bytes = 2,
+	.fabric_datapath_to_dcn_data_return_bytes = 64,
+	.dcn_downspread_percent = 0.5,
+	.downspread_percent = 0.38,
+	.dram_page_open_time_ns = 50.0,
+	.dram_rw_turnaround_time_ns = 17.5,
+	.dram_return_buffer_per_channel_bytes = 8192,
+	.round_trip_ping_latency_dcfclk_cycles = 131,
+	.urgent_out_of_order_return_per_channel_bytes = 256,
+	.channel_interleave_bytes = 256,
+	.num_banks = 8,
+	.num_chans = 8,
+	.vmm_page_size_bytes = 4096,
+	.dram_clock_change_latency_us = 404.0,
+	.dummy_pstate_latency_us = 5.0,
+	.writeback_dram_clock_change_latency_us = 23.0,
+	.return_bus_width_bytes = 64,
+	.dispclk_dppclk_vco_speed_mhz = 3850,
+	.xfc_bus_transport_time_us = 20,
+	.xfc_xbuf_latency_tolerance_us = 4,
+	.use_urgent_burst_bw = 0
+};
+
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };

 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL

@@ -1143,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_pixel_clock = 600000,
 		.hdmi_ycbcr420_supported = true,
 		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,

@@ -2041,14 +2155,17 @@ int dcn20_populate_dml_pipes_from_context(
 		/* todo: default max for now, until there is logic reflecting this in dc*/
 		pipes[pipe_cnt].dout.output_bpc = 12;
 		/*
-		 * Use max cursor settings for calculations to minimize
-		 * bw calculations due to cursor on/off
+		 * For graphic plane, cursor number is 1, nv12 is 0
 		 */
-		pipes[pipe_cnt].pipe.src.num_cursors = 2;
+		if (res_ctx->pipe_ctx[i].plane_state &&
+				res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+			pipes[pipe_cnt].pipe.src.num_cursors = 0;
+		else
+			pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;

 		pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
 		pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
 		pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
 		pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;

 		if (!res_ctx->pipe_ctx[i].plane_state) {
 			pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;

@@ -2298,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 				+ stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

@@ -3026,7 +3144,7 @@ static struct dc_cap_funcs cap_funcs = {
 };


-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
 	enum dc_status result = DC_OK;

@@ -3052,7 +3170,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
 	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
-	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
 	.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link

@@ -3290,6 +3408,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
 	uint32_t hw_internal_rev)
 {
+	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+		return &dcn2_0_nv14_soc;
+
 	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
 		return &dcn2_0_nv12_soc;

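DML no longer charges every pipe for two cursors: video (NV12) planes are modeled with none, and everything else with the count the IP descriptor advertises — hence the .number_of_cursors = 1 entries added to the dcn2_0/dcn2_1 ip params above. Condensed from the populate_dml_pipes hunk (`pipe` is shorthand for pipes[pipe_cnt].pipe, used here only to keep the sketch short):

	if (res_ctx->pipe_ctx[i].plane_state &&
	    res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pipe.src.num_cursors = 0;	/* NV12 planes never carry a HW cursor */
	else
		pipe.src.num_cursors = dc->dml.ip.number_of_cursors;	/* 1 on these parts */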
@@ -159,7 +159,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);

 void dcn20_patch_bounding_box(
 	struct dc *dc,
@@ -156,7 +156,8 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
 	.xfc_supported = false,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
-	.ptoi_supported = 0
+	.ptoi_supported = 0,
+	.number_of_cursors = 1,
 };

 struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {

@@ -1589,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_pixel_clock = 600000,
 		.hdmi_ycbcr420_supported = true,
 		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,

@@ -1729,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context(
 	return pipe_cnt;
 }

+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+	enum dc_status result = DC_OK;
+
+	if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+		plane_state->dcc.enable = 1;
+		/* align to our worst case block width */
+		plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+	}
+	result = dcn20_patch_unknown_plane_state(plane_state);
+	return result;
+}
+
 static struct resource_funcs dcn21_res_pool_funcs = {
 	.destroy = dcn21_destroy_resource_pool,
 	.link_enc_create = dcn21_link_encoder_create,

@@ -1738,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
-	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
 	.update_bw_bounding_box = update_bw_bounding_box

@@ -1785,6 +1800,7 @@ static bool dcn21_resource_construct(
 	dc->caps.force_dp_tps4_for_cp2520 = true;
 	dc->caps.extended_aux_timeout_support = true;
 	dc->caps.dmcub_support = true;
+	dc->caps.is_apu = true;

 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 		dc->debug = debug_defaults_drv;

@@ -1848,7 +1864,7 @@ static bool dcn21_resource_construct(
 		goto create_fail;
 	}

-	if (dc->debug.psr_on_dmub) {
+	if (dc->config.psr_on_dmub) {
 		pool->base.psr = dmub_psr_create(ctx);

 		if (pool->base.psr == NULL) {
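dcn21_patch_unknown_plane_state() enables DCC for Renoir planes whenever the dcc debug option allows it, then defers to the shared dcn20 handler; the metadata pitch is padded up to a 1024-pixel "worst case block width". Checking the rounding expression with a hypothetical 1920-wide surface:

	/* src_rect.width = 1920:
	 * (1920 + 1023) / 1024 = 2   (integer division)
	 * 2 * 1024 = 2048            -> meta_pitch padded from 1920 to 2048,
	 *                               the next multiple of 1024
	 */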
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
 	uint8_t otg_inst;
 	uint8_t link_enc_inst;
 	uint8_t stream_enc_inst;
+	uint8_t mst_supported;
 	void *dm_stream_ctx;
 	bool dpms_off;
 };

@@ -69,6 +69,7 @@ struct _vcs_dpi_voltage_scaling_st {

 struct _vcs_dpi_soc_bounding_box_st {
 	struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+	unsigned int num_states;
 	double sr_exit_time_us;
 	double sr_enter_plus_exit_time_us;
 	double urgent_latency_us;

@@ -111,7 +112,6 @@ struct _vcs_dpi_soc_bounding_box_st {
 	double xfc_bus_transport_time_us;
 	double xfc_xbuf_latency_tolerance_us;
 	int use_urgent_burst_bw;
-	unsigned int num_states;
 	double min_dcfclk;
 	bool do_urgent_latency_adjustment;
 	double urgent_latency_adjustment_fabric_clock_component_us;

@@ -204,6 +204,7 @@ struct _vcs_dpi_ip_params_st {
 	unsigned int LineBufferFixedBpp;
 	unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
 	unsigned int bug_forcing_LC_req_same_size_fixed;
+	unsigned int number_of_cursors;
 };

 struct _vcs_dpi_display_xfc_params_st {

@@ -124,7 +124,7 @@ struct resource_funcs {
 			struct dc *dc,
 			struct dc_state *new_ctx,
 			struct dc_stream_state *stream);
-	enum dc_status (*get_default_swizzle_mode)(
+	enum dc_status (*patch_unknown_plane_state)(
 			struct dc_plane_state *plane_state);

 	struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(

@@ -85,6 +85,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
 void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);

 #endif /* __DC_LINK_DP_H__ */

@@ -39,6 +39,7 @@ struct dsc_config {
 	uint32_t pic_height;
 	enum dc_pixel_encoding pixel_encoding;
 	enum dc_color_depth color_depth;  /* Bits per component */
+	bool is_odm;
 	struct dc_dsc_config dc_dsc_cfg;
 };

@@ -68,6 +68,7 @@ struct encoder_feature_support {
 	unsigned int max_hdmi_pixel_clock;
 	bool hdmi_ycbcr420_supported;
 	bool dp_ycbcr420_supported;
+	bool fec_supported;
 };

 union dpcd_psr_configuration {