drm fixes for 5.17-rc1:

Merge tag 'drm-next-2022-01-14' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "drivers fixes:

   - i915 fixes for ttm backend + one pm wakelock fix

   - amdgpu fixes, fairly big pile of small things all over. Note this
     doesn't yet contain the fixed version of the otg sync patch that
     blew up

   - small driver fixes: meson, sun4i, vga16fb probe fix

  drm core fixes:

   - cma-buf heap locking

   - ttm compilation

   - self refresh helper state check

   - wrong error message in atomic helpers

   - mipi-dbi buffer mapping"

* tag 'drm-next-2022-01-14' of git://anongit.freedesktop.org/drm/drm: (49 commits)
  drm/mipi-dbi: Fix source-buffer address in mipi_dbi_buf_copy
  drm: fix error found in some cases after the patch d1af5cd86997
  drm/ttm: fix compilation on ARCH=um
  dma-buf: cma_heap: Fix mutex locking section
  video: vga16fb: Only probe for EGA and VGA 16 color graphic cards
  drm/amdkfd: Fix ASIC name typos
  drm/amdkfd: Fix DQM asserts on Hawaii
  drm/amdgpu: Use correct VIEWPORT_DIMENSION for DCN2
  drm/amd/pm: only send GmiPwrDnControl msg on master die (v3)
  drm/amdgpu: use spin_lock_irqsave to avoid deadlock by local interrupt
  drm/amdgpu: not return error on the init_apu_flags
  drm/amdkfd: Use prange->update_list head for remove_list
  drm/amdkfd: Use prange->list head for insert_list
  drm/amdkfd: make SPDX License expression more sound
  drm/amdkfd: Check for null pointer after calling kmemdup
  drm/amd/display: invalid parameter check in dmub_hpd_callback
  Revert "drm/amdgpu: Don't inherit GEM object VMAs in child process"
  drm/amd/display: reset dcn31 SMU mailbox on failures
  drm/amdkfd: use default_groups in kobj_type
  drm/amdgpu: use default_groups in kobj_type
  ...
Commit 59d41458f1
@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
 maintainers:
   - Neil Armstrong <narmstrong@baylibre.com>

+allOf:
+  - $ref: /schemas/sound/name-prefix.yaml#
+
 description: |
   The Amlogic Meson Synopsys Designware Integration is composed of
   - A Synopsys DesignWare HDMI Controller IP
@@ -99,6 +102,8 @@ properties:
   "#sound-dai-cells":
     const: 0

+  sound-name-prefix: true
+
 required:
   - compatible
   - reg

@@ -78,6 +78,10 @@ properties:
   interrupts:
     maxItems: 1

+  amlogic,canvas:
+    description: should point to a canvas provider node
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   power-domains:
     maxItems: 1
     description: phandle to the associated power domain
@@ -106,6 +110,7 @@ required:
   - port@1
   - "#address-cells"
   - "#size-cells"
+  - amlogic,canvas

 additionalProperties: false

@@ -118,6 +123,7 @@ examples:
         interrupts = <3>;
         #address-cells = <1>;
         #size-cells = <0>;
+        amlogic,canvas = <&canvas>;

         /* CVBS VDAC output port */
         port@0 {

@@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
     struct cma_heap_buffer *buffer = dmabuf->priv;
     struct dma_heap_attachment *a;

+    mutex_lock(&buffer->lock);
+
     if (buffer->vmap_cnt)
         invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

-    mutex_lock(&buffer->lock);
     list_for_each_entry(a, &buffer->attachments, list) {
         if (!a->mapped)
             continue;
@@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
     struct cma_heap_buffer *buffer = dmabuf->priv;
     struct dma_heap_attachment *a;

+    mutex_lock(&buffer->lock);
+
     if (buffer->vmap_cnt)
         flush_kernel_vmap_range(buffer->vaddr, buffer->len);

-    mutex_lock(&buffer->lock);
     list_for_each_entry(a, &buffer->attachments, list) {
         if (!a->mapped)
             continue;

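The cma_heap fix above moves the lock acquisition so that the vmap_cnt check, the cache maintenance it guards, and the attachment walk all run inside one critical section. A minimal user-space sketch of the corrected pattern, using pthreads as a stand-in for the kernel mutex (names are illustrative, not the driver's):

    #include <pthread.h>

    struct buffer {
        pthread_mutex_t lock;
        int vmap_cnt;            /* protected by lock */
    };

    static void begin_cpu_access(struct buffer *buf)
    {
        /* Take the lock first, so the vmap_cnt test and the work it
         * guards cannot race with a concurrent vmap/vunmap. */
        pthread_mutex_lock(&buf->lock);

        if (buf->vmap_cnt) {
            /* ... invalidate cached mappings, still under the lock ... */
        }

        /* ... walk the attachment list, still under the lock ... */

        pthread_mutex_unlock(&buf->lock);
    }
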
@@ -514,13 +514,6 @@ out_put:
     return r;
 }

-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
-{
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-
-    return amdgpu_vram_mgr_usage(vram_man);
-}
-
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
                       struct amdgpu_device *src)
 {

@@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
                   uint64_t *bo_size, void *metadata_buffer,
                   size_t buffer_size, uint32_t *metadata_size,
                   uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
                       struct amdgpu_device *src);
 int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,

@@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 {
     s64 time_us, increment_us;
     u64 free_vram, total_vram, used_vram;
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
     /* Allow a maximum of 200 accumulated ms. This is basically per-IB
      * throttling.
      *
@@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
     }

     total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
-    used_vram = amdgpu_vram_mgr_usage(vram_man);
+    used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
     free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

     spin_lock(&adev->mm_stats.lock);
@@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
     if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
         u64 total_vis_vram = adev->gmc.visible_vram_size;
         u64 used_vis_vram =
-            amdgpu_vram_mgr_vis_usage(vram_man);
+            amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

         if (used_vis_vram < total_vis_vram) {
             u64 free_vis_vram = total_vis_vram - used_vis_vram;

@@ -552,7 +552,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
 }

 /**
- * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
+ * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
 *
 * this function is invoked only the debugfs register access
 */
@@ -567,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
         adev->gfx.rlc.funcs->is_rlcg_access_range) {
         if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
             return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+    } else if ((reg * 4) >= adev->rmmio_size) {
+        adev->pcie_wreg(adev, reg * 4, v);
     } else {
         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
     }
@@ -1448,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
         break;
     default:
-        return -EINVAL;
+        break;
     }

     return 0;
@@ -3496,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     mutex_init(&adev->psp.mutex);
     mutex_init(&adev->notifier_lock);

-    r = amdgpu_device_init_apu_flags(adev);
-    if (r)
-        return r;
+    amdgpu_device_init_apu_flags(adev);

     r = amdgpu_device_check_arguments(adev);
     if (r)
@@ -3833,6 +3833,7 @@ failed:

 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
 {
+
     /* Clear all CPU mappings pointing to this device */
     unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);

@@ -3913,6 +3914,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)

 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
+    int idx;
+
     amdgpu_fence_driver_sw_fini(adev);
     amdgpu_device_ip_fini(adev);
     release_firmware(adev->firmware.gpu_info_fw);
@@ -3937,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
     if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
         vga_client_unregister(adev->pdev);

+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+        iounmap(adev->rmmio);
+        adev->rmmio = NULL;
+        amdgpu_device_doorbell_fini(adev);
+        drm_dev_exit(idx);
+    }
+
     if (IS_ENABLED(CONFIG_PERF_EVENTS))
         amdgpu_pmu_fini(adev);
     if (adev->mman.discovery_bin)
@@ -3957,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 */
 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
 {
-    /* No need to evict vram on APUs for suspend to ram */
-    if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+    /* No need to evict vram on APUs for suspend to ram or s2idle */
+    if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
         return;

     if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
@@ -4005,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
     if (!adev->in_s0ix)
         amdgpu_amdkfd_suspend(adev, adev->in_runpm);

-    /* First evict vram memory */
     amdgpu_device_evict_resources(adev);

     amdgpu_fence_driver_hw_fini(adev);

     amdgpu_device_ip_suspend_phase2(adev);
-    /* This second call to evict device resources is to evict
-     * the gart page table using the CPU.
-     */
-    amdgpu_device_evict_resources(adev);

     return 0;
 }
@@ -4359,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
         goto error;

     amdgpu_virt_init_data_exchange(adev);
-    /* we need recover gart prior to run SMC/CP/SDMA resume */
-    amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));

     r = amdgpu_device_fw_loading(adev);
     if (r)
@@ -4680,10 +4684,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                 amdgpu_inc_vram_lost(tmp_adev);
             }

-            r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
-            if (r)
-                goto out;
-
             r = amdgpu_device_fw_loading(tmp_adev);
             if (r)
                 return r;

@@ -550,7 +550,8 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
     }
     /* some IP discovery tables on Navy Flounder don't have this set correctly */
     if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
-        (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
+        (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)) &&
+        (adev->pdev->revision != 0xFF))
         adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
     if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
         adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;

@@ -2194,9 +2194,9 @@ static int amdgpu_pmops_suspend(struct device *dev)

     if (amdgpu_acpi_is_s0ix_active(adev))
         adev->in_s0ix = true;
-    adev->in_s3 = true;
+    else
+        adev->in_s3 = true;
     r = amdgpu_device_suspend(drm_dev, true);
-    adev->in_s3 = false;
     if (r)
         return r;
     if (!adev->in_s0ix)
@@ -2217,6 +2217,8 @@ static int amdgpu_pmops_resume(struct device *dev)
     r = amdgpu_device_resume(drm_dev, true);
     if (amdgpu_acpi_is_s0ix_active(adev))
         adev->in_s0ix = false;
+    else
+        adev->in_s3 = false;
     return r;
 }

@@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
 */
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 {
-    int r;
+    if (adev->gart.bo != NULL)
+        return 0;

-    if (adev->gart.bo == NULL) {
-        struct amdgpu_bo_param bp;
-
-        memset(&bp, 0, sizeof(bp));
-        bp.size = adev->gart.table_size;
-        bp.byte_align = PAGE_SIZE;
-        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-        bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-            AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-        bp.type = ttm_bo_type_kernel;
-        bp.resv = NULL;
-        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
-        r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
-        if (r) {
-            return r;
-        }
-    }
-    return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
-    int r;
-
-    r = amdgpu_bo_reserve(adev->gart.bo, false);
-    if (unlikely(r != 0))
-        return r;
-    r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
-    if (r) {
-        amdgpu_bo_unreserve(adev->gart.bo);
-        return r;
-    }
-    r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
-    if (r)
-        amdgpu_bo_unpin(adev->gart.bo);
-    amdgpu_bo_unreserve(adev->gart.bo);
-    return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
-    int r;
-
-    if (adev->gart.bo == NULL) {
-        return;
-    }
-    r = amdgpu_bo_reserve(adev->gart.bo, true);
-    if (likely(r == 0)) {
-        amdgpu_bo_kunmap(adev->gart.bo);
-        amdgpu_bo_unpin(adev->gart.bo);
-        amdgpu_bo_unreserve(adev->gart.bo);
-        adev->gart.ptr = NULL;
-    }
+    return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+                       AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+                       NULL, (void *)&adev->gart.ptr);
 }

 /**
@@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
 */
 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
 {
-    if (adev->gart.bo == NULL) {
-        return;
-    }
-    amdgpu_bo_unref(&adev->gart.bo);
-    adev->gart.ptr = NULL;
+    amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
 }

 /*

@@ -264,9 +264,6 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
         !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
         vma->vm_flags &= ~VM_MAYWRITE;

-    if (bo->kfd_bo)
-        vma->vm_flags |= VM_DONTCOPY;
-
     return drm_gem_ttm_mmap(obj, vma);
 }

@@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;

-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-    return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
+    return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
 }

 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -206,30 +204,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 /**
  * amdgpu_gtt_mgr_usage - return usage of GTT domain
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
 *
 * Return how many bytes are used in the GTT domain
 */
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
 {
-    struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-
     return atomic64_read(&mgr->used) * PAGE_SIZE;
 }

 /**
  * amdgpu_gtt_mgr_recover - re-init gart
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
 *
 * Re-init the gart for each known BO in the GTT.
 */
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
-    struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+    struct amdgpu_device *adev;
     struct amdgpu_gtt_node *node;
     struct drm_mm_node *mm_node;
-    struct amdgpu_device *adev;
     int r = 0;

     adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);

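The amdgpu_gtt_mgr_usage()/amdgpu_gtt_mgr_recover() change passes the concrete amdgpu_gtt_mgr instead of the abstract ttm_resource_manager, and recover() derives the owning device with container_of() because the manager is embedded in it. A runnable sketch of that idiom with stand-in types (not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct gtt_mgr { unsigned long used; };
    struct device  { int id; struct gtt_mgr gtt_mgr; };

    /* Recover the enclosing device from a pointer to its embedded
     * manager, as amdgpu_gtt_mgr_recover() does. */
    static struct device *mgr_to_dev(struct gtt_mgr *mgr)
    {
        return container_of(mgr, struct device, gtt_mgr);
    }

    int main(void)
    {
        struct device dev = { .id = 42 };
        printf("%d\n", mgr_to_dev(&dev.gtt_mgr)->id);  /* prints 42 */
        return 0;
    }
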
@@ -678,13 +678,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_VRAM_USAGE:
-        ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+        ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_VIS_VRAM_USAGE:
-        ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+        ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_GTT_USAGE:
-        ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
+        ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_GDS_CONFIG: {
         struct drm_amdgpu_info_gds gds_info;
@@ -715,8 +715,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     }
     case AMDGPU_INFO_MEMORY: {
         struct drm_amdgpu_memory_info mem;
-        struct ttm_resource_manager *vram_man =
-            ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-        struct ttm_resource_manager *gtt_man =
-            ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
         memset(&mem, 0, sizeof(mem));
@@ -725,7 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             atomic64_read(&adev->vram_pin_size) -
             AMDGPU_VM_RESERVED_VRAM;
         mem.vram.heap_usage =
-            amdgpu_vram_mgr_usage(vram_man);
+            amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
         mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

         mem.cpu_accessible_vram.total_heap_size =
@@ -735,7 +733,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             atomic64_read(&adev->visible_pin_size),
             mem.vram.usable_heap_size);
         mem.cpu_accessible_vram.heap_usage =
-            amdgpu_vram_mgr_vis_usage(vram_man);
+            amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
         mem.cpu_accessible_vram.max_allocation =
             mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

@@ -744,7 +742,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
             atomic64_read(&adev->gart_pin_size);
         mem.gtt.heap_usage =
-            amdgpu_gtt_mgr_usage(gtt_man);
+            amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
         mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

         return copy_to_user(out, &mem,

@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/dma-buf.h>

+#include <drm/drm_drv.h>
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
 #include "amdgpu.h"
@@ -1061,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 */
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
+    int idx;
+
     amdgpu_ttm_fini(adev);
+
+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+        if (!adev->gmc.xgmi.connected_to_cpu) {
+            arch_phys_wc_del(adev->gmc.vram_mtrr);
+            arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+        }
+        drm_dev_exit(idx);
+    }
 }

 /**

@@ -1592,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
             /* Let IP handle its data, maybe we need get the output
              * from the callback to udpate the error type/count, etc
              */
+            memset(&err_data, 0, sizeof(err_data));
             ret = data->cb(obj->adev, &err_data, &entry);
             /* ue will trigger an interrupt, and in that case
              * we need do a reset to recovery the whole system.
@@ -1838,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
             .size = AMDGPU_GPU_PAGE_SIZE,
             .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
         };
-        status = amdgpu_vram_mgr_query_page_status(
-                ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+        status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
                 data->bps[i].retired_page);
         if (status == -EBUSY)
             (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
@@ -1940,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
             goto out;
         }

-        amdgpu_vram_mgr_reserve_range(
-            ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+        amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
             bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
             AMDGPU_GPU_PAGE_SIZE);

@@ -43,6 +43,7 @@
 #include <linux/sizes.h>
 #include <linux/module.h>

+#include <drm/drm_drv.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
@@ -1804,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 */
 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
+    int idx;
     if (!adev->mman.initialized)
         return;

@@ -1818,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                     NULL, NULL);
     amdgpu_ttm_fw_reserve_vram_fini(adev);

+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+        if (adev->mman.aper_base_kaddr)
+            iounmap(adev->mman.aper_base_kaddr);
+        adev->mman.aper_base_kaddr = NULL;
+
+        drm_dev_exit(idx);
+    }
+
     amdgpu_vram_mgr_fini(adev);
     amdgpu_gtt_mgr_fini(adev);
     amdgpu_preempt_mgr_fini(adev);

@@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);

 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);

 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);

@@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 void amdgpu_vram_mgr_free_sgt(struct device *dev,
                   enum dma_data_direction dir,
                   struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                   uint64_t start, uint64_t size);
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                       uint64_t start);

 int amdgpu_ttm_init(struct amdgpu_device *adev);

@@ -553,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 {
     struct amd_sriov_msg_vf2pf_info *vf2pf_info;
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

     vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

@@ -576,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
     vf2pf_info->driver_cert = 0;
     vf2pf_info->os_info.all = 0;

-    vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
-    vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+    vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
+    vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
     vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
     vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

@@ -727,6 +726,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
             vi_set_virt_ops(adev);
             break;
         case CHIP_VEGA10:
+            soc15_set_virt_ops(adev);
+            /* send a dummy GPU_INIT_DATA request to host on vega10 */
+            amdgpu_virt_request_init_data(adev);
+            break;
         case CHIP_VEGA20:
         case CHIP_ARCTURUS:
         case CHIP_ALDEBARAN:

@@ -144,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
 static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                       struct drm_atomic_state *state)
 {
+    unsigned long flags;
     if (crtc->state->event) {
-        spin_lock(&crtc->dev->event_lock);
+        spin_lock_irqsave(&crtc->dev->event_lock, flags);

         if (drm_crtc_vblank_get(crtc) != 0)
             drm_crtc_send_vblank_event(crtc, crtc->state->event);
         else
             drm_crtc_arm_vblank_event(crtc, crtc->state->event);

-        spin_unlock(&crtc->dev->event_lock);
+        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

         crtc->state->event = NULL;
     }

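The vkms change switches to spin_lock_irqsave() because event_lock can also be taken from interrupt context; saving and restoring the interrupt state keeps the flush from deadlocking against an interrupt handler that grabs the same lock. A rough user-space analogue, with signal blocking standing in for local-interrupt masking (illustrative only, not the kernel API):

    #include <pthread.h>
    #include <signal.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

    static void flush_events(void)
    {
        sigset_t all, saved;

        /* Block async signals around the critical section and restore
         * the previous mask afterwards -- the userspace counterpart of
         * spin_lock_irqsave()/spin_unlock_irqrestore(). */
        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &saved);   /* "irqsave"    */
        pthread_mutex_lock(&event_lock);

        /* ... send or arm the pending vblank event ... */

        pthread_mutex_unlock(&event_lock);
        pthread_sigmask(SIG_SETMASK, &saved, NULL); /* "irqrestore" */
    }
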
@@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;

-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-    return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
+    return sysfs_emit(buf, "%llu\n",
+              amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
 }

 /**
@@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;

-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-    return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
+    return sysfs_emit(buf, "%llu\n",
+              amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
 }

 /**
@@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 /**
  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
  * @start: start address of the range in VRAM
  * @size: size of the range
  *
- * Reserve memory from start addess with the specified size in VRAM
+ * Reserve memory from start address with the specified size in VRAM
 */
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                   uint64_t start, uint64_t size)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     struct amdgpu_vram_reservation *rsv;

     rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
@@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,

     spin_lock(&mgr->lock);
     list_add_tail(&mgr->reservations_pending, &rsv->node);
-    amdgpu_vram_mgr_do_reserve(man);
+    amdgpu_vram_mgr_do_reserve(&mgr->manager);
     spin_unlock(&mgr->lock);

     return 0;
@@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
 /**
  * amdgpu_vram_mgr_query_page_status - query the reservation status
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
@@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
 * 0: the page has been reserved
 * -ENOENT: the input page is not a reservation
 */
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                       uint64_t start)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     struct amdgpu_vram_reservation *rsv;
     int ret;

@@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
 /**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in this domain.
 */
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
     return atomic64_read(&mgr->usage);
 }

 /**
  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM
 */
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
     return atomic64_read(&mgr->vis_usage);
 }

@@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
     spin_unlock(&mgr->lock);

     drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
-           man->size, amdgpu_vram_mgr_usage(man) >> 20,
-           amdgpu_vram_mgr_vis_usage(man) >> 20);
+           man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
+           amdgpu_vram_mgr_vis_usage(mgr) >> 20);
 }

 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {

@@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = {
     &amdgpu_xgmi_hive_id,
     NULL
 };
+ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);

 static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
                       struct attribute *attr, char *buf)
@@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
 struct kobj_type amdgpu_xgmi_hive_type = {
     .release = amdgpu_xgmi_hive_release,
     .sysfs_ops = &amdgpu_xgmi_hive_ops,
-    .default_attrs = amdgpu_xgmi_hive_attrs,
+    .default_groups = amdgpu_xgmi_hive_groups,
 };

 static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,

@@ -989,7 +989,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
     if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
         goto skip_pin_bo;

-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;

@@ -1060,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 {
     adev->gfxhub.funcs->gart_disable(adev);
     adev->mmhub.funcs->gart_disable(adev);
-    amdgpu_gart_table_vram_unpin(adev);
 }

 static int gmc_v10_0_hw_fini(void *handle)

@@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;

@@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
     WREG32(mmVM_L2_CNTL3,
            VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
            (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
-    amdgpu_gart_table_vram_unpin(adev);
 }

 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,

@@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;

@@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
     tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
     WREG32(mmVM_L2_CNTL, tmp);
     WREG32(mmVM_L2_CNTL2, 0);
-    amdgpu_gart_table_vram_unpin(adev);
 }

 /**

@@ -844,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;

@@ -999,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
     tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
     WREG32(mmVM_L2_CNTL, tmp);
     WREG32(mmVM_L2_CNTL2, 0);
-    amdgpu_gart_table_vram_unpin(adev);
 }

 /**

@@ -72,6 +72,9 @@
 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                       0x049d
 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX              2

+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2          0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
+

 static const char *gfxhub_client_ids[] = {
     "CB",
@@ -1134,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
     u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
     unsigned size;

+    /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
+
     if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
         size = AMDGPU_VBIOS_VGA_ALLOCATION;
     } else {
@@ -1142,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
         switch (adev->ip_versions[DCE_HWIP][0]) {
         case IP_VERSION(1, 0, 0):
         case IP_VERSION(1, 0, 1):
-        case IP_VERSION(2, 1, 0):
             viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
             size = (REG_GET_FIELD(viewport,
                           HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1150,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
                           HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                 4);
             break;
+        case IP_VERSION(2, 1, 0):
+            viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
+            size = (REG_GET_FIELD(viewport,
+                          HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+                    REG_GET_FIELD(viewport,
+                          HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+                4);
+            break;
         default:
             viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
             size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
@@ -1743,7 +1755,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
     if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
         goto skip_pin_bo;

-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;

@@ -1821,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
     adev->gfxhub.funcs->gart_disable(adev);
     adev->mmhub.funcs->gart_disable(adev);
-    amdgpu_gart_table_vram_unpin(adev);
 }

 static int gmc_v9_0_hw_fini(void *handle)

@@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
                 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
         }
+    } else if (req == IDH_REQ_GPU_INIT_DATA){
+        /* Dummy REQ_GPU_INIT_DATA handling */
+        r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
+        /* version set to 0 since dummy */
+        adev->virt.req_init_data_ver = 0;
     }

     return 0;
@@ -381,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
     amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 }

+static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
+{
+    return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
     .req_full_gpu = xgpu_ai_request_full_gpu_access,
     .rel_full_gpu = xgpu_ai_release_full_gpu_access,
     .reset_gpu = xgpu_ai_request_reset,
     .wait_reset = NULL,
     .trans_msg = xgpu_ai_mailbox_trans_msg,
+    .req_init_data = xgpu_ai_request_init_data,
 };

@@ -35,6 +35,7 @@ enum idh_request {
     IDH_REQ_GPU_FINI_ACCESS,
     IDH_REL_GPU_FINI_ACCESS,
     IDH_REQ_GPU_RESET_ACCESS,
+    IDH_REQ_GPU_INIT_DATA,

     IDH_LOG_VF_ERROR = 200,
     IDH_READY_TO_RESET = 201,
@@ -48,6 +49,7 @@ enum idh_event {
     IDH_SUCCESS,
     IDH_FAIL,
     IDH_QUERY_ALIVE,
+    IDH_REQ_GPU_INIT_DATA_READY,

     IDH_TEXT_MESSAGE = 255,
 };

@@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
             return -ENODEV;
         /* same everything but the other direction */
         props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+        if (!props2)
+            return -ENOMEM;
+
         props2->node_from = id_to;
         props2->node_to = id_from;
         props2->kobj = NULL;

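The kfd_crat fix checks the kmemdup() result before dereferencing it. A runnable user-space sketch of the same duplicate-then-check pattern (memdup() here is a hypothetical stand-in for kmemdup(), not a kernel API):

    #include <stdlib.h>
    #include <string.h>

    /* Duplicate len bytes of src; the caller must handle a NULL return
     * instead of dereferencing it, exactly as the fix above does. */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (!p)
            return NULL;    /* the kernel path returns -ENOMEM here */
        return memcpy(p, src, len);
    }
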
@@ -68,20 +68,20 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
     case IP_VERSION(4, 0, 1):/* VEGA12 */
     case IP_VERSION(4, 1, 0):/* RAVEN */
     case IP_VERSION(4, 1, 1):/* RAVEN */
-    case IP_VERSION(4, 1, 2):/* RENIOR */
+    case IP_VERSION(4, 1, 2):/* RENOIR */
     case IP_VERSION(5, 2, 1):/* VANGOGH */
     case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
         kfd->device_info.num_sdma_queues_per_engine = 2;
         break;
     case IP_VERSION(4, 2, 0):/* VEGA20 */
-    case IP_VERSION(4, 2, 2):/* ARCTUTUS */
+    case IP_VERSION(4, 2, 2):/* ARCTURUS */
     case IP_VERSION(4, 4, 0):/* ALDEBARAN */
     case IP_VERSION(5, 0, 0):/* NAVI10 */
     case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
     case IP_VERSION(5, 0, 2):/* NAVI14 */
     case IP_VERSION(5, 0, 5):/* NAVI12 */
     case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
-    case IP_VERSION(5, 2, 2):/* NAVY_FLOUDER */
+    case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
     case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
     case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
         kfd->device_info.num_sdma_queues_per_engine = 8;

@@ -1004,14 +1004,17 @@ static void uninitialize(struct device_queue_manager *dqm)

 static int start_nocpsch(struct device_queue_manager *dqm)
 {
+    int r = 0;
+
     pr_info("SW scheduler is used");
     init_interrupts(dqm);

     if (dqm->dev->adev->asic_type == CHIP_HAWAII)
-        return pm_init(&dqm->packet_mgr, dqm);
-    dqm->sched_running = true;
+        r = pm_init(&dqm->packet_mgr, dqm);
+    if (!r)
+        dqm->sched_running = true;

-    return 0;
+    return r;
 }

 static int stop_nocpsch(struct device_queue_manager *dqm)

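The Hawaii fix above restructures start_nocpsch() so the scheduler is only marked running when pm_init() actually succeeds, and the error is propagated instead of being swallowed by the early return. A compact, compilable sketch of that control flow with stand-in types (not the driver's):

    #include <stdbool.h>

    struct dqm { bool sched_running; };

    static int pm_init_stub(void) { return 0; /* 0 on success */ }

    static int start_nocpsch(struct dqm *dqm, bool is_hawaii)
    {
        int r = 0;

        if (is_hawaii)
            r = pm_init_stub();
        if (!r)
            dqm->sched_running = true;   /* only on success */

        return r;                        /* propagate the failure */
    }
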
@@ -197,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
     */
     return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
         source_id == SOC15_INTSRC_SDMA_TRAP ||
+        source_id == SOC15_INTSRC_SDMA_ECC ||
         source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
         source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
         ((client_id == SOC15_IH_CLIENTID_VMC ||

@@ -461,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = {
     &attr_queue_gpuid,
     NULL
 };
+ATTRIBUTE_GROUPS(procfs_queue);

 static const struct sysfs_ops procfs_queue_ops = {
     .show = kfd_procfs_queue_show,
@@ -468,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {

 static struct kobj_type procfs_queue_type = {
     .sysfs_ops = &procfs_queue_ops,
-    .default_attrs = procfs_queue_attrs,
+    .default_groups = procfs_queue_groups,
 };

 static const struct sysfs_ops procfs_stats_ops = {

@@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange)
     pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
          prange, prange->start, prange->last);

-    list_add_tail(&prange->list, &prange->svms->list);
+    list_move_tail(&prange->list, &prange->svms->list);
     prange->it_node.start = prange->start;
     prange->it_node.last = prange->last;
     interval_tree_insert(&prange->it_node, &prange->svms->objects);
@@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
     prange->last = last;
     INIT_LIST_HEAD(&prange->list);
     INIT_LIST_HEAD(&prange->update_list);
-    INIT_LIST_HEAD(&prange->remove_list);
-    INIT_LIST_HEAD(&prange->insert_list);
     INIT_LIST_HEAD(&prange->svm_bo_list);
     INIT_LIST_HEAD(&prange->deferred_list);
     INIT_LIST_HEAD(&prange->child_list);
@@ -1018,7 +1016,7 @@ svm_range_split_tail(struct svm_range *prange,
     int r = svm_range_split(prange, prange->start, new_last, &tail);

     if (!r)
-        list_add(&tail->insert_list, insert_list);
+        list_add(&tail->list, insert_list);
     return r;
 }

@@ -1030,7 +1028,7 @@ svm_range_split_head(struct svm_range *prange,
     int r = svm_range_split(prange, new_start, prange->last, &head);

     if (!r)
-        list_add(&head->insert_list, insert_list);
+        list_add(&head->list, insert_list);
     return r;
 }

@@ -1898,8 +1896,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
             goto out;
         }

-        list_add(&old->remove_list, remove_list);
-        list_add(&prange->insert_list, insert_list);
+        list_add(&old->update_list, remove_list);
+        list_add(&prange->list, insert_list);
         list_add(&prange->update_list, update_list);

         if (node->start < start) {
@@ -1931,7 +1929,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
             goto out;
         }

-        list_add(&prange->insert_list, insert_list);
+        list_add(&prange->list, insert_list);
         list_add(&prange->update_list, update_list);
     }

@@ -1946,13 +1944,13 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
             r = -ENOMEM;
             goto out;
         }
-        list_add(&prange->insert_list, insert_list);
+        list_add(&prange->list, insert_list);
         list_add(&prange->update_list, update_list);
     }

 out:
     if (r)
-        list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
+        list_for_each_entry_safe(prange, tmp, insert_list, list)
             svm_range_free(prange);

     return r;
@@ -3236,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
         goto out;
     }
     /* Apply changes as a transaction */
-    list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
+    list_for_each_entry_safe(prange, next, &insert_list, list) {
         svm_range_add_to_svms(prange);
         svm_range_add_notifier_locked(mm, prange);
     }
@@ -3244,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
         svm_range_apply_attrs(p, prange, nattr, attrs);
         /* TODO: unmap ranges from GPU that lost access */
     }
-    list_for_each_entry_safe(prange, next, &remove_list,
-                remove_list) {
+    list_for_each_entry_safe(prange, next, &remove_list, update_list) {
         pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
              prange->svms, prange, prange->start,
              prange->last);

@@ -76,8 +76,6 @@ struct svm_work_list_item {
 *              aligned, page size is (last - start + 1)
 * @list:       link list node, used to scan all ranges of svms
 * @update_list:link list node used to add to update_list
- * @remove_list:link list node used to add to remove list
- * @insert_list:link list node used to add to insert list
 * @mapping:    bo_va mapping structure to create and update GPU page table
 * @npages:     number of pages
 * @dma_addr:   dma mapping address on each GPU for system memory physical page
@@ -113,8 +111,6 @@ struct svm_range {
     struct interval_tree_node it_node;
     struct list_head list;
     struct list_head update_list;
-    struct list_head remove_list;
-    struct list_head insert_list;
     uint64_t npages;
     dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
     struct ttm_resource *ttm_res;

@@ -658,7 +658,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
     struct drm_connector_list_iter iter;
     struct dc_link *link;
     uint8_t link_index = 0;
-    struct drm_device *dev = adev->dm.ddev;
+    struct drm_device *dev;

     if (adev == NULL)
         return;
@@ -675,6 +675,7 @@

     link_index = notify->link_index;
     link = adev->dm.dc->links[link_index];
+    dev = adev->dm.ddev;

     drm_connector_list_iter_begin(dev, &iter);
     drm_for_each_connector_iter(connector, &iter) {
@@ -1161,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
     return 0;
 }

+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+    struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+    enum dmub_status status;
+    bool init;
+
+    if (!dmub_srv) {
+        /* DMUB isn't supported on the ASIC. */
+        return;
+    }
+
+    status = dmub_srv_is_hw_init(dmub_srv, &init);
+    if (status != DMUB_STATUS_OK)
+        DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+    if (status == DMUB_STATUS_OK && init) {
+        /* Wait for firmware load to finish. */
+        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+        if (status != DMUB_STATUS_OK)
+            DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+    } else {
+        /* Perform the full hardware initialization. */
+        dm_dmub_hw_init(adev);
+    }
+}
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
@@ -2637,9 +2664,7 @@ static int dm_resume(void *handle)
         amdgpu_dm_outbox_init(adev);

     /* Before powering on DC we need to re-initialize DMUB. */
-    r = dm_dmub_hw_init(adev);
-    if (r)
-        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+    dm_dmub_hw_resume(adev);

     /* power on hardware */
     dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -6073,6 +6098,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
                             struct dsc_dec_dpcd_caps *dsc_caps)
 {
     stream->timing.flags.DSC = 0;
+    dsc_caps->is_dsc_supported = false;

     if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
         sink->sink_signal == SIGNAL_TYPE_EDP)) {
@@ -10737,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc,

         dm_new_plane_state->dc_state = dc_new_plane_state;

+        dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
         /* Tell DC to do a full surface update every time there
          * is a plane change. Inefficient, but works for now.
          */
@@ -10889,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
     enum dc_status status;
     int ret, i;
     bool lock_and_validation_needed = false;
-    struct dm_crtc_state *dm_old_crtc_state;
+    struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
     struct dsc_mst_fairness_vars vars[MAX_PIPES];
     struct drm_dp_mst_topology_state *mst_state;
@@ -11071,6 +11099,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
             goto fail;
         }

+        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+            dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+            if (dm_new_crtc_state->mpo_requested)
+                DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+        }
+
         /* Check cursor planes scaling */
         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
             ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);

@@ -626,6 +626,8 @@ struct dm_crtc_state {
     bool cm_has_degamma;
     bool cm_is_degamma_srgb;

+    bool mpo_requested;
+
     int update_type;
     int active_planes;

@@ -119,6 +119,12 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,

     result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);

+    if (result == VBIOSSMC_Result_Failed) {
+        ASSERT(0);
+        REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+        return -1;
+    }
+
     if (IS_SMU_TIMEOUT(result)) {
         ASSERT(0);
         dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);

@ -3971,102 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
|
|||
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
|
||||
{
|
||||
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
struct link_encoder *link_enc = NULL;
|
||||
#endif
|
||||
struct cp_psp_stream_config config = {0};
|
||||
enum dp_panel_mode panel_mode =
|
||||
dp_get_panel_mode(pipe_ctx->stream->link);
|
||||
|
||||
if (cp_psp && cp_psp->funcs.update_stream_config) {
|
||||
struct cp_psp_stream_config config = {0};
|
||||
enum dp_panel_mode panel_mode =
|
||||
dp_get_panel_mode(pipe_ctx->stream->link);
|
||||
if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
|
||||
return;
|
||||
|
||||
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
|
||||
/*stream_enc_inst*/
|
||||
config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
|
||||
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
|
||||
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
|
||||
+        link_enc = pipe_ctx->stream->link->link_enc;
+    else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+            pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign)
+        link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+                pipe_ctx->stream->ctx->dc,
+                pipe_ctx->stream);
+    ASSERT(link_enc);
+    if (link_enc == NULL)
+        return;
 
     /* otg instance */
     config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
 
     /* dig front end */
     config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
 
     /* stream encoder index */
+    config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-    config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-
-    if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
-            pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-        if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
-            link_enc = pipe_ctx->stream->link->link_enc;
-        else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
-            if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
-                link_enc = link_enc_cfg_get_link_enc_used_by_stream(
-                        pipe_ctx->stream->ctx->dc,
-                        pipe_ctx->stream);
-            }
-        ASSERT(link_enc);
-
-        // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
-        config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-
-        // Add flag to guard new A0 DIG mapping
-        if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
-                pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) {
-            config.dig_be = link_enc->preferred_engine;
-            config.dio_output_type = pipe_ctx->stream->link->ep_type;
-            config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-        } else {
-            config.dio_output_type = 0;
-            config.dio_output_idx = 0;
-        }
-
-        // Add flag to guard B0 implementation
-        if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
-                link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
-            if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
-                // enum ID 1-4 maps to DPIA PHY ID 0-3
-                config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
-            } else { // for non DPIA mode over B0, ABCDE maps to 01564
-
-                switch (link_enc->transmitter) {
-                case TRANSMITTER_UNIPHY_A:
-                    config.phy_idx = 0;
-                    break;
-                case TRANSMITTER_UNIPHY_B:
-                    config.phy_idx = 1;
-                    break;
-                case TRANSMITTER_UNIPHY_C:
-                    config.phy_idx = 5;
-                    break;
-                case TRANSMITTER_UNIPHY_D:
-                    config.phy_idx = 6;
-                    break;
-                case TRANSMITTER_UNIPHY_E:
-                    config.phy_idx = 4;
-                    break;
-                default:
-                    config.phy_idx = 0;
-                    break;
-                }
-
-            }
-        }
-    } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
-        link_enc = link_enc_cfg_get_link_enc_used_by_stream(
-                pipe_ctx->stream->ctx->dc,
-                pipe_ctx->stream);
-        config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
-    }
-    ASSERT(link_enc);
-    if (link_enc)
-        config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-    if (is_dp_128b_132b_signal(pipe_ctx)) {
-        config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
-
-        config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-        config.dp2_enabled = 1;
-    }
+    if (is_dp_128b_132b_signal(pipe_ctx))
+        config.stream_enc_idx =
+                pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
 #endif
-    config.dpms_off = dpms_off;
-    config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
-    config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
-    config.mst_enabled = (pipe_ctx->stream->signal ==
-            SIGNAL_TYPE_DISPLAY_PORT_MST);
-    cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
-}
+
+    /* dig back end */
+    config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
+
+    /* link encoder index */
+    config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+    if (is_dp_128b_132b_signal(pipe_ctx))
+        config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+#endif
+    /* dio output index */
+    config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+    /* phy index */
+    config.phy_idx = resource_transmitter_to_phy_idx(
+            pipe_ctx->stream->link->dc, link_enc->transmitter);
+    if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+        /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
+        config.phy_idx = 0;
+
+    /* stream properties */
+    config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
+    config.mst_enabled = (pipe_ctx->stream->signal ==
+            SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+    config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+#endif
+    config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
+            1 : 0;
+    config.dpms_off = dpms_off;
+
+    /* dm stream context */
+    config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+
+    cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+}
 #endif
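The reworked retrieval above reduces to a two-way dispatch on the endpoint type: a fixed PHY endpoint carries its own encoder, while a USB4 DPIA endpoint gets one assigned at runtime. A minimal standalone sketch of that shape, with stand-in types and names rather than the DC API:

#include <stdio.h>
#include <stddef.h>

enum ep_type { ENDPOINT_PHY, ENDPOINT_USB4_DPIA };

struct link_enc { const char *name; };

static struct link_enc fixed_enc = { "fixed PHY encoder" };
static struct link_enc assigned_enc = { "runtime-assigned encoder" };

/* stand-in for link_enc_cfg_get_link_enc_used_by_stream() */
static struct link_enc *lookup_assigned(void)
{
    return &assigned_enc;
}

static struct link_enc *get_link_enc(enum ep_type type)
{
    if (type == ENDPOINT_PHY)
        return &fixed_enc;          /* encoder is hardwired to the link */
    if (type == ENDPOINT_USB4_DPIA)
        return lookup_assigned();   /* DPIA encoders are assigned per stream */
    return NULL;                    /* caller must check, as the patch does */
}

int main(void)
{
    printf("%s\n", get_link_enc(ENDPOINT_USB4_DPIA)->name);
    return 0;
}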
@@ -3216,3 +3216,36 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
     return hpo_dp_link_enc;
 }
 #endif
+
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
+{
+    /* TODO - get transmitter to phy idx mapping from DMUB */
+    uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+    if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
+            dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+        switch (transmitter) {
+        case TRANSMITTER_UNIPHY_A:
+            phy_idx = 0;
+            break;
+        case TRANSMITTER_UNIPHY_B:
+            phy_idx = 1;
+            break;
+        case TRANSMITTER_UNIPHY_C:
+            phy_idx = 5;
+            break;
+        case TRANSMITTER_UNIPHY_D:
+            phy_idx = 6;
+            break;
+        case TRANSMITTER_UNIPHY_E:
+            phy_idx = 4;
+            break;
+        default:
+            phy_idx = 0;
+            break;
+        }
+    }
+#endif
+    return phy_idx;
+}
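The switch above is a fixed remap table. A standalone sketch of the same mapping; the enum values here are illustrative, not the DC definitions:

#include <stdio.h>
#include <stdint.h>

enum transmitter { UNIPHY_A, UNIPHY_B, UNIPHY_C, UNIPHY_D, UNIPHY_E };

static uint8_t phy_idx_b0(enum transmitter t)
{
    /* ABCDE -> 01564: C and D sit behind E on YELLOW_CARP_B0 parts */
    static const uint8_t map[] = { 0, 1, 5, 6, 4 };

    return t <= UNIPHY_E ? map[t] : 0;
}

int main(void)
{
    for (int t = UNIPHY_A; t <= UNIPHY_E; t++)
        printf("UNIPHY_%c -> phy %d\n", 'A' + t, phy_idx_b0(t));
    return 0;
}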
@@ -1365,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
     uint32_t opp_id_src1 = OPP_ID_INVALID;
 
     // Step 1: To find out which OPTC is running & OPTC DSC is ON
-    for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) {
+    // We can't use res_pool->res_cap->num_timing_generator to check,
+    // because it records the default pipe count built into the driver,
+    // not the display pipes of the current chip.
+    // Some ASICs are fused to fewer display pipes than the default setting.
+    // In the dcnxx_resource_construct function, the driver obtains the real information.
+    for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
         uint32_t optc_dsc_state = 0;
         struct timing_generator *tg = dc->res_pool->timing_generators[i];
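Why the loop bound matters, in a standalone sketch (the counts are made up): the static cap is the design-time pipe count, while the runtime count reflects what the fused chip actually exposes.

#include <stdio.h>

#define RES_CAP_NUM_TG 4        /* default built into the driver */

int main(void)
{
    const char *tg[RES_CAP_NUM_TG] = { "OPTC0", "OPTC1", NULL, NULL };
    int timing_generator_count = 2; /* real count obtained at construct time */

    /* bounding by the runtime count never touches fused-off pipes */
    for (int i = 0; i < timing_generator_count; i++)
        printf("probing %s\n", tg[i]);
    return 0;
}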
@@ -100,6 +100,35 @@ static uint8_t phy_id_from_transmitter(enum transmitter t)
     return phy_id;
 }
 
+static bool has_query_dp_alt(struct link_encoder *enc)
+{
+    struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+    /* Supports development firmware and firmware >= 4.0.11 */
+    return dc_dmub_srv &&
+           !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+             dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10));
+}
+
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+                                   union dmub_rb_cmd *cmd)
+{
+    struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+    struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+    memset(cmd, 0, sizeof(*cmd));
+    cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+    cmd->query_dp_alt.header.sub_type =
+            DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+    cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+    cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+    if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
+        return false;
+
+    return true;
+}
+
 void dcn31_link_encoder_set_dio_phy_mux(
     struct link_encoder *enc,
     enum encoder_type_select sel,
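The version gate reads naturally as an excluded window: development firmware reports 0.0.0 and passes, while releases in [4.0.0, 4.0.10] lack the query. A toy model; the packing below only mirrors what DMUB_FW_VERSION is assumed to do:

#include <stdio.h>
#include <stdint.h>

#define FW_VERSION(maj, min, rev) \
    ((uint32_t)(((maj) << 24) | ((min) << 16) | (rev)))

static int has_query_dp_alt(uint32_t fw)
{
    /* supports development firmware and firmware >= 4.0.11 */
    return !(fw >= FW_VERSION(4, 0, 0) && fw <= FW_VERSION(4, 0, 10));
}

int main(void)
{
    printf("dev 0.0.0: %d\n", has_query_dp_alt(FW_VERSION(0, 0, 0)));  /* 1 */
    printf("4.0.10:    %d\n", has_query_dp_alt(FW_VERSION(4, 0, 10))); /* 0 */
    printf("4.0.11:    %d\n", has_query_dp_alt(FW_VERSION(4, 0, 11))); /* 1 */
    return 0;
}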
@@ -569,45 +598,90 @@ void dcn31_link_encoder_disable_output(
 bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
 {
     struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-    struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
     union dmub_rb_cmd cmd;
-    bool is_usb_c_alt_mode = false;
+    uint32_t dp_alt_mode_disable;
 
-    if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
-        memset(&cmd, 0, sizeof(cmd));
-        cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
-        cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
-        cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
-        cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+    /* Only applicable to USB-C PHY. */
+    if (!enc->features.flags.bits.DP_IS_USB_C)
+        return false;
 
-        if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+    /*
+     * Use the new interface from DMCUB if available.
+     * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+     */
+    if (has_query_dp_alt(enc)) {
+        if (!query_dp_alt_from_dmub(enc, &cmd))
             return false;
 
-        is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+        return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
     }
 
-    return is_usb_c_alt_mode;
+    /* Legacy path, avoid if possible. */
+    if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+        REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+                &dp_alt_mode_disable);
+    } else {
+        /*
+         * B0 phys use a new set of registers to check whether alt mode is disabled.
+         * if value == 1 alt mode is disabled, otherwise it is enabled.
+         */
+        if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+                (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+                (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+            REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+                    &dp_alt_mode_disable);
+        } else {
+            REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+                    &dp_alt_mode_disable);
+        }
+    }
+
+    return (dp_alt_mode_disable == 0);
 }
 
 void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
 {
     struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-    struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
     union dmub_rb_cmd cmd;
+    uint32_t is_in_usb_c_dp4_mode = 0;
 
     dcn10_link_encoder_get_max_link_cap(enc, link_settings);
 
-    if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
-        memset(&cmd, 0, sizeof(cmd));
-        cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
-        cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
-        cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
-        cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+    /* Take the link cap directly if not USB */
+    if (!enc->features.flags.bits.DP_IS_USB_C)
+        return;
 
-        if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+    /*
+     * Use the new interface from DMCUB if available.
+     * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+     */
+    if (has_query_dp_alt(enc)) {
+        if (!query_dp_alt_from_dmub(enc, &cmd))
             return;
 
-        if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0)
+        if (cmd.query_dp_alt.data.is_usb &&
+                cmd.query_dp_alt.data.is_dp4 == 0)
             link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+
+        return;
     }
+
+    /* Legacy path, avoid if possible. */
+    if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+        REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+                &is_in_usb_c_dp4_mode);
+    } else {
+        if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+                (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+                (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+            REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+                    &is_in_usb_c_dp4_mode);
+        } else {
+            REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+                    &is_in_usb_c_dp4_mode);
+        }
+    }
+
+    if (!is_in_usb_c_dp4_mode)
+        link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
 }
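In the legacy fallback, the register bank depends on the transmitter: on B0 parts only PHYs A, B and E kept the RDPCSTX bank. A sketch of just that selection, with register names copied from the hunk and the structure simplified:

#include <stdio.h>

enum transmitter { UNIPHY_A, UNIPHY_B, UNIPHY_C, UNIPHY_D, UNIPHY_E };

static const char *dpalt_reg_bank(int is_b0, enum transmitter t)
{
    if (!is_b0 || t == UNIPHY_A || t == UNIPHY_B || t == UNIPHY_E)
        return "RDPCSTX_PHY_CNTL6";
    return "RDPCSPIPE_PHY_CNTL6";   /* B0 moved C and D here */
}

int main(void)
{
    printf("pre-B0 UNIPHY_C: %s\n", dpalt_reg_bank(0, UNIPHY_C));
    printf("B0 UNIPHY_C:     %s\n", dpalt_reg_bank(1, UNIPHY_C));
    return 0;
}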
@@ -1984,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
     pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
     pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 
-    if (dc->config.forced_clocks) {
+    if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
         pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
         pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
     }
@@ -34,12 +34,12 @@ struct cp_psp_stream_config {
     uint8_t dig_fe;
     uint8_t link_enc_idx;
     uint8_t stream_enc_idx;
-    uint8_t phy_idx;
     uint8_t dio_output_idx;
-    uint8_t dio_output_type;
+    uint8_t phy_idx;
     uint8_t assr_enabled;
     uint8_t mst_enabled;
     uint8_t dp2_enabled;
+    uint8_t usb4_enabled;
     void *dm_stream_ctx;
     bool dpms_off;
 };
@@ -208,4 +208,6 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
         const struct dc_link *link);
 #endif
 
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
     uint8_t rev;
     uint8_t assr_enabled;
     uint8_t mst_enabled;
+    uint8_t usb4_enabled;
 };
 
 struct mod_hdcp_hdmi {
@@ -249,7 +250,6 @@ struct mod_hdcp_link {
     uint8_t ddc_line;
     uint8_t link_enc_idx;
     uint8_t phy_idx;
-    uint8_t dio_output_type;
     uint8_t dio_output_id;
     uint8_t hdcp_supported_informational;
     union {
@@ -1625,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
 
 static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
 {
-    return smu_cmn_send_smc_msg_with_param(smu,
-                    SMU_MSG_GmiPwrDnControl,
-                    en ? 0 : 1,
-                    NULL);
+    struct amdgpu_device *adev = smu->adev;
+
+    /* The message only works on master die and NACK will be sent
+       back for other dies, only send it on master die */
+    if (!adev->smuio.funcs->get_socket_id(adev) &&
+            !adev->smuio.funcs->get_die_id(adev))
+        return smu_cmn_send_smc_msg_with_param(smu,
+                        SMU_MSG_GmiPwrDnControl,
+                        en ? 0 : 1,
+                        NULL);
+    else
+        return 0;
 }
 
 static const struct throttling_logging_label {
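A minimal model of the gate: the message goes out only when both the socket and die ID read zero; every other die succeeds without touching the SMU. The ID plumbing here is a stand-in, not the smuio interface:

#include <stdio.h>

static int send_gmi_pwr_dn_control(void)
{
    puts("GmiPwrDnControl sent");
    return 0;
}

static int allow_xgmi_power_down(int socket_id, int die_id)
{
    if (!socket_id && !die_id)
        return send_gmi_pwr_dn_control();   /* master die only */
    return 0;   /* others skip the message instead of eating a NACK */
}

int main(void)
{
    allow_xgmi_power_down(0, 0);    /* prints */
    allow_xgmi_power_down(0, 1);    /* silent success */
    return 0;
}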
@@ -828,8 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
     }
 
     if (!crtc_state->enable && !can_update_disabled) {
-        drm_dbg_kms(plane_state->crtc->dev,
-                "Cannot update plane of a disabled CRTC.\n");
+        drm_dbg_kms(plane_state->plane->dev,
+                "Cannot update plane of a disabled CRTC.\n");
         return -EINVAL;
     }
 
@@ -839,8 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
     hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
     vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
     if (hscale < 0 || vscale < 0) {
-        drm_dbg_kms(plane_state->crtc->dev,
-                "Invalid scaling of plane\n");
+        drm_dbg_kms(plane_state->plane->dev,
+                "Invalid scaling of plane\n");
         drm_rect_debug_print("src: ", &plane_state->src, true);
         drm_rect_debug_print("dst: ", &plane_state->dst, false);
         return -ERANGE;
@@ -864,8 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
         return 0;
 
     if (!can_position && !drm_rect_equals(dst, &clip)) {
-        drm_dbg_kms(plane_state->crtc->dev,
-                "Plane must cover entire CRTC\n");
+        drm_dbg_kms(plane_state->plane->dev,
+                "Plane must cover entire CRTC\n");
         drm_rect_debug_print("dst: ", dst, false);
         drm_rect_debug_print("clip: ", &clip, false);
         return -EINVAL;
@@ -1016,7 +1016,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
      * it's in self refresh mode and needs to be fully disabled.
      */
     return old_state->active ||
-           (old_state->self_refresh_active && !new_state->enable) ||
+           (old_state->self_refresh_active && !new_state->active) ||
            new_state->self_refresh_active;
 }
 
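The one-word change matters in the DPMS-off case, where the CRTC stays enabled but goes inactive. A self-contained comparison of both predicates:

#include <stdbool.h>
#include <stdio.h>

static bool needs_disable_old(bool old_active, bool old_sr,
                              bool new_enable, bool new_sr)
{
    return old_active || (old_sr && !new_enable) || new_sr;
}

static bool needs_disable_new(bool old_active, bool old_sr,
                              bool new_active, bool new_sr)
{
    return old_active || (old_sr && !new_active) || new_sr;
}

int main(void)
{
    /* DPMS off out of self refresh: enable stays true, active drops */
    bool new_enable = true, new_active = false;

    printf("old: %d  new: %d\n",
           needs_disable_old(false, true, new_enable, false),
           needs_disable_new(false, true, new_active, false));
    return 0;   /* old: 0 (CRTC left half-on), new: 1 (fully disabled) */
}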
@@ -209,11 +209,11 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
     ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
     if (ret)
         return ret;
-    src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
 
     ret = drm_gem_fb_vmap(fb, map, data);
     if (ret)
         goto out_drm_gem_fb_end_cpu_access;
+    src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
 
     switch (fb->format->format) {
     case DRM_FORMAT_RGB565:
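The bug was purely one of ordering: the source address was read from the mapping before the mapping existed. A stub-level illustration (the helpers stand in for the GEM framebuffer API):

#include <stdio.h>

struct iosys_map_stub { void *vaddr; };

static int fake_vmap(struct iosys_map_stub *m)
{
    static char pixels[] = "RGB565 data";

    m->vaddr = pixels;  /* the mapping only exists from here on */
    return 0;
}

int main(void)
{
    struct iosys_map_stub data[1] = { { NULL } };
    void *src;

    /* reading data[0].vaddr here, as before the fix, would yield NULL */
    if (fake_vmap(&data[0]))
        return 1;
    src = data[0].vaddr;    /* after the mapping, as in the fix */
    printf("src = %s\n", (char *)src);
    return 0;
}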
@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
     struct i915_mmap_offset *mmo, *mn;
 
+    if (obj->ops->unmap_virtual)
+        obj->ops->unmap_virtual(obj);
+
     spin_lock(&obj->mmo.lock);
     rbtree_postorder_for_each_entry_safe(mmo, mn,
                          &obj->mmo.offsets, offset) {
@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
     int (*pwrite)(struct drm_i915_gem_object *obj,
               const struct drm_i915_gem_pwrite *arg);
     u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+    void (*unmap_virtual)(struct drm_i915_gem_object *obj);
 
     int (*dmabuf_export)(struct drm_i915_gem_object *obj);
@@ -161,7 +161,6 @@ retry:
 /* Immediately discard the backing storage */
 int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-    drm_gem_free_mmap_offset(&obj->base);
     if (obj->ops->truncate)
         return obj->ops->truncate(obj);
 
@@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
     return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
 }
 
+static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
+{
+    struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+    int err;
+
+    WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+
+    err = i915_ttm_move_notify(bo);
+    if (err)
+        return err;
+
+    return i915_ttm_purge(obj);
+}
+
 static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
 {
     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
@@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
     if (ret)
         return ret;
 
+    if (obj->mm.madv != I915_MADV_WILLNEED) {
+        dma_resv_unlock(bo->base.resv);
+        return VM_FAULT_SIGBUS;
+    }
+
     if (drm_dev_enter(dev, &idx)) {
         ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                            TTM_BO_VM_NUM_PREFAULT);
@@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
     return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+    ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
     .name = "i915_gem_object_ttm",
     .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 
     .get_pages = i915_ttm_get_pages,
     .put_pages = i915_ttm_put_pages,
-    .truncate = i915_ttm_purge,
+    .truncate = i915_ttm_truncate,
     .shrinker_release_pages = i915_ttm_shrinker_release_pages,
 
     .adjust_lru = i915_ttm_adjust_lru,
@@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
     .migrate = i915_ttm_migrate,
 
     .mmap_offset = i915_ttm_mmap_offset,
+    .unmap_virtual = i915_ttm_unmap_virtual,
     .mmap_ops = &vm_ops_ttm,
 };
 
@@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
         }
     }
 
-    if (!obj->ops->mmap_ops) {
-        err = check_absent(addr, obj->base.size);
-        if (err) {
-            pr_err("%s: was not absent\n", obj->mm.region->name);
-            goto out_unmap;
-        }
-    } else {
-        /* ttm allows access to evicted regions by design */
-
-        err = check_present(addr, obj->base.size);
-        if (err) {
-            pr_err("%s: was not present\n", obj->mm.region->name);
-            goto out_unmap;
-        }
+    err = check_absent(addr, obj->base.size);
+    if (err) {
+        pr_err("%s: was not absent\n", obj->mm.region->name);
+        goto out_unmap;
     }
 
 out_unmap:
@@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
 static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
                       struct device *tee_kdev, void *data)
 {
+    struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
     struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+    intel_wakeref_t wakeref;
 
-    intel_pxp_fini_hw(pxp);
+    with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+        intel_pxp_fini_hw(pxp);
 
     mutex_lock(&pxp->tee_mutex);
     pxp->pxp_component = NULL;
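The wakelock fix follows a common pattern: hardware teardown runs only while a runtime-PM reference is actually held, so a suspended device is never woken or touched. A toy model with a plain struct in place of i915:

#include <stdbool.h>
#include <stdio.h>

struct dev_stub { bool in_use; };

static bool pm_get_if_in_use(struct dev_stub *d) { return d->in_use; }
static void pm_put(struct dev_stub *d) { (void)d; }
static void fini_hw(void) { puts("fini_hw"); }

static void unbind(struct dev_stub *d)
{
    if (pm_get_if_in_use(d)) {  /* skip HW access when suspended */
        fini_hw();
        pm_put(d);
    }
}

int main(void)
{
    struct dev_stub suspended = { false }, active = { true };

    unbind(&suspended); /* no output: device left asleep */
    unbind(&active);    /* prints fini_hw */
    return 0;
}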
@@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
         return -EPROBE_DEFER;
 
     phy = platform_get_drvdata(pdev);
-    if (!phy)
+    if (!phy) {
+        put_device(&pdev->dev);
         return -EPROBE_DEFER;
+    }
 
     hdmi->phy = phy;
 
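The one-liner plugs a reference leak: looking up the platform device takes a reference, so the early -EPROBE_DEFER exit must drop it. A counting sketch of the two paths:

#include <stdio.h>

static int refcount = 1;    /* reference taken when the device was looked up */

static void put_device_stub(void)
{
    refcount--;
}

static int get_phy(int have_drvdata)
{
    if (!have_drvdata) {
        put_device_stub();  /* the release the patch adds */
        return -1;          /* stand-in for -EPROBE_DEFER */
    }
    return 0;   /* success keeps the reference until teardown */
}

int main(void)
{
    get_phy(0);
    printf("refs after deferred probe: %d\n", refcount);    /* 0, balanced */
    return 0;
}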
@@ -68,9 +68,11 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
 #if defined(__i386__) || defined(__x86_64__)
     if (caching == ttm_write_combined)
         tmp = pgprot_writecombine(tmp);
+#ifndef CONFIG_UML
     else if (boot_cpu_data.x86 > 3)
         tmp = pgprot_noncached(tmp);
-#endif
+#endif /* CONFIG_UML */
+#endif /* __i386__ || __x86_64__ */
 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
     defined(__powerpc__) || defined(__mips__)
     if (caching == ttm_write_combined)
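The shape of the fix is a nested preprocessor exclusion: the boot_cpu_data access exists only on real x86, so it gets its own guard inside the architecture check, and each #endif is labeled. A compilable miniature with stand-in macros:

#include <stdio.h>

#define ARCH_X86 1
/* #define CONFIG_UML 1 */  /* uncomment to emulate ARCH=um */

int main(void)
{
    const char *prot = "cached";

#if ARCH_X86
#ifndef CONFIG_UML
    prot = "noncached"; /* would need boot_cpu_data, absent on UML */
#endif /* CONFIG_UML */
#endif /* ARCH_X86 */
    printf("%s\n", prot);
    return 0;
}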
@@ -184,6 +184,25 @@ static inline void setindex(int index)
     vga_io_w(VGA_GFX_I, index);
 }
 
+/* Check if the video mode is supported by the driver */
+static inline int check_mode_supported(void)
+{
+    /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
+#if defined(CONFIG_X86)
+    /* only EGA and VGA in 16 color graphic mode are supported */
+    if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
+        screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+        return -ENODEV;
+
+    if (screen_info.orig_video_mode != 0x0D &&  /* 320x200/4 (EGA) */
+        screen_info.orig_video_mode != 0x0E &&  /* 640x200/4 (EGA) */
+        screen_info.orig_video_mode != 0x10 &&  /* 640x350/4 (EGA) */
+        screen_info.orig_video_mode != 0x12)    /* 640x480/4 (VGA) */
+        return -ENODEV;
+#endif
+    return 0;
+}
+
 static void vga16fb_pan_var(struct fb_info *info,
                 struct fb_var_screeninfo *var)
 {
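The probe gate whitelists the four 16-color planar modes the driver can program; everything else (text modes, VESA modes) is rejected before registration. A standalone check using the same INT 10h mode numbers:

#include <stdio.h>

static int check_mode_supported(int mode)
{
    if (mode != 0x0D && /* 320x200/4 (EGA) */
        mode != 0x0E && /* 640x200/4 (EGA) */
        mode != 0x10 && /* 640x350/4 (EGA) */
        mode != 0x12)   /* 640x480/4 (VGA) */
        return -1;  /* stand-in for -ENODEV */
    return 0;
}

int main(void)
{
    printf("mode 0x12 -> %d\n", check_mode_supported(0x12));    /* supported */
    printf("mode 0x03 -> %d\n", check_mode_supported(0x03));    /* text mode, rejected */
    return 0;
}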
@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void)
 
     vga16fb_setup(option);
 #endif
+
+    ret = check_mode_supported();
+    if (ret)
+        return ret;
+
     ret = platform_driver_register(&vga16fb_driver);
 
     if (!ret) {
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
 /*
  * Copyright 2021 Advanced Micro Devices, Inc.
  *