Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Chris Wilson needs the new drm_driver->release callback to make sure the shiny new dma-buf testcases don't oops the driver on unload.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Commit 51a831a772
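For readers who don't have the drm-next side of this merge in front of them: the drm_driver->release callback referenced in the message runs once the last reference to the drm_device is dropped, so teardown that exported dma-bufs may still depend on can be deferred there instead of happening at unbind/unload time. The sketch below only illustrates where such a hook sits in a driver; the example_* names and the private-data layout are assumptions for illustration, not code from this merge.

    /* Illustrative sketch only: wiring a drm_driver->release callback so
     * state that exported dma-bufs may still reference is freed when the
     * last drm_device reference goes away, not at device unbind.
     * example_private/example_release/example_driver are hypothetical.
     */
    #include <linux/slab.h>
    #include <drm/drmP.h>        /* struct drm_driver / drm_device in this era */

    struct example_private {
        void *gem_bookkeeping;   /* stand-in for GEM/dma-buf tracking state */
    };

    static void example_release(struct drm_device *dev)
    {
        struct example_private *priv = dev->dev_private;

        kfree(priv->gem_bookkeeping);   /* no users left once release runs */
        kfree(priv);
    }

    static struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET,
        .release         = example_release,
        /* ... fops, ioctls, GEM/dma-buf callbacks ... */
    };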
@@ -56,6 +56,18 @@ Required properties for V3D:
- interrupts: The interrupt number
    See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt

Required properties for DSI:
- compatible: Should be "brcm,bcm2835-dsi0" or "brcm,bcm2835-dsi1"
- reg: Physical base address and length of the DSI block's registers
- interrupts: The interrupt number
    See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
- clocks: a) phy: The DSI PLL clock feeding the DSI analog PHY
          b) escape: The DSI ESC clock from CPRMAN
          c) pixel: The DSI pixel clock from CPRMAN
- clock-output-names:
    The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
    dsi[01]_ddr2, and dsi[01]_ddr

[1] Documentation/devicetree/bindings/media/video-interfaces.txt

Example:

@@ -99,6 +111,29 @@ dpi: dpi@7e208000 {
    };
};

dsi1: dsi@7e700000 {
    compatible = "brcm,bcm2835-dsi1";
    reg = <0x7e700000 0x8c>;
    interrupts = <2 12>;
    #address-cells = <1>;
    #size-cells = <0>;
    #clock-cells = <1>;

    clocks = <&clocks BCM2835_PLLD_DSI1>,
             <&clocks BCM2835_CLOCK_DSI1E>,
             <&clocks BCM2835_CLOCK_DSI1P>;
    clock-names = "phy", "escape", "pixel";

    clock-output-names = "dsi1_byte", "dsi1_ddr2", "dsi1_ddr";

    pitouchscreen: panel@0 {
        compatible = "raspberrypi,touchscreen";
        reg = <0>;

        <...>
    };
};

vec: vec@7e806000 {
    compatible = "brcm,bcm2835-vec";
    reg = <0x7e806000 0x1000>;

@@ -1,23 +1,19 @@
Qualcomm adreno/snapdragon GPU

Required properties:
- compatible: "qcom,adreno-3xx"
- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
    for example: "qcom,adreno-306.0", "qcom,adreno"
  Note that you need to list the less specific "qcom,adreno" (since this
  is what the device is matched on), in addition to the more specific
  with the chip-id.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu.
- clocks: device clocks
  See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
  * "core_clk"
  * "iface_clk"
  * "mem_iface_clk"
- qcom,chipid: gpu chip-id. Note this may become optional for future
  devices if we can reliably read the chipid from hw
- qcom,gpu-pwrlevels: list of operating points
  - compatible: "qcom,gpu-pwrlevels"
  - for each qcom,gpu-pwrlevel:
    - qcom,gpu-freq: requested gpu clock speed
    - NOTE: downstream android driver defines additional parameters to
      configure memory bandwidth scaling per OPP.
  * "core"
  * "iface"
  * "mem_iface"

Example:

@@ -25,28 +21,18 @@ Example:
    ...

    gpu: qcom,kgsl-3d0@4300000 {
        compatible = "qcom,adreno-3xx";
        compatible = "qcom,adreno-320.2", "qcom,adreno";
        reg = <0x04300000 0x20000>;
        reg-names = "kgsl_3d0_reg_memory";
        interrupts = <GIC_SPI 80 0>;
        interrupt-names = "kgsl_3d0_irq";
        clock-names =
            "core_clk",
            "iface_clk",
            "mem_iface_clk";
            "core",
            "iface",
            "mem_iface";
        clocks =
            <&mmcc GFX3D_CLK>,
            <&mmcc GFX3D_AHB_CLK>,
            <&mmcc MMSS_IMEM_AHB_CLK>;
        qcom,chipid = <0x03020100>;
        qcom,gpu-pwrlevels {
            compatible = "qcom,gpu-pwrlevels";
            qcom,gpu-pwrlevel@0 {
                qcom,gpu-freq = <450000000>;
            };
            qcom,gpu-pwrlevel@1 {
                qcom,gpu-freq = <27000000>;
            };
        };
    };
};

@@ -291,10 +291,17 @@ To use :c:func:`drm_gem_mmap()`, drivers must fill the struct
:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
with a pointer to VM operations.

struct vm_operations_struct \*gem_vm_ops struct
vm_operations_struct { void (\*open)(struct vm_area_struct \* area);
void (\*close)(struct vm_area_struct \* area); int (\*fault)(struct
vm_area_struct \*vma, struct vm_fault \*vmf); };
The VM operations is a :c:type:`struct vm_operations_struct <vm_operations_struct>`
made up of several fields, the more interesting ones being:

.. code-block:: c

    struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_fault *vmf);
    };


The open and close operations must update the GEM object reference
count. Drivers can use the :c:func:`drm_gem_vm_open()` and

@@ -3970,6 +3970,7 @@ S: Maintained
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
F: drivers/dma-buf/sync_*
F: drivers/dma-buf/dma-fence*
F: drivers/dma-buf/sw_sync.c
F: include/linux/sync_file.h
F: include/uapi/linux/sync_file.h

@@ -1709,6 +1709,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                       u32 ip_instance, u32 ring,
                       struct amdgpu_ring **out_ring);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);

@@ -850,16 +850,37 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
            strcpy(fw_name, "amdgpu/fiji_smc.bin");
            break;
        case CHIP_POLARIS11:
            if (type == CGS_UCODE_ID_SMU)
                strcpy(fw_name, "amdgpu/polaris11_smc.bin");
            else if (type == CGS_UCODE_ID_SMU_SK)
            if (type == CGS_UCODE_ID_SMU) {
                if (((adev->pdev->device == 0x67ef) &&
                     ((adev->pdev->revision == 0xe0) ||
                      (adev->pdev->revision == 0xe2) ||
                      (adev->pdev->revision == 0xe5))) ||
                    ((adev->pdev->device == 0x67ff) &&
                     ((adev->pdev->revision == 0xcf) ||
                      (adev->pdev->revision == 0xef) ||
                      (adev->pdev->revision == 0xff))))
                    strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
                else
                    strcpy(fw_name, "amdgpu/polaris11_smc.bin");
            } else if (type == CGS_UCODE_ID_SMU_SK) {
                strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
            }
            break;
        case CHIP_POLARIS10:
            if (type == CGS_UCODE_ID_SMU)
                strcpy(fw_name, "amdgpu/polaris10_smc.bin");
            else if (type == CGS_UCODE_ID_SMU_SK)
            if (type == CGS_UCODE_ID_SMU) {
                if ((adev->pdev->device == 0x67df) &&
                    ((adev->pdev->revision == 0xe0) ||
                     (adev->pdev->revision == 0xe3) ||
                     (adev->pdev->revision == 0xe4) ||
                     (adev->pdev->revision == 0xe5) ||
                     (adev->pdev->revision == 0xe7) ||
                     (adev->pdev->revision == 0xef)))
                    strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
                else
                    strcpy(fw_name, "amdgpu/polaris10_smc.bin");
            } else if (type == CGS_UCODE_ID_SMU_SK) {
                strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
            }
            break;
        case CHIP_POLARIS12:
            strcpy(fw_name, "amdgpu/polaris12_smc.bin");

@@ -344,8 +344,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
                                         u64 num_bytes)
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
    spin_lock(&adev->mm_stats.lock);
    adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);

@@ -374,7 +374,6 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
                      &amdgpu_fb_helper_funcs);

    ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
                             adev->mode_info.num_crtc,
                             AMDGPUFB_CONN_LIMIT);
    if (ret) {
        kfree(rfbdev);

@@ -487,67 +487,50 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map or unmap
 *
 * Update the bo_va directly after setting it's address. Errors are not
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va,
                                    struct list_head *list,
                                    uint32_t operation)
{
    struct ttm_validate_buffer tv, *entry;
    struct amdgpu_bo_list_entry vm_pd;
    struct ww_acquire_ctx ticket;
    struct list_head list, duplicates;
    int r;
    struct ttm_validate_buffer *entry;
    int r = -ERESTARTSYS;

    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&duplicates);

    tv.bo = &bo_va->bo->tbo;
    tv.shared = true;
    list_add(&tv.head, &list);

    amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

    /* Provide duplicates to avoid -EALREADY */
    r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    if (r)
        goto error_print;

    list_for_each_entry(entry, &list, head) {
    list_for_each_entry(entry, list, head) {
        struct amdgpu_bo *bo =
            container_of(entry->bo, struct amdgpu_bo, tbo);

        /* if anything is swapped out don't swap it in here,
           just abort and wait for the next CS */
        if (!amdgpu_bo_gpu_accessible(bo))
            goto error_unreserve;
            goto error;

        if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
            goto error_unreserve;
            goto error;
    }

    r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
                                  NULL);
    if (r)
        goto error_unreserve;
        goto error;

    r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
    if (r)
        goto error_unreserve;
        goto error;

    r = amdgpu_vm_clear_freed(adev, bo_va->vm);
    if (r)
        goto error_unreserve;
        goto error;

    if (operation == AMDGPU_VA_OP_MAP)
        r = amdgpu_vm_bo_update(adev, bo_va, false);

error_unreserve:
    ttm_eu_backoff_reservation(&ticket, &list);

error_print:
error:
    if (r && r != -ERESTARTSYS)
        DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

@@ -564,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    struct amdgpu_bo_list_entry vm_pd;
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    struct list_head list, duplicates;
    struct list_head list;
    uint32_t invalid_flags, va_flags = 0;
    int r = 0;

@@ -602,14 +585,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        return -ENOENT;
    abo = gem_to_amdgpu_bo(gobj);
    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&duplicates);
    tv.bo = &abo->tbo;
    tv.shared = true;
    tv.shared = false;
    list_add(&tv.head, &list);

    amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

    r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r) {
        drm_gem_object_unreference_unlocked(gobj);
        return r;

@@ -640,10 +622,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    default:
        break;
    }
    ttm_eu_backoff_reservation(&ticket, &list);
    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
        !amdgpu_vm_debug)
        amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
        amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
    ttm_eu_backoff_reservation(&ticket, &list);

    drm_gem_object_unreference_unlocked(gobj);
    return r;

@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
{
    struct amdgpu_gtt_mgr *mgr = man->priv;
    struct drm_mm_node *node = mem->mm_node;
    enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
    enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
    enum drm_mm_insert_mode mode;
    unsigned long fpfn, lpfn;
    int r;

@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
    else
        lpfn = man->size;

    if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
        sflags = DRM_MM_SEARCH_BELOW;
        aflags = DRM_MM_CREATE_TOP;
    }
    mode = DRM_MM_INSERT_BEST;
    if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
        mode = DRM_MM_INSERT_HIGH;

    spin_lock(&mgr->lock);
    r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
                                            mem->page_alignment, 0,
                                            fpfn, lpfn, sflags, aflags);
    r = drm_mm_insert_node_in_range(&mgr->mm, node,
                                    mem->num_pages, mem->page_alignment, 0,
                                    fpfn, lpfn, mode);
    spin_unlock(&mgr->lock);

    if (!r) {

@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
    struct amdgpu_bo *bo;
    enum ttm_bo_type type;
    unsigned long page_align;
    u64 initial_bytes_moved;
    size_t acc_size;
    int r;

@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

    if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
        DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "

@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
        WARN_ON(!locked);
    }

    initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
    r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                    &bo->placement, page_align, !kernel, NULL,
                    acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
                    &amdgpu_ttm_bo_destroy);
    if (unlikely(r != 0))
    amdgpu_cs_report_moved_bytes(adev,
        atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);

    if (unlikely(r != 0)) {
        if (!resv)
            ww_mutex_unlock(&bo->tbo.resv->lock);
        return r;
    }

    bo->tbo.priority = ilog2(bo->tbo.num_pages);
    if (kernel)

@@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
            /* XXX select vce level based on ring/task */
            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
            mutex_unlock(&adev->pm.mutex);
            amdgpu_pm_compute_clocks(adev);
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_PG_STATE_UNGATE);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_CG_STATE_UNGATE);
        } else {
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_PG_STATE_GATE);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_CG_STATE_GATE);
            mutex_lock(&adev->pm.mutex);
            adev->pm.dpm.vce_active = false;
            mutex_unlock(&adev->pm.mutex);
            amdgpu_pm_compute_clocks(adev);
        }
        amdgpu_pm_compute_clocks(adev);

    }
}

@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
            amdgpu_dpm_enable_uvd(adev, false);
        } else {
            amdgpu_asic_set_uvd_clocks(adev, 0, 0);
            /* shutdown the UVD block */
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                         AMD_PG_STATE_GATE);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                         AMD_CG_STATE_GATE);
        }
    } else {
        schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);

@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
            amdgpu_dpm_enable_uvd(adev, true);
        } else {
            amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                         AMD_CG_STATE_UNGATE);
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                         AMD_PG_STATE_UNGATE);
        }
    }
}

@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
            amdgpu_dpm_enable_vce(adev, false);
        } else {
            amdgpu_asic_set_vce_clocks(adev, 0, 0);
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_PG_STATE_GATE);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_CG_STATE_GATE);
        }
    } else {
        schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);

@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
            amdgpu_dpm_enable_vce(adev, true);
        } else {
            amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
            amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_CG_STATE_UNGATE);
            amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                         AMD_PG_STATE_UNGATE);

        }
    }
    mutex_unlock(&adev->vce.idle_mutex);

@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
        amdgpu_vm_bo_rmv(adev, bo_va);
        ttm_eu_backoff_reservation(&ticket, &list);
        kfree(bo_va);
        return r;
    }

@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
    struct amdgpu_vram_mgr *mgr = man->priv;
    struct drm_mm *mm = &mgr->mm;
    struct drm_mm_node *nodes;
    enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
    enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
    enum drm_mm_insert_mode mode;
    unsigned long lpfn, num_nodes, pages_per_node, pages_left;
    unsigned i;
    int r;

@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
    if (!nodes)
        return -ENOMEM;

    if (place->flags & TTM_PL_FLAG_TOPDOWN) {
        sflags = DRM_MM_SEARCH_BELOW;
        aflags = DRM_MM_CREATE_TOP;
    }
    mode = DRM_MM_INSERT_BEST;
    if (place->flags & TTM_PL_FLAG_TOPDOWN)
        mode = DRM_MM_INSERT_HIGH;

    pages_left = mem->num_pages;

@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,

        if (pages == pages_per_node)
            alignment = pages_per_node;
        else
            sflags |= DRM_MM_SEARCH_BEST;

        r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
                                                alignment, 0,
                                                place->fpfn, lpfn,
                                                sflags, aflags);
        r = drm_mm_insert_node_in_range(mm, &nodes[i],
                                        pages, alignment, 0,
                                        place->fpfn, lpfn,
                                        mode);
        if (unlikely(r))
            goto error;

@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)

static int ci_upload_firmware(struct amdgpu_device *adev)
{
    struct ci_power_info *pi = ci_get_pi(adev);
    int i, ret;

    if (amdgpu_ci_is_smc_running(adev)) {

@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
    amdgpu_ci_stop_smc_clock(adev);
    amdgpu_ci_reset_smc(adev);

    ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
    ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);

    return ret;

@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,

    if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
        if (amdgpu_new_state->evclk) {
            /* turn the clocks on when encoding */
            ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                               AMD_CG_STATE_UNGATE);
            if (ret)
                return ret;

            pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
            tmp = RREG32_SMC(ixDPM_TABLE_475);
            tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;

@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
            ret = ci_enable_vce_dpm(adev, false);
            if (ret)
                return ret;
            /* turn the clocks off when not encoding */
            ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                               AMD_CG_STATE_GATE);
        }
    }
    return ret;

@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
    adev->pm.current_mclk = adev->clock.default_mclk;
    adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

    if (amdgpu_dpm == 0)
        return 0;

    ret = ci_dpm_init_microcode(adev);
    if (ret)
        return ret;

    if (amdgpu_dpm == 0)
        return 0;

    INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
    mutex_lock(&adev->pm.mutex);
    ret = ci_dpm_init(adev);

@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)

    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (!amdgpu_dpm)
    if (!amdgpu_dpm) {
        ret = ci_upload_firmware(adev);
        if (ret) {
            DRM_ERROR("ci_upload_firmware failed\n");
            return ret;
        }
        ci_dpm_start_smc(adev);
        return 0;
    }

    mutex_lock(&adev->pm.mutex);
    ci_dpm_setup_asic(adev);

@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
        mutex_lock(&adev->pm.mutex);
        ci_dpm_disable(adev);
        mutex_unlock(&adev->pm.mutex);
    } else {
        ci_dpm_stop_smc(adev);
    }

    return 0;

@@ -1722,8 +1722,8 @@ static int cik_common_early_init(void *handle)
            AMD_PG_SUPPORT_GFX_SMG |
            AMD_PG_SUPPORT_GFX_DMG |*/
            AMD_PG_SUPPORT_UVD |
            /*AMD_PG_SUPPORT_VCE |
              AMD_PG_SUPPORT_CP |
            AMD_PG_SUPPORT_VCE |
            /* AMD_PG_SUPPORT_CP |
              AMD_PG_SUPPORT_GDS |
              AMD_PG_SUPPORT_RLC_SMU_HS |
              AMD_PG_SUPPORT_ACP |

@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
|
|||
return (u32)(((u64)1 << bit_width) - 1);
|
||||
}
|
||||
|
||||
static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
|
||||
u32 max_rb_num_per_se,
|
||||
u32 sh_per_se)
|
||||
static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 data, mask;
|
||||
|
||||
data = RREG32(mmCC_RB_BACKEND_DISABLE);
|
||||
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
|
||||
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
|
||||
data = RREG32(mmCC_RB_BACKEND_DISABLE) |
|
||||
RREG32(mmGC_USER_RB_BACKEND_DISABLE);
|
||||
|
||||
data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
|
||||
data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
|
||||
|
||||
mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
|
||||
mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
|
||||
adev->gfx.config.max_sh_per_se);
|
||||
|
||||
return data & mask;
|
||||
return ~data & mask;
|
||||
}
|
||||
|
||||
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
|
||||
|
@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
|
|||
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
}
|
||||
|
||||
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
|
||||
u32 se_num, u32 sh_per_se,
|
||||
u32 max_rb_num_per_se)
|
||||
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
u32 data, mask;
|
||||
u32 disabled_rbs = 0;
|
||||
u32 enabled_rbs = 0;
|
||||
u32 data;
|
||||
u32 raster_config = 0;
|
||||
u32 active_rbs = 0;
|
||||
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
|
||||
adev->gfx.config.max_sh_per_se;
|
||||
unsigned num_rb_pipes;
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (i = 0; i < se_num; i++) {
|
||||
for (j = 0; j < sh_per_se; j++) {
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
|
||||
data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
|
||||
disabled_rbs |= data << ((i * sh_per_se + j) * 2);
|
||||
data = gfx_v6_0_get_rb_active_bitmap(adev);
|
||||
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
|
||||
rb_bitmap_width_per_sh);
|
||||
}
|
||||
}
|
||||
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
mask = 1;
|
||||
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
|
||||
if (!(disabled_rbs & mask))
|
||||
enabled_rbs |= mask;
|
||||
mask <<= 1;
|
||||
}
|
||||
|
||||
adev->gfx.config.backend_enable_mask = enabled_rbs;
|
||||
adev->gfx.config.num_rbs = hweight32(enabled_rbs);
|
||||
adev->gfx.config.backend_enable_mask = active_rbs;
|
||||
adev->gfx.config.num_rbs = hweight32(active_rbs);
|
||||
|
||||
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
|
||||
adev->gfx.config.max_shader_engines, 16);
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (i = 0; i < se_num; i++) {
|
||||
gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
|
||||
data = 0;
|
||||
for (j = 0; j < sh_per_se; j++) {
|
||||
switch (enabled_rbs & 3) {
|
||||
case 1:
|
||||
data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
|
||||
break;
|
||||
case 2:
|
||||
data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
|
||||
break;
|
||||
case 3:
|
||||
default:
|
||||
data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
|
||||
break;
|
||||
}
|
||||
enabled_rbs >>= 2;
|
||||
}
|
||||
gfx_v6_0_raster_config(adev, &data);
|
||||
gfx_v6_0_raster_config(adev, &raster_config);
|
||||
|
||||
if (!adev->gfx.config.backend_enable_mask ||
|
||||
adev->gfx.config.num_rbs >= num_rb_pipes)
|
||||
WREG32(mmPA_SC_RASTER_CONFIG, data);
|
||||
else
|
||||
gfx_v6_0_write_harvested_raster_configs(adev, data,
|
||||
adev->gfx.config.backend_enable_mask,
|
||||
num_rb_pipes);
|
||||
if (!adev->gfx.config.backend_enable_mask ||
|
||||
adev->gfx.config.num_rbs >= num_rb_pipes) {
|
||||
WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
|
||||
} else {
|
||||
gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
|
||||
adev->gfx.config.backend_enable_mask,
|
||||
num_rb_pipes);
|
||||
}
|
||||
|
||||
/* cache the values for userspace */
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
|
||||
adev->gfx.config.rb_config[i][j].rb_backend_disable =
|
||||
RREG32(mmCC_RB_BACKEND_DISABLE);
|
||||
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
|
||||
RREG32(mmGC_USER_RB_BACKEND_DISABLE);
|
||||
adev->gfx.config.rb_config[i][j].raster_config =
|
||||
RREG32(mmPA_SC_RASTER_CONFIG);
|
||||
}
|
||||
}
|
||||
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
|
|||
}
|
||||
*/
|
||||
|
||||
static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
|
||||
static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
|
||||
u32 bitmap)
|
||||
{
|
||||
u32 data;
|
||||
|
||||
if (!bitmap)
|
||||
return;
|
||||
|
||||
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
|
||||
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
|
||||
|
||||
WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
|
||||
}
|
||||
|
||||
static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 data, mask;
|
||||
|
||||
data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
|
||||
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
|
||||
data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
|
||||
data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
|
||||
RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
|
||||
|
||||
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
|
||||
|
||||
mask = gfx_v6_0_create_bitmask(cu_per_sh);
|
||||
|
||||
return ~data & mask;
|
||||
mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
|
||||
return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
|
||||
}
|
||||
|
||||
|
||||
static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
|
||||
u32 se_num, u32 sh_per_se,
|
||||
u32 cu_per_sh)
|
||||
static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j, k;
|
||||
u32 data, mask;
|
||||
u32 active_cu = 0;
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (i = 0; i < se_num; i++) {
|
||||
for (j = 0; j < sh_per_se; j++) {
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
|
||||
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
|
||||
active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
|
||||
active_cu = gfx_v6_0_get_cu_enabled(adev);
|
||||
|
||||
mask = 1;
|
||||
for (k = 0; k < 16; k++) {
|
||||
|
@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
|
|||
gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
|
||||
break;
|
||||
}
|
||||
gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
|
||||
if (adev->gfx.config.max_shader_engines == 2)
|
||||
gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
|
||||
adev->gfx.config.gb_addr_config = gb_addr_config;
|
||||
|
||||
WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
|
||||
|
@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
|
|||
#endif
|
||||
gfx_v6_0_tiling_mode_table_init(adev);
|
||||
|
||||
gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
|
||||
adev->gfx.config.max_sh_per_se,
|
||||
adev->gfx.config.max_backends_per_se);
|
||||
gfx_v6_0_setup_rb(adev);
|
||||
|
||||
gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
|
||||
adev->gfx.config.max_sh_per_se,
|
||||
adev->gfx.config.max_cu_per_sh);
|
||||
gfx_v6_0_setup_spi(adev);
|
||||
|
||||
gfx_v6_0_get_cu_info(adev);
|
||||
|
||||
|
@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
|
|||
}
|
||||
}
|
||||
|
||||
static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
|
||||
u32 se, u32 sh)
|
||||
{
|
||||
|
||||
u32 mask = 0, tmp, tmp1;
|
||||
int i;
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
|
||||
tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
|
||||
tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
|
||||
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
tmp &= 0xffff0000;
|
||||
|
||||
tmp |= tmp1;
|
||||
tmp >>= 16;
|
||||
|
||||
for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
|
||||
mask <<= 1;
|
||||
mask |= 1;
|
||||
}
|
||||
|
||||
return (~tmp) & mask;
|
||||
}
|
||||
|
||||
static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 i, j, k, active_cu_number = 0;
|
||||
u32 tmp;
|
||||
|
||||
u32 mask, counter, cu_bitmap;
|
||||
u32 tmp = 0;
|
||||
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
|
||||
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
mask = 1;
|
||||
cu_bitmap = 0;
|
||||
counter = 0;
|
||||
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
|
||||
if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
|
||||
if (counter < 2)
|
||||
cu_bitmap |= mask;
|
||||
counter++;
|
||||
}
|
||||
mask <<= 1;
|
||||
}
|
||||
|
||||
active_cu_number += counter;
|
||||
tmp |= (cu_bitmap << (i * 16 + j * 8));
|
||||
}
|
||||
}
|
||||
|
||||
WREG32(mmRLC_PG_AO_CU_MASK, tmp);
|
||||
WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
|
||||
tmp = RREG32(mmRLC_MAX_PG_CU);
|
||||
tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
|
||||
tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
|
||||
WREG32(mmRLC_MAX_PG_CU, tmp);
|
||||
}
|
||||
|
||||
static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
|
||||
|
@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
|
|||
int i, j, k, counter, active_cu_number = 0;
|
||||
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
|
||||
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
|
||||
unsigned disable_masks[4 * 2];
|
||||
|
||||
memset(cu_info, 0, sizeof(*cu_info));
|
||||
|
||||
amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
mask = 1;
|
||||
ao_bitmap = 0;
|
||||
counter = 0;
|
||||
bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
|
||||
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
|
||||
if (i < 4 && j < 2)
|
||||
gfx_v6_0_set_user_cu_inactive_bitmap(
|
||||
adev, disable_masks[i * 2 + j]);
|
||||
bitmap = gfx_v6_0_get_cu_enabled(adev);
|
||||
cu_info->bitmap[i][j] = bitmap;
|
||||
|
||||
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
|
||||
for (k = 0; k < 16; k++) {
|
||||
if (bitmap & mask) {
|
||||
if (counter < 2)
|
||||
ao_bitmap |= mask;
|
||||
|
@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
|
|||
}
|
||||
}
|
||||
|
||||
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
cu_info->number = active_cu_number;
|
||||
cu_info->ao_cu_mask = ao_cu_mask;
|
||||
}
|
||||
|
|
|
@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
|
|||
|
||||
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
|
||||
kv_dpm_powergate_vce(adev, false);
|
||||
/* turn the clocks on when encoding */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (pi->caps_stable_p_state)
|
||||
pi->vce_boot_level = table->count - 1;
|
||||
else
|
||||
|
@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
|
|||
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_VCEDPM_SetEnabledMask,
|
||||
(1 << pi->vce_boot_level));
|
||||
|
||||
kv_enable_vce_dpm(adev, true);
|
||||
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
|
||||
kv_enable_vce_dpm(adev, false);
|
||||
/* turn the clocks off when not encoding */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
if (ret)
|
||||
return ret;
|
||||
kv_dpm_powergate_vce(adev, true);
|
||||
}
|
||||
|
||||
|
@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
|
|||
struct kv_power_info *pi = kv_get_pi(adev);
|
||||
int ret;
|
||||
|
||||
if (pi->uvd_power_gated == gate)
|
||||
return;
|
||||
|
||||
pi->uvd_power_gated = gate;
|
||||
|
||||
if (gate) {
|
||||
if (pi->caps_uvd_pg) {
|
||||
/* disable clockgating so we can properly shut down the block */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
/* shutdown the UVD block */
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
/* XXX: check for errors */
|
||||
}
|
||||
/* stop the UVD block */
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
kv_update_uvd_dpm(adev, gate);
|
||||
if (pi->caps_uvd_pg)
|
||||
/* power off the UVD block */
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
|
||||
} else {
|
||||
if (pi->caps_uvd_pg) {
|
||||
if (pi->caps_uvd_pg)
|
||||
/* power on the UVD block */
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
|
||||
/* re-init the UVD block */
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
|
||||
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
/* XXX: check for errors */
|
||||
}
|
||||
kv_update_uvd_dpm(adev, gate);
|
||||
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
}
|
||||
}
|
||||
|
||||
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
|
||||
{
|
||||
struct kv_power_info *pi = kv_get_pi(adev);
|
||||
int ret;
|
||||
|
||||
if (pi->vce_power_gated == gate)
|
||||
return;
|
||||
|
||||
pi->vce_power_gated = gate;
|
||||
|
||||
if (gate) {
|
||||
if (pi->caps_vce_pg) {
|
||||
/* shutdown the VCE block */
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_GATE);
|
||||
/* XXX: check for errors */
|
||||
/* power off the VCE block */
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
|
||||
}
|
||||
} else {
|
||||
if (pi->caps_vce_pg) {
|
||||
/* power on the VCE block */
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
|
||||
/* re-init the VCE block */
|
||||
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
/* XXX: check for errors */
|
||||
}
|
||||
}
|
||||
if (!pi->caps_vce_pg)
|
||||
return;
|
||||
|
||||
if (gate)
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
|
||||
else
|
||||
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
|
||||
}
|
||||
|
||||
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
|
||||
|
@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle)
|
|||
|
||||
kv_dpm_powergate_acp(adev, true);
|
||||
kv_dpm_powergate_samu(adev, true);
|
||||
kv_dpm_powergate_vce(adev, true);
|
||||
kv_dpm_powergate_uvd(adev, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
|
|||
{PA_SC_RASTER_CONFIG, false, true},
|
||||
};
|
||||
|
||||
static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
|
||||
u32 se_num, u32 sh_num,
|
||||
u32 reg_offset)
|
||||
static uint32_t si_get_register_value(struct amdgpu_device *adev,
|
||||
bool indexed, u32 se_num,
|
||||
u32 sh_num, u32 reg_offset)
|
||||
{
|
||||
uint32_t val;
|
||||
if (indexed) {
|
||||
uint32_t val;
|
||||
unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
|
||||
unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
if (se_num != 0xffffffff || sh_num != 0xffffffff)
|
||||
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
|
||||
switch (reg_offset) {
|
||||
case mmCC_RB_BACKEND_DISABLE:
|
||||
return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
|
||||
case mmGC_USER_RB_BACKEND_DISABLE:
|
||||
return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
|
||||
case mmPA_SC_RASTER_CONFIG:
|
||||
return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
|
||||
}
|
||||
|
||||
val = RREG32(reg_offset);
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
if (se_num != 0xffffffff || sh_num != 0xffffffff)
|
||||
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
|
||||
|
||||
if (se_num != 0xffffffff || sh_num != 0xffffffff)
|
||||
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
return val;
|
||||
val = RREG32(reg_offset);
|
||||
|
||||
if (se_num != 0xffffffff || sh_num != 0xffffffff)
|
||||
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
return val;
|
||||
} else {
|
||||
unsigned idx;
|
||||
|
||||
switch (reg_offset) {
|
||||
case mmGB_ADDR_CONFIG:
|
||||
return adev->gfx.config.gb_addr_config;
|
||||
case mmMC_ARB_RAMCFG:
|
||||
return adev->gfx.config.mc_arb_ramcfg;
|
||||
case mmGB_TILE_MODE0:
|
||||
case mmGB_TILE_MODE1:
|
||||
case mmGB_TILE_MODE2:
|
||||
case mmGB_TILE_MODE3:
|
||||
case mmGB_TILE_MODE4:
|
||||
case mmGB_TILE_MODE5:
|
||||
case mmGB_TILE_MODE6:
|
||||
case mmGB_TILE_MODE7:
|
||||
case mmGB_TILE_MODE8:
|
||||
case mmGB_TILE_MODE9:
|
||||
case mmGB_TILE_MODE10:
|
||||
case mmGB_TILE_MODE11:
|
||||
case mmGB_TILE_MODE12:
|
||||
case mmGB_TILE_MODE13:
|
||||
case mmGB_TILE_MODE14:
|
||||
case mmGB_TILE_MODE15:
|
||||
case mmGB_TILE_MODE16:
|
||||
case mmGB_TILE_MODE17:
|
||||
case mmGB_TILE_MODE18:
|
||||
case mmGB_TILE_MODE19:
|
||||
case mmGB_TILE_MODE20:
|
||||
case mmGB_TILE_MODE21:
|
||||
case mmGB_TILE_MODE22:
|
||||
case mmGB_TILE_MODE23:
|
||||
case mmGB_TILE_MODE24:
|
||||
case mmGB_TILE_MODE25:
|
||||
case mmGB_TILE_MODE26:
|
||||
case mmGB_TILE_MODE27:
|
||||
case mmGB_TILE_MODE28:
|
||||
case mmGB_TILE_MODE29:
|
||||
case mmGB_TILE_MODE30:
|
||||
case mmGB_TILE_MODE31:
|
||||
idx = (reg_offset - mmGB_TILE_MODE0);
|
||||
return adev->gfx.config.tile_mode_array[idx];
|
||||
default:
|
||||
return RREG32(reg_offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int si_read_register(struct amdgpu_device *adev, u32 se_num,
|
||||
u32 sh_num, u32 reg_offset, u32 *value)
|
||||
{
|
||||
|
@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
|
|||
continue;
|
||||
|
||||
if (!si_allowed_read_registers[i].untouched)
|
||||
*value = si_allowed_read_registers[i].grbm_indexed ?
|
||||
si_read_indexed_register(adev, se_num,
|
||||
sh_num, reg_offset) :
|
||||
RREG32(reg_offset);
|
||||
*value = si_get_register_value(adev,
|
||||
si_allowed_read_registers[i].grbm_indexed,
|
||||
se_num, sh_num, reg_offset);
|
||||
return 0;
|
||||
}
|
||||
return -EINVAL;
|
||||
|
|
|
@ -143,8 +143,8 @@
|
|||
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
|
||||
|
||||
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
|
||||
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
|
||||
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
|
||||
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002
|
||||
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
|
||||
|
||||
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
|
||||
(((op) & 0xFF) << 8) | \
|
||||
|
|
|
@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)
|
|||
|
||||
uvd_v4_2_enable_mgcg(adev, true);
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
r = uvd_v4_2_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
|
||||
ring->ready = true;
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
|
@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
|
|||
amdgpu_ring_commit(ring);
|
||||
|
||||
done:
|
||||
|
||||
if (!r)
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
|
||||
|
@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring = &adev->uvd.ring;
|
||||
|
||||
uvd_v4_2_stop(adev);
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v4_2_stop(adev);
|
||||
|
||||
ring->ready = false;
|
||||
|
||||
return 0;
|
||||
|
@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
struct amdgpu_ring *ring = &adev->uvd.ring;
|
||||
uint32_t rb_bufsz;
|
||||
int i, j, r;
|
||||
u32 tmp;
|
||||
/* disable byte swapping */
|
||||
u32 lmi_swap_cntl = 0;
|
||||
u32 mp_swap_cntl = 0;
|
||||
|
||||
WREG32(mmUVD_CGC_GATE, 0);
|
||||
/* set uvd busy */
|
||||
WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
|
||||
|
||||
uvd_v4_2_set_dcm(adev, true);
|
||||
|
||||
uvd_v4_2_mc_resume(adev);
|
||||
|
||||
/* disable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
|
||||
|
||||
/* Stall UMC and register bus before resetting VCPU */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
|
||||
mdelay(1);
|
||||
|
||||
/* put LMI, VCPU, RBC etc... into reset */
|
||||
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
|
||||
mdelay(5);
|
||||
WREG32(mmUVD_CGC_GATE, 0);
|
||||
|
||||
/* take UVD block out of reset */
|
||||
WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
|
||||
mdelay(5);
|
||||
|
||||
/* initialize UVD memory controller */
|
||||
WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
|
||||
(1 << 21) | (1 << 9) | (1 << 20));
|
||||
/* enable VCPU clock */
|
||||
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
|
||||
|
||||
/* disable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
/* swap (8 in 32) RB and IB */
|
||||
|
@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
#endif
|
||||
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
|
||||
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
|
||||
/* initialize UVD memory controller */
|
||||
WREG32(mmUVD_LMI_CTRL, 0x203108);
|
||||
|
||||
tmp = RREG32(mmUVD_MPC_CNTL);
|
||||
WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
|
||||
|
||||
WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
|
||||
WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
|
||||
|
@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
WREG32(mmUVD_MPC_SET_ALU, 0);
|
||||
WREG32(mmUVD_MPC_SET_MUX, 0x88);
|
||||
|
||||
/* take all subblocks out of reset, except VCPU */
|
||||
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||
mdelay(5);
|
||||
uvd_v4_2_mc_resume(adev);
|
||||
|
||||
/* enable VCPU clock */
|
||||
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
|
||||
tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
|
||||
WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
|
||||
|
||||
/* enable UMC */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
|
||||
|
||||
/* boot up the VCPU */
|
||||
WREG32(mmUVD_SOFT_RESET, 0);
|
||||
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
|
||||
|
||||
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
|
||||
|
||||
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||
|
||||
mdelay(10);
|
||||
|
||||
for (i = 0; i < 10; ++i) {
|
||||
|
@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
/* enable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
|
||||
|
||||
WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
|
||||
|
||||
/* force RBC into idle state */
|
||||
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
|
||||
|
||||
|
@ -393,22 +389,54 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
|
|||
*/
|
||||
static void uvd_v4_2_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
/* force RBC into idle state */
|
||||
uint32_t i, j;
|
||||
uint32_t status;
|
||||
|
||||
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
|
||||
|
||||
for (i = 0; i < 10; ++i) {
|
||||
for (j = 0; j < 100; ++j) {
|
||||
status = RREG32(mmUVD_STATUS);
|
||||
if (status & 2)
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < 10; ++i) {
|
||||
for (j = 0; j < 100; ++j) {
|
||||
status = RREG32(mmUVD_LMI_STATUS);
|
||||
if (status & 0xf)
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/* Stall UMC and register bus before resetting VCPU */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
|
||||
mdelay(1);
|
||||
|
||||
/* put VCPU into reset */
|
||||
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||
mdelay(5);
|
||||
for (i = 0; i < 10; ++i) {
|
||||
for (j = 0; j < 100; ++j) {
|
||||
status = RREG32(mmUVD_LMI_STATUS);
|
||||
if (status & 0x240)
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/* disable VCPU clock */
|
||||
WREG32(mmUVD_VCPU_CNTL, 0x0);
|
||||
WREG32_P(0x3D49, 0, ~(1 << 2));
|
||||
|
||||
/* Unstall UMC and register bus */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
|
||||
WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
|
||||
|
||||
/* put LMI, VCPU, RBC etc... into reset */
|
||||
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
|
||||
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
|
||||
|
||||
WREG32(mmUVD_STATUS, 0);
|
||||
|
||||
uvd_v4_2_set_dcm(adev, false);
|
||||
}
|
||||
|
@ -694,8 +722,24 @@ static int uvd_v4_2_set_powergating_state(void *handle,
|
|||
|
||||
if (state == AMD_PG_STATE_GATE) {
|
||||
uvd_v4_2_stop(adev);
|
||||
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
|
||||
if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) {
|
||||
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
|
||||
UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
|
||||
UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
|
||||
mdelay(20);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
} else {
|
||||
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
|
||||
if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) {
|
||||
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
|
||||
UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
|
||||
UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
|
||||
mdelay(30);
|
||||
}
|
||||
}
|
||||
return uvd_v4_2_start(adev);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
|
|||
uint32_t tmp;
|
||||
int r;
|
||||
|
||||
r = uvd_v5_0_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
uvd_v5_0_enable_mgcg(adev, true);
|
||||
|
||||
ring->ready = true;
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
|
@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
|
|||
amdgpu_ring_write(ring, 3);
|
||||
|
||||
amdgpu_ring_commit(ring);
|
||||
|
||||
done:
|
||||
if (!r)
|
||||
DRM_INFO("UVD initialized successfully.\n");
|
||||
|
||||
return r;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring = &adev->uvd.ring;
|
||||
|
||||
uvd_v5_0_stop(adev);
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v5_0_stop(adev);
|
||||
|
||||
ring->ready = false;
|
||||
|
||||
return 0;
|
||||
|
@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
|
|||
|
||||
uvd_v5_0_mc_resume(adev);
|
||||
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
uvd_v5_0_enable_mgcg(adev, true);
|
||||
|
||||
/* disable interupt */
|
||||
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
|
||||
|
||||
|
@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
|
|||
|
||||
/* Unstall UMC and register bus */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
|
||||
|
||||
WREG32(mmUVD_STATUS, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
|
||||
|
||||
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
|
||||
return 0;
|
||||
|
||||
if (enable) {
|
||||
/* wait for STATUS to clear */
|
||||
if (uvd_v5_0_wait_for_idle(handle))
|
||||
|
@ -824,9 +823,6 @@ static int uvd_v5_0_set_powergating_state(void *handle,
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int ret = 0;
|
||||
|
||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
|
||||
return 0;
|
||||
|
||||
if (state == AMD_PG_STATE_GATE) {
|
||||
uvd_v5_0_stop(adev);
|
||||
adev->uvd.is_powergated = true;
|
||||
|
|
|
@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
|
|||
uint32_t tmp;
|
||||
int r;
|
||||
|
||||
r = uvd_v6_0_start(adev);
|
||||
if (r)
|
||||
goto done;
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
uvd_v6_0_enable_mgcg(adev, true);
|
||||
|
||||
ring->ready = true;
|
||||
r = amdgpu_ring_test_ring(ring);
|
||||
|
@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring = &adev->uvd.ring;
|
||||
|
||||
uvd_v6_0_stop(adev);
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v6_0_stop(adev);
|
||||
|
||||
ring->ready = false;
|
||||
|
||||
return 0;
|
||||
|
@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
|
|||
lmi_swap_cntl = 0;
|
||||
mp_swap_cntl = 0;
|
||||
|
||||
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
|
||||
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
|
||||
uvd_v6_0_enable_mgcg(adev, true);
|
||||
uvd_v6_0_mc_resume(adev);
|
||||
|
||||
/* disable interupt */
|
||||
|
@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
|
|||
|
||||
/* Unstall UMC and register bus */
|
||||
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
|
||||
|
||||
WREG32(mmUVD_STATUS, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
|
||||
|
||||
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
|
||||
return 0;
|
||||
|
||||
if (enable) {
|
||||
/* wait for STATUS to clear */
|
||||
if (uvd_v6_0_wait_for_idle(handle))
|
||||
|
@ -1049,9 +1047,6 @@ static int uvd_v6_0_set_powergating_state(void *handle,
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int ret = 0;
|
||||
|
||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
|
||||
return 0;
|
||||
|
||||
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
|
||||
|
||||
if (state == AMD_PG_STATE_GATE) {
|
||||
|
|
|
@ -42,10 +42,9 @@
|
|||
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
|
||||
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
|
||||
|
||||
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
|
||||
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
|
||||
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int vce_v2_0_wait_for_idle(void *handle);
|
||||
|
||||
/**
|
||||
* vce_v2_0_ring_get_rptr - get read pointer
|
||||
*
|
||||
|
@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
|
|||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
|
||||
{
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
|
||||
}
|
||||
|
||||
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_A);
|
||||
tmp &= ~0xfff;
|
||||
tmp |= ((0 << 0) | (4 << 4));
|
||||
tmp |= 0x40000;
|
||||
WREG32(mmVCE_CLOCK_GATING_A, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp &= ~0xfff;
|
||||
tmp |= ((0 << 0) | (4 << 4));
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0x10;
|
||||
tmp &= ~0x100000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
}
|
||||
|
||||
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
uint64_t addr = adev->vce.gpu_addr;
|
||||
uint32_t size;
|
||||
|
||||
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
|
||||
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
|
||||
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
|
||||
WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
|
||||
|
||||
WREG32(mmVCE_LMI_CTRL, 0x00398000);
|
||||
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
|
||||
WREG32(mmVCE_LMI_SWAP_CNTL, 0);
|
||||
WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
|
||||
WREG32(mmVCE_LMI_VM_CTRL, 0);
|
||||
|
||||
addr += AMDGPU_VCE_FIRMWARE_OFFSET;
|
||||
size = VCE_V2_0_FW_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
|
||||
|
||||
addr += size;
|
||||
size = VCE_V2_0_STACK_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
|
||||
|
||||
addr += size;
|
||||
size = VCE_V2_0_DATA_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
|
||||
|
||||
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
|
||||
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
|
||||
}
|
||||
|
||||
static bool vce_v2_0_is_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
|
||||
}
|
||||
|
||||
static int vce_v2_0_wait_for_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (vce_v2_0_is_idle(handle))
|
||||
return 0;
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
/**
|
||||
* vce_v2_0_start - start VCE block
|
||||
*
|
||||
|
@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
|
|||
struct amdgpu_ring *ring;
|
||||
int r;
|
||||
|
||||
vce_v2_0_mc_resume(adev);
|
||||
|
||||
/* set BUSY flag */
|
||||
WREG32_P(mmVCE_STATUS, 1, ~1);
|
||||
|
||||
vce_v2_0_init_cg(adev);
|
||||
vce_v2_0_disable_cg(adev);
|
||||
|
||||
vce_v2_0_mc_resume(adev);
|
||||
|
||||
ring = &adev->vce.ring[0];
|
||||
WREG32(mmVCE_RB_RPTR, ring->wptr);
|
||||
WREG32(mmVCE_RB_WPTR, ring->wptr);
|
||||
|
@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int vce_v2_0_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
int status;
|
||||
|
||||
if (vce_v2_0_lmi_clean(adev)) {
|
||||
DRM_INFO("vce is not idle \n");
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
for (i = 0; i < 10; ++i) {
|
||||
for (j = 0; j < 100; ++j) {
|
||||
status = RREG32(mmVCE_FW_REG_STATUS);
|
||||
if (!(status & 1))
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
*/
|
||||
if (vce_v2_0_wait_for_idle(adev)) {
|
||||
DRM_INFO("VCE is busy, Can't set clock gateing");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Stall UMC and register bus before resetting VCPU */
|
||||
WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
|
||||
|
||||
for (i = 0; i < 10; ++i) {
|
||||
for (j = 0; j < 100; ++j) {
|
||||
status = RREG32(mmVCE_LMI_STATUS);
|
||||
if (status & 0x240)
|
||||
break;
|
||||
mdelay(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
|
||||
|
||||
/* put LMI, VCPU, RBC etc... into reset */
|
||||
WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
|
||||
|
||||
WREG32(mmVCE_STATUS, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
if (gated) {
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0xe70000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp |= 0xff000000;
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
|
||||
tmp &= ~0x3fc;
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
|
||||
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
|
||||
} else {
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0xe7;
|
||||
tmp &= ~0xe70000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp |= 0x1fe000;
|
||||
tmp &= ~0xff000000;
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
|
||||
tmp |= 0x3fc;
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
|
||||
{
|
||||
u32 orig, tmp;
|
||||
|
||||
/* LMI_MC/LMI_UMC always set in dynamic,
|
||||
* set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
|
||||
*/
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp &= ~0x00060006;
|
||||
|
||||
/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
|
||||
if (gated) {
|
||||
tmp |= 0xe10000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
} else {
|
||||
tmp |= 0xe1;
|
||||
tmp &= ~0xe10000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
}
|
||||
|
||||
orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp &= ~0x1fe000;
|
||||
tmp &= ~0xff000000;
|
||||
if (tmp != orig)
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
|
||||
tmp &= ~0x3fc;
|
||||
if (tmp != orig)
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
|
||||
|
||||
/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
|
||||
|
||||
if (gated)
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
|
||||
}
|
||||
|
||||
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
|
||||
bool sw_cg)
|
||||
{
|
||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
|
||||
if (sw_cg)
|
||||
vce_v2_0_set_sw_cg(adev, true);
|
||||
else
|
||||
vce_v2_0_set_dyn_cg(adev, true);
|
||||
} else {
|
||||
vce_v2_0_disable_cg(adev);
|
||||
|
||||
if (sw_cg)
|
||||
vce_v2_0_set_sw_cg(adev, false);
|
||||
else
|
||||
vce_v2_0_set_dyn_cg(adev, false);
|
||||
}
|
||||
}
|
||||
|
||||
static int vce_v2_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
|
|||
int r, i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
r = vce_v2_0_start(adev);
|
||||
/* this error means the VCPU is not running, so just skip the ring test rather than failing driver init */
|
||||
if (r)
|
||||
return 0;
|
||||
|
||||
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
|
||||
vce_v2_0_enable_mgcg(adev, true, false);
|
||||
for (i = 0; i < adev->vce.num_rings; i++)
|
||||
adev->vce.ring[i].ready = false;
|
||||
|
||||
|
@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
|
|||
return r;
|
||||
}
|
||||
|
||||
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
if (gated) {
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0xe70000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp |= 0xff000000;
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
|
||||
tmp &= ~0x3fc;
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
|
||||
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
|
||||
} else {
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0xe7;
|
||||
tmp &= ~0xe70000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp |= 0x1fe000;
|
||||
tmp &= ~0xff000000;
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
|
||||
tmp |= 0x3fc;
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
|
||||
{
|
||||
if (vce_v2_0_wait_for_idle(adev)) {
|
||||
DRM_INFO("VCE is busy, Can't set clock gateing");
|
||||
return;
|
||||
}
|
||||
|
||||
WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
|
||||
|
||||
if (vce_v2_0_lmi_clean(adev)) {
|
||||
DRM_INFO("LMI is busy, Can't set clock gateing");
|
||||
return;
|
||||
}
|
||||
|
||||
WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
|
||||
WREG32_P(mmVCE_SOFT_RESET,
|
||||
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
|
||||
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
|
||||
WREG32(mmVCE_STATUS, 0);
|
||||
|
||||
if (gated)
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
|
||||
/* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
|
||||
if (gated) {
|
||||
/* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
|
||||
WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
|
||||
} else {
|
||||
/* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
|
||||
WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
|
||||
}
|
||||
|
||||
/* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
|
||||
|
||||
/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
|
||||
WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
|
||||
|
||||
WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
|
||||
if (!gated) {
|
||||
WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
|
||||
mdelay(100);
|
||||
WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
|
||||
|
||||
vce_v2_0_firmware_loaded(adev);
|
||||
WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
|
||||
}
|
||||
}
|
||||
|
||||
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
|
||||
{
|
||||
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
|
||||
}
|
||||
|
||||
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
bool sw_cg = false;
|
||||
|
||||
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
|
||||
if (sw_cg)
|
||||
vce_v2_0_set_sw_cg(adev, true);
|
||||
else
|
||||
vce_v2_0_set_dyn_cg(adev, true);
|
||||
} else {
|
||||
vce_v2_0_disable_cg(adev);
|
||||
|
||||
if (sw_cg)
|
||||
vce_v2_0_set_sw_cg(adev, false);
|
||||
else
|
||||
vce_v2_0_set_dyn_cg(adev, false);
|
||||
}
|
||||
}
|
||||
|
||||
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_A);
|
||||
tmp &= ~0xfff;
|
||||
tmp |= ((0 << 0) | (4 << 4));
|
||||
tmp |= 0x40000;
|
||||
WREG32(mmVCE_CLOCK_GATING_A, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
|
||||
tmp &= ~0xfff;
|
||||
tmp |= ((0 << 0) | (4 << 4));
|
||||
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
|
||||
|
||||
tmp = RREG32(mmVCE_CLOCK_GATING_B);
|
||||
tmp |= 0x10;
|
||||
tmp &= ~0x100000;
|
||||
WREG32(mmVCE_CLOCK_GATING_B, tmp);
|
||||
}
|
||||
|
||||
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
uint64_t addr = adev->vce.gpu_addr;
|
||||
uint32_t size;
|
||||
|
||||
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
|
||||
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
|
||||
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
|
||||
WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
|
||||
|
||||
WREG32(mmVCE_LMI_CTRL, 0x00398000);
|
||||
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
|
||||
WREG32(mmVCE_LMI_SWAP_CNTL, 0);
|
||||
WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
|
||||
WREG32(mmVCE_LMI_VM_CTRL, 0);
|
||||
|
||||
addr += AMDGPU_VCE_FIRMWARE_OFFSET;
|
||||
size = VCE_V2_0_FW_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
|
||||
|
||||
addr += size;
|
||||
size = VCE_V2_0_STACK_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
|
||||
|
||||
addr += size;
|
||||
size = VCE_V2_0_DATA_SIZE;
|
||||
WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
|
||||
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
|
||||
|
||||
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
|
||||
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
|
||||
|
||||
vce_v2_0_init_cg(adev);
|
||||
}
|
||||
|
||||
static bool vce_v2_0_is_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
|
||||
}
|
||||
|
||||
static int vce_v2_0_wait_for_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (vce_v2_0_is_idle(handle))
|
||||
return 0;
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int vce_v2_0_soft_reset(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
|
||||
|
||||
if (enable)
|
||||
tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
|
||||
else
|
||||
tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
|
||||
|
||||
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
|
||||
}
|
||||
|
||||
|
||||
static int vce_v2_0_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
bool gate = false;
|
||||
bool sw_cg = false;
|
||||
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
|
||||
|
||||
|
||||
vce_v2_0_set_bypass_mode(adev, enable);
|
||||
|
||||
if (state == AMD_CG_STATE_GATE)
|
||||
if (state == AMD_CG_STATE_GATE) {
|
||||
gate = true;
|
||||
sw_cg = true;
|
||||
}
|
||||
|
||||
vce_v2_0_enable_mgcg(adev, gate);
|
||||
vce_v2_0_enable_mgcg(adev, gate, sw_cg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
|
|||
*/
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
|
||||
return 0;
|
||||
|
||||
if (state == AMD_PG_STATE_GATE)
|
||||
/* XXX do we need a vce_v2_0_stop()? */
|
||||
return 0;
|
||||
return vce_v2_0_stop(adev);
|
||||
else
|
||||
return vce_v2_0_start(adev);
|
||||
}
|
||||
|
|
|
@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
|
|||
struct amdgpu_ring *ring;
|
||||
int idx, r;
|
||||
|
||||
vce_v3_0_override_vce_clock_gating(adev, true);
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
|
||||
|
||||
ring = &adev->vce.ring[0];
|
||||
WREG32(mmVCE_RB_RPTR, ring->wptr);
|
||||
WREG32(mmVCE_RB_WPTR, ring->wptr);
|
||||
|
@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
|
|||
int r, i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
r = vce_v3_0_start(adev);
|
||||
if (r)
|
||||
return r;
|
||||
vce_v3_0_override_vce_clock_gating(adev, true);
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
|
||||
|
||||
for (i = 0; i < adev->vce.num_rings; i++)
|
||||
adev->vce.ring[i].ready = false;
|
||||
|
@ -766,12 +762,11 @@ static int vce_v3_0_set_powergating_state(void *handle,
|
|||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int ret = 0;
|
||||
|
||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
|
||||
return 0;
|
||||
|
||||
if (state == AMD_PG_STATE_GATE) {
|
||||
ret = vce_v3_0_stop(adev);
|
||||
if (ret)
|
||||
goto out;
|
||||
adev->vce.is_powergated = true;
|
||||
/* XXX do we need a vce_v3_0_stop()? */
|
||||
} else {
|
||||
ret = vce_v3_0_start(adev);
|
||||
if (ret)
|
||||
|
|
|
@ -1310,5 +1310,6 @@
|
|||
#define ixROM_SW_DATA_62 0xc060012c
|
||||
#define ixROM_SW_DATA_63 0xc0600130
|
||||
#define ixROM_SW_DATA_64 0xc0600134
|
||||
#define ixCURRENT_PG_STATUS 0xc020029c
|
||||
|
||||
#endif /* SMU_7_0_1_D_H */
|
||||
|
|
|
@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
{
|
||||
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
|
||||
|
||||
if (cz_hwmgr->uvd_power_gated == bgate)
|
||||
return 0;
|
||||
|
||||
cz_hwmgr->uvd_power_gated = bgate;
|
||||
|
||||
if (bgate) {
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
cz_dpm_update_uvd_dpm(hwmgr, true);
|
||||
cz_dpm_powerdown_uvd(hwmgr);
|
||||
} else {
|
||||
cz_dpm_powerup_uvd(hwmgr);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cz_dpm_update_uvd_dpm(hwmgr, false);
|
||||
}
|
||||
|
||||
|
@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
{
|
||||
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
|
||||
|
||||
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
|
||||
PHM_PlatformCaps_VCEPowerGating)) {
|
||||
if (cz_hwmgr->vce_power_gated != bgate) {
|
||||
if (bgate) {
|
||||
cgs_set_clockgating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
cgs_set_powergating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_GATE);
|
||||
cz_enable_disable_vce_dpm(hwmgr, false);
|
||||
cz_dpm_powerdown_vce(hwmgr);
|
||||
cz_hwmgr->vce_power_gated = true;
|
||||
} else {
|
||||
cz_dpm_powerup_vce(hwmgr);
|
||||
cz_hwmgr->vce_power_gated = false;
|
||||
cgs_set_powergating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
cz_dpm_update_vce_dpm(hwmgr);
|
||||
cz_enable_disable_vce_dpm(hwmgr, true);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (bgate) {
|
||||
cgs_set_powergating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_GATE);
|
||||
cgs_set_clockgating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
cz_enable_disable_vce_dpm(hwmgr, false);
|
||||
cz_dpm_powerdown_vce(hwmgr);
|
||||
cz_hwmgr->vce_power_gated = true;
|
||||
} else {
|
||||
cz_hwmgr->vce_power_gated = bgate;
|
||||
cz_dpm_powerup_vce(hwmgr);
|
||||
cz_hwmgr->vce_power_gated = false;
|
||||
cgs_set_clockgating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
cgs_set_powergating_state(
|
||||
hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cz_dpm_update_vce_dpm(hwmgr);
|
||||
cz_enable_disable_vce_dpm(hwmgr, !bgate);
|
||||
cz_enable_disable_vce_dpm(hwmgr, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!cz_hwmgr->vce_power_gated)
|
||||
cz_dpm_update_vce_dpm(hwmgr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
data->uvd_power_gated = bgate;
|
||||
|
||||
if (bgate) {
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
smu7_update_uvd_dpm(hwmgr, true);
|
||||
smu7_powerdown_uvd(hwmgr);
|
||||
} else {
|
||||
smu7_powerup_uvd(hwmgr);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
smu7_update_uvd_dpm(hwmgr, false);
|
||||
}
|
||||
|
||||
|
@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
{
|
||||
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
|
||||
|
||||
if (data->vce_power_gated == bgate)
|
||||
return 0;
|
||||
|
||||
data->vce_power_gated = bgate;
|
||||
|
||||
if (bgate) {
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
|
@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
|
|||
smu7_powerdown_vce(hwmgr);
|
||||
} else {
|
||||
smu7_powerup_vce(hwmgr);
|
||||
smu7_update_vce_dpm(hwmgr, false);
|
||||
cgs_set_clockgating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_UNGATE);
|
||||
cgs_set_powergating_state(hwmgr->device,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_UNGATE);
|
||||
smu7_update_vce_dpm(hwmgr, false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -2624,6 +2624,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
|
|||
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
|
||||
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
|
||||
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
|
||||
|
||||
break;
|
||||
case AMD_DPM_FORCED_LEVEL_MANUAL:
|
||||
hwmgr->dpm_level = level;
|
||||
|
@ -2633,9 +2634,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
|
|||
break;
|
||||
}
|
||||
|
||||
if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
|
||||
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
|
||||
smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
|
||||
else
|
||||
else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
|
||||
smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
|
||||
|
||||
return 0;
|
||||
|
@ -4397,16 +4398,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
|
|||
if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
|
||||
return -EINVAL;
|
||||
dep_sclk_table = table_info->vdd_dep_on_sclk;
|
||||
for (i = 0; i < dep_sclk_table->count; i++) {
|
||||
for (i = 0; i < dep_sclk_table->count; i++)
|
||||
clocks->clock[i] = dep_sclk_table->entries[i].clk;
|
||||
clocks->count++;
|
||||
}
|
||||
clocks->count = dep_sclk_table->count;
|
||||
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
|
||||
sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
|
||||
for (i = 0; i < sclk_table->count; i++) {
|
||||
for (i = 0; i < sclk_table->count; i++)
|
||||
clocks->clock[i] = sclk_table->entries[i].clk;
|
||||
clocks->count++;
|
||||
}
|
||||
clocks->count = sclk_table->count;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -4440,14 +4439,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
|
|||
clocks->clock[i] = dep_mclk_table->entries[i].clk;
|
||||
clocks->latency[i] = smu7_get_mem_latency(hwmgr,
|
||||
dep_mclk_table->entries[i].clk);
|
||||
clocks->count++;
|
||||
}
|
||||
clocks->count = dep_mclk_table->count;
|
||||
} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
|
||||
mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
|
||||
for (i = 0; i < mclk_table->count; i++) {
|
||||
for (i = 0; i < mclk_table->count; i++)
|
||||
clocks->clock[i] = mclk_table->entries[i].clk;
|
||||
clocks->count++;
|
||||
}
|
||||
clocks->count = mclk_table->count;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
|
|||
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
|
||||
|
||||
|
||||
|
|
|
@ -135,8 +135,7 @@ static int arcpgu_load(struct drm_device *drm)
|
|||
drm_kms_helper_poll_init(drm);
|
||||
|
||||
arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
|
||||
drm->mode_config.num_crtc,
|
||||
drm->mode_config.num_connector);
|
||||
drm->mode_config.num_connector);
|
||||
if (IS_ERR(arcpgu->fbdev)) {
|
||||
ret = PTR_ERR(arcpgu->fbdev);
|
||||
arcpgu->fbdev = NULL;
|
||||
|
|
|
@ -349,7 +349,7 @@ static int hdlcd_drm_bind(struct device *dev)
|
|||
drm_mode_config_reset(drm);
|
||||
drm_kms_helper_poll_init(drm);
|
||||
|
||||
hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
|
||||
hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
|
||||
drm->mode_config.num_connector);
|
||||
|
||||
if (IS_ERR(hdlcd->fbdev)) {
|
||||
|
|
|
@ -457,7 +457,7 @@ static int malidp_bind(struct device *dev)
|
|||
|
||||
drm_mode_config_reset(drm);
|
||||
|
||||
malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
|
||||
malidp->fbdev = drm_fbdev_cma_init(drm, 32,
|
||||
drm->mode_config.num_connector);
|
||||
|
||||
if (IS_ERR(malidp->fbdev)) {
|
||||
|
|
|
@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
|
|||
|
||||
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(dev, fbh, 1, 1);
|
||||
ret = drm_fb_helper_init(dev, fbh, 1);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize drm fb helper\n");
|
||||
goto err_fb_helper;
|
||||
|
|
|
@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
|
|||
return -ENOSPC;
|
||||
|
||||
mutex_lock(&priv->linear_lock);
|
||||
ret = drm_mm_insert_node(&priv->linear, node, size, align,
|
||||
DRM_MM_SEARCH_DEFAULT);
|
||||
ret = drm_mm_insert_node_generic(&priv->linear, node,
|
||||
size, align, 0, 0);
|
||||
mutex_unlock(&priv->linear_lock);
|
||||
if (ret) {
|
||||
kfree(node);
|
||||
|
|
|
@ -315,8 +315,7 @@ int ast_fbdev_init(struct drm_device *dev)
|
|||
|
||||
drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(dev, &afbdev->helper,
|
||||
1, 1);
|
||||
ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
|
||||
if (ret)
|
||||
goto free;
|
||||
|
||||
|
|
|
@ -647,7 +647,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
|
|||
platform_set_drvdata(pdev, dev);
|
||||
|
||||
dc->fbdev = drm_fbdev_cma_init(dev, 24,
|
||||
dev->mode_config.num_crtc,
|
||||
dev->mode_config.num_connector);
|
||||
if (IS_ERR(dc->fbdev))
|
||||
dc->fbdev = NULL;
|
||||
|
|
|
@ -12,6 +12,10 @@
|
|||
|
||||
#include "bochs.h"
|
||||
|
||||
static int bochs_modeset = -1;
|
||||
module_param_named(modeset, bochs_modeset, int, 0444);
|
||||
MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
|
||||
|
||||
static bool enable_fbdev = true;
|
||||
module_param_named(fbdev, enable_fbdev, bool, 0444);
|
||||
MODULE_PARM_DESC(fbdev, "register fbdev device");
|
||||
|
@ -214,6 +218,12 @@ static struct pci_driver bochs_pci_driver = {
|
|||
|
||||
static int __init bochs_init(void)
|
||||
{
|
||||
if (vgacon_text_force() && bochs_modeset == -1)
|
||||
return -EINVAL;
|
||||
|
||||
if (bochs_modeset == 0)
|
||||
return -EINVAL;
|
||||
|
||||
return drm_pci_init(&bochs_driver, &bochs_pci_driver);
|
||||
}
|
||||
|
||||
|
|
|
@ -169,8 +169,7 @@ int bochs_fbdev_init(struct bochs_device *bochs)
|
|||
drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
|
||||
&bochs_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
|
||||
1, 1);
|
||||
ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
The diff for this file is not shown because of its size.
|
@ -353,7 +353,7 @@
|
|||
#define REG_TTXNUMB 0x0116
|
||||
#define MSK_TTXNUMB_TTX_AFFCTRL_3_0 0xf0
|
||||
#define BIT_TTXNUMB_TTX_COM1_AT_SYNC_WAIT BIT(3)
|
||||
#define MSK_TTXNUMB_TTX_NUMBPS_2_0 0x07
|
||||
#define MSK_TTXNUMB_TTX_NUMBPS 0x07
|
||||
|
||||
/* TDM TX NUMSPISYM, default value: 0x04 */
|
||||
#define REG_TTXSPINUMS 0x0117
|
||||
|
@ -403,12 +403,16 @@
|
|||
|
||||
/* TDM RX Status 2nd, default value: 0x00 */
|
||||
#define REG_TRXSTA2 0x015c
|
||||
#define MSK_TDM_SYNCHRONIZED 0xc0
|
||||
#define VAL_TDM_SYNCHRONIZED 0x80
|
||||
|
||||
/* TDM RX INT Low, default value: 0x00 */
|
||||
#define REG_TRXINTL 0x0163
|
||||
|
||||
/* TDM RX INT High, default value: 0x00 */
|
||||
#define REG_TRXINTH 0x0164
|
||||
#define BIT_TDM_INTR_SYNC_DATA BIT(0)
|
||||
#define BIT_TDM_INTR_SYNC_WAIT BIT(1)
|
||||
|
||||
/* TDM RX INTMASK High, default value: 0x00 */
|
||||
#define REG_TRXINTMH 0x0166
|
||||
|
@ -429,12 +433,14 @@
|
|||
|
||||
/* HSIC Keeper, default value: 0x00 */
|
||||
#define REG_KEEPER 0x0181
|
||||
#define MSK_KEEPER_KEEPER_MODE_1_0 0x03
|
||||
#define MSK_KEEPER_MODE 0x03
|
||||
#define VAL_KEEPER_MODE_HOST 0
|
||||
#define VAL_KEEPER_MODE_DEVICE 2
|
||||
|
||||
/* HSIC Flow Control General, default value: 0x02 */
|
||||
#define REG_FCGC 0x0183
|
||||
#define BIT_FCGC_HSIC_FC_HOSTMODE BIT(1)
|
||||
#define BIT_FCGC_HSIC_FC_ENABLE BIT(0)
|
||||
#define BIT_FCGC_HSIC_HOSTMODE BIT(1)
|
||||
#define BIT_FCGC_HSIC_ENABLE BIT(0)
|
||||
|
||||
/* HSIC Flow Control CTR13, default value: 0xfc */
|
||||
#define REG_FCCTR13 0x0191
|
||||
|
@ -841,6 +847,8 @@
|
|||
#define MSK_MHL_DP_CTL7_DT_DRV_VBIAS_CASCTL 0xf0
|
||||
#define MSK_MHL_DP_CTL7_DT_DRV_IREF_CTL 0x0f
|
||||
|
||||
#define REG_MHL_DP_CTL8 0x0352
|
||||
|
||||
/* Tx Zone Ctl1, default value: 0x00 */
|
||||
#define REG_TX_ZONE_CTL1 0x0361
|
||||
#define VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE 0x08
|
||||
|
@ -1078,16 +1086,26 @@
|
|||
|
||||
/* TPI Info Frame Select, default value: 0x00 */
|
||||
#define REG_TPI_INFO_FSEL 0x06bf
|
||||
#define BIT_TPI_INFO_FSEL_TPI_INFO_EN BIT(7)
|
||||
#define BIT_TPI_INFO_FSEL_TPI_INFO_RPT BIT(6)
|
||||
#define BIT_TPI_INFO_FSEL_TPI_INFO_READ_FLAG BIT(5)
|
||||
#define MSK_TPI_INFO_FSEL_TPI_INFO_SEL 0x07
|
||||
#define BIT_TPI_INFO_FSEL_EN BIT(7)
|
||||
#define BIT_TPI_INFO_FSEL_RPT BIT(6)
|
||||
#define BIT_TPI_INFO_FSEL_READ_FLAG BIT(5)
|
||||
#define MSK_TPI_INFO_FSEL_PKT 0x07
|
||||
#define VAL_TPI_INFO_FSEL_AVI 0x00
|
||||
#define VAL_TPI_INFO_FSEL_SPD 0x01
|
||||
#define VAL_TPI_INFO_FSEL_AUD 0x02
|
||||
#define VAL_TPI_INFO_FSEL_MPG 0x03
|
||||
#define VAL_TPI_INFO_FSEL_GEN 0x04
|
||||
#define VAL_TPI_INFO_FSEL_GEN2 0x05
|
||||
#define VAL_TPI_INFO_FSEL_VSI 0x06
|
||||
|
||||
/* TPI Info Byte #0, default value: 0x00 */
|
||||
#define REG_TPI_INFO_B0 0x06c0
|
||||
|
||||
/* CoC Status, default value: 0x00 */
|
||||
#define REG_COC_STAT_0 0x0700
|
||||
#define BIT_COC_STAT_0_PLL_LOCKED BIT(7)
|
||||
#define MSK_COC_STAT_0_FSM_STATE 0x0f
|
||||
|
||||
#define REG_COC_STAT_1 0x0701
|
||||
#define REG_COC_STAT_2 0x0702
|
||||
#define REG_COC_STAT_3 0x0703
|
||||
|
@ -1282,14 +1300,14 @@
|
|||
|
||||
/* MDT Transmit Control, default value: 0x70 */
|
||||
#define REG_MDT_XMIT_CTRL 0x0588
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XMIT_EN BIT(7)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XMIT_CMD_MERGE_EN BIT(6)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_BURST_LEN BIT(5)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XMIT_FIXED_AID BIT(4)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XMIT_SINGLE_RUN_EN BIT(3)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_CLR_ABORT_WAIT BIT(2)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_ALL BIT(1)
|
||||
#define BIT_MDT_XMIT_CTRL_MDT_XFIFO_CLR_CUR BIT(0)
|
||||
#define BIT_MDT_XMIT_CTRL_EN BIT(7)
|
||||
#define BIT_MDT_XMIT_CTRL_CMD_MERGE_EN BIT(6)
|
||||
#define BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN BIT(5)
|
||||
#define BIT_MDT_XMIT_CTRL_FIXED_AID BIT(4)
|
||||
#define BIT_MDT_XMIT_CTRL_SINGLE_RUN_EN BIT(3)
|
||||
#define BIT_MDT_XMIT_CTRL_CLR_ABORT_WAIT BIT(2)
|
||||
#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_ALL BIT(1)
|
||||
#define BIT_MDT_XMIT_CTRL_XFIFO_CLR_CUR BIT(0)
|
||||
|
||||
/* MDT Receive WRITE Port, default value: 0x00 */
|
||||
#define REG_MDT_XMIT_WRITE_PORT 0x0589
|
||||
|
|
|
@ -289,7 +289,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
|
|||
&cirrus_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
|
||||
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
|
||||
CIRRUSFB_CONN_LIMIT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -307,9 +307,8 @@ static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
|
|||
* @state: the CRTC whose incoming state to update
|
||||
* @mode: kernel-internal mode to use for the CRTC, or NULL to disable
|
||||
*
|
||||
* Set a mode (originating from the kernel) on the desired CRTC state. Does
|
||||
* not change any other state properties, including enable, active, or
|
||||
* mode_changed.
|
||||
* Set a mode (originating from the kernel) on the desired CRTC state and update
|
||||
* the enable property.
|
||||
*
|
||||
* RETURNS:
|
||||
* Zero on success, error code on failure. Cannot return -EDEADLK.
|
||||
|
|
|
@ -369,7 +369,7 @@ mode_fixup(struct drm_atomic_state *state)
|
|||
struct drm_connector *connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
int i;
|
||||
bool ret;
|
||||
int ret;
|
||||
|
||||
for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
if (!crtc_state->mode_changed &&
|
||||
|
|
|
@@ -87,6 +87,30 @@
 * "GAMMA_LUT" property above.
 */

/**
 * drm_color_lut_extract - clamp and round LUT entries
 * @user_input: input value
 * @bit_precision: number of bits the hw LUT supports
 *
 * Extract a degamma/gamma LUT value provided by user (in the form of
 * &drm_color_lut entries) and round it to the precision supported by the
 * hardware.
 */
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
{
	uint32_t val = user_input;
	uint32_t max = 0xffff >> (16 - bit_precision);

	/* Round only if we're not using full precision. */
	if (bit_precision < 16) {
		val += 1UL << (16 - bit_precision - 1);
		val >>= 16 - bit_precision;
	}

	return clamp_val(val, 0, max);
}
EXPORT_SYMBOL(drm_color_lut_extract);

/**
 * drm_crtc_enable_color_mgmt - enable color management properties
 * @crtc: DRM CRTC

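As a rough illustration of how the new helper might be consumed, the sketch below loads a hypothetical 8-entry, 10-bit hardware gamma LUT from the GAMMA_LUT blob. foo_load_gamma() and foo_write_lut_entry() are invented names and the LUT depth is an assumption; only drm_color_lut_extract() comes from the hunk above.

/* Sketch only: not part of this patch. */
static void foo_load_gamma(struct drm_crtc_state *state)
{
	struct drm_color_lut *lut;
	int i;

	if (!state->gamma_lut)
		return;

	lut = (struct drm_color_lut *)state->gamma_lut->data;

	for (i = 0; i < 8; i++) {
		u32 r = drm_color_lut_extract(lut[i].red, 10);
		u32 g = drm_color_lut_extract(lut[i].green, 10);
		u32 b = drm_color_lut_extract(lut[i].blue, 10);

		/* e.g. 0x8000 rounds to 0x200; 0xffff clamps to 0x3ff */
		foo_write_lut_entry(i, r, g, b);	/* hypothetical MMIO helper */
	}
}
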
@ -207,3 +207,6 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev,
|
|||
void *data, struct drm_file *file_priv);
|
||||
int drm_mode_page_flip_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *file_priv);
|
||||
|
||||
/* drm_edid.c */
|
||||
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
|
||||
|
|
|
@@ -465,7 +465,10 @@ static void drm_fs_inode_free(struct inode *inode)
 * that do embed &struct drm_device it must be placed first in the overall
 * structure, and the overall structure must be allocated using kmalloc(): The
 * drm core's release function unconditionally calls kfree() on the @dev pointer
 * when the final reference is released.
 * when the final reference is released. To override this behaviour, and so
 * allow embedding of the drm_device inside the driver's device struct at an
 * arbitrary offset, you must supply a &drm_driver.release callback and control
 * the finalization explicitly.
 *
 * RETURNS:
 * 0 on success, or error code on failure.

@@ -552,6 +555,41 @@ err_free:
}
EXPORT_SYMBOL(drm_dev_init);

/**
 * drm_dev_fini - Finalize a dead DRM device
 * @dev: DRM device
 *
 * Finalize a dead DRM device. This is the converse to drm_dev_init() and
 * frees up all data allocated by it. All driver private data should be
 * finalized first. Note that this function does not free the @dev, that is
 * left to the caller.
 *
 * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
 * from a &drm_driver.release callback.
 */
void drm_dev_fini(struct drm_device *dev)
{
	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for

@@ -598,25 +636,12 @@ static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	kfree(dev->unique);
	kfree(dev);
	if (dev->driver->release) {
		dev->driver->release(dev);
	} else {
		drm_dev_fini(dev);
		kfree(dev);
	}
}

/**

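A minimal sketch of how a driver that embeds struct drm_device at a non-zero offset could use the new hook, following the drm_dev_fini() kernel-doc above. The foo_* names are purely illustrative; only drm_dev_fini() and the &drm_driver.release callback come from this patch.

/* Sketch only, assuming a hypothetical foo driver. */
struct foo_device {
	void __iomem *mmio;		/* hypothetical driver state */
	struct drm_device drm;		/* embedded, not necessarily first */
};

static void foo_release(struct drm_device *drm)
{
	struct foo_device *foo = container_of(drm, struct foo_device, drm);

	/* drm_dev_fini() tears down DRM core state but does not free @drm... */
	drm_dev_fini(drm);
	/* ...so the containing allocation is freed by the driver itself. */
	kfree(foo);
}

static struct drm_driver foo_driver = {
	/* ... */
	.release = foo_release,
};
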
@ -38,6 +38,8 @@
|
|||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_displayid.h>
|
||||
|
||||
#include "drm_crtc_internal.h"
|
||||
|
||||
#define version_greater(edid, maj, min) \
|
||||
(((edid)->version > (maj)) || \
|
||||
((edid)->version == (maj) && (edid)->revision > (min)))
|
||||
|
@ -2153,7 +2155,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
|
|||
/* fix up 1366x768 mode from 1368x768;
|
||||
* GTF/CVT can't express 1366 width which isn't divisible by 8
|
||||
*/
|
||||
static void fixup_mode_1366x768(struct drm_display_mode *mode)
|
||||
void drm_mode_fixup_1366x768(struct drm_display_mode *mode)
|
||||
{
|
||||
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
|
||||
mode->hdisplay = 1366;
|
||||
|
@ -2177,7 +2179,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
|
|||
if (!newmode)
|
||||
return modes;
|
||||
|
||||
fixup_mode_1366x768(newmode);
|
||||
drm_mode_fixup_1366x768(newmode);
|
||||
if (!mode_in_range(newmode, edid, timing) ||
|
||||
!valid_inferred_mode(connector, newmode)) {
|
||||
drm_mode_destroy(dev, newmode);
|
||||
|
@ -2206,7 +2208,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
|
|||
if (!newmode)
|
||||
return modes;
|
||||
|
||||
fixup_mode_1366x768(newmode);
|
||||
drm_mode_fixup_1366x768(newmode);
|
||||
if (!mode_in_range(newmode, edid, timing) ||
|
||||
!valid_inferred_mode(connector, newmode)) {
|
||||
drm_mode_destroy(dev, newmode);
|
||||
|
|
|
@ -489,15 +489,14 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
|
|||
* drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
|
||||
* @dev: DRM device
|
||||
* @preferred_bpp: Preferred bits per pixel for the device
|
||||
* @num_crtc: Number of CRTCs
|
||||
* @max_conn_count: Maximum number of connectors
|
||||
* @funcs: fb helper functions, in particular a custom dirty() callback
|
||||
*
|
||||
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
|
||||
*/
|
||||
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
|
||||
unsigned int preferred_bpp, unsigned int num_crtc,
|
||||
unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs)
|
||||
unsigned int preferred_bpp, unsigned int max_conn_count,
|
||||
const struct drm_framebuffer_funcs *funcs)
|
||||
{
|
||||
struct drm_fbdev_cma *fbdev_cma;
|
||||
struct drm_fb_helper *helper;
|
||||
|
@ -514,7 +513,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
|
|||
|
||||
drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
|
||||
ret = drm_fb_helper_init(dev, helper, max_conn_count);
|
||||
if (ret < 0) {
|
||||
dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
|
||||
goto err_free;
|
||||
|
@ -554,11 +553,11 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
|
|||
* Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
|
||||
*/
|
||||
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
|
||||
unsigned int preferred_bpp, unsigned int num_crtc,
|
||||
unsigned int max_conn_count)
|
||||
unsigned int preferred_bpp, unsigned int max_conn_count)
|
||||
{
|
||||
return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
|
||||
max_conn_count, &drm_fb_cma_funcs);
|
||||
return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
|
||||
max_conn_count,
|
||||
&drm_fb_cma_funcs);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
|
||||
|
||||
|
|
|
@ -712,7 +712,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
|
|||
* drm_fb_helper_init - initialize a drm_fb_helper structure
|
||||
* @dev: drm device
|
||||
* @fb_helper: driver-allocated fbdev helper structure to initialize
|
||||
* @crtc_count: maximum number of crtcs to support in this fbdev emulation
|
||||
* @max_conn_count: max connector count
|
||||
*
|
||||
* This allocates the structures for the fbdev helper with the given limits.
|
||||
|
@ -727,9 +726,10 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
|
|||
*/
|
||||
int drm_fb_helper_init(struct drm_device *dev,
|
||||
struct drm_fb_helper *fb_helper,
|
||||
int crtc_count, int max_conn_count)
|
||||
int max_conn_count)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_mode_config *config = &dev->mode_config;
|
||||
int i;
|
||||
|
||||
if (!drm_fbdev_emulation)
|
||||
|
@ -738,11 +738,11 @@ int drm_fb_helper_init(struct drm_device *dev,
|
|||
if (!max_conn_count)
|
||||
return -EINVAL;
|
||||
|
||||
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
|
||||
fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
|
||||
if (!fb_helper->crtc_info)
|
||||
return -ENOMEM;
|
||||
|
||||
fb_helper->crtc_count = crtc_count;
|
||||
fb_helper->crtc_count = config->num_crtc;
|
||||
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
|
||||
if (!fb_helper->connector_info) {
|
||||
kfree(fb_helper->crtc_info);
|
||||
|
@ -751,7 +751,7 @@ int drm_fb_helper_init(struct drm_device *dev,
|
|||
fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
|
||||
fb_helper->connector_count = 0;
|
||||
|
||||
for (i = 0; i < crtc_count; i++) {
|
||||
for (i = 0; i < fb_helper->crtc_count; i++) {
|
||||
fb_helper->crtc_info[i].mode_set.connectors =
|
||||
kcalloc(max_conn_count,
|
||||
sizeof(struct drm_connector *),
|
||||
|
@ -860,6 +860,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
|
|||
if (!drm_fbdev_emulation)
|
||||
return;
|
||||
|
||||
cancel_work_sync(&fb_helper->resume_work);
|
||||
cancel_work_sync(&fb_helper->dirty_work);
|
||||
|
||||
mutex_lock(&kernel_fb_helper_lock);
|
||||
if (!list_empty(&fb_helper->kernel_fb_list)) {
|
||||
list_del(&fb_helper->kernel_fb_list);
|
||||
|
|
|
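For driver call sites, the drm_fb_helper_init() change above simply drops the CRTC count argument, since the helper now sizes crtc_info from dev->mode_config.num_crtc itself. A rough sketch of the updated fbdev setup sequence (foo_* names and the bpp value are illustrative assumptions):

/* Sketch only: not part of this patch. */
static int foo_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, &foo_fb_helper_funcs);

	/* no crtc_count argument any more, only the per-CRTC connector limit */
	ret = drm_fb_helper_init(dev, helper, 1);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}
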
@ -97,14 +97,6 @@
|
|||
* locking would be fully redundant.
|
||||
*/
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
|
||||
u64 size,
|
||||
u64 alignment,
|
||||
unsigned long color,
|
||||
u64 start,
|
||||
u64 end,
|
||||
enum drm_mm_search_flags flags);
|
||||
|
||||
#ifdef CONFIG_DRM_DEBUG_MM
|
||||
#include <linux/stackdepot.h>
|
||||
|
||||
|
@ -226,69 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
|
|||
&drm_mm_interval_tree_augment);
|
||||
}
|
||||
|
||||
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
|
||||
struct drm_mm_node *node,
|
||||
u64 size, u64 alignment,
|
||||
unsigned long color,
|
||||
u64 range_start, u64 range_end,
|
||||
enum drm_mm_allocator_flags flags)
|
||||
#define RB_INSERT(root, member, expr) do { \
|
||||
struct rb_node **link = &root.rb_node, *rb = NULL; \
|
||||
u64 x = expr(node); \
|
||||
while (*link) { \
|
||||
rb = *link; \
|
||||
if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
|
||||
link = &rb->rb_left; \
|
||||
else \
|
||||
link = &rb->rb_right; \
|
||||
} \
|
||||
rb_link_node(&node->member, rb, link); \
|
||||
rb_insert_color(&node->member, &root); \
|
||||
} while (0)
|
||||
|
||||
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
|
||||
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
|
||||
|
||||
static void add_hole(struct drm_mm_node *node)
|
||||
{
|
||||
struct drm_mm *mm = hole_node->mm;
|
||||
u64 hole_start = drm_mm_hole_node_start(hole_node);
|
||||
u64 hole_end = drm_mm_hole_node_end(hole_node);
|
||||
u64 adj_start = hole_start;
|
||||
u64 adj_end = hole_end;
|
||||
struct drm_mm *mm = node->mm;
|
||||
|
||||
DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
|
||||
node->hole_size =
|
||||
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
|
||||
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
|
||||
|
||||
if (mm->color_adjust)
|
||||
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
|
||||
RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
|
||||
RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
|
||||
|
||||
adj_start = max(adj_start, range_start);
|
||||
adj_end = min(adj_end, range_end);
|
||||
list_add(&node->hole_stack, &mm->hole_stack);
|
||||
}
|
||||
|
||||
if (flags & DRM_MM_CREATE_TOP)
|
||||
adj_start = adj_end - size;
|
||||
static void rm_hole(struct drm_mm_node *node)
|
||||
{
|
||||
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
|
||||
|
||||
if (alignment) {
|
||||
u64 rem;
|
||||
list_del(&node->hole_stack);
|
||||
rb_erase(&node->rb_hole_size, &node->mm->holes_size);
|
||||
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
|
||||
node->hole_size = 0;
|
||||
|
||||
div64_u64_rem(adj_start, alignment, &rem);
|
||||
if (rem) {
|
||||
if (flags & DRM_MM_CREATE_TOP)
|
||||
adj_start -= rem;
|
||||
else
|
||||
adj_start += alignment - rem;
|
||||
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
|
||||
}
|
||||
|
||||
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
|
||||
{
|
||||
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
|
||||
}
|
||||
|
||||
static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
|
||||
{
|
||||
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
|
||||
}
|
||||
|
||||
static inline u64 rb_hole_size(struct rb_node *rb)
|
||||
{
|
||||
return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
|
||||
}
|
||||
|
||||
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
|
||||
{
|
||||
struct rb_node *best = NULL;
|
||||
struct rb_node **link = &mm->holes_size.rb_node;
|
||||
|
||||
while (*link) {
|
||||
struct rb_node *rb = *link;
|
||||
|
||||
if (size <= rb_hole_size(rb)) {
|
||||
link = &rb->rb_left;
|
||||
best = rb;
|
||||
} else {
|
||||
link = &rb->rb_right;
|
||||
}
|
||||
}
|
||||
|
||||
if (adj_start == hole_start) {
|
||||
hole_node->hole_follows = 0;
|
||||
list_del(&hole_node->hole_stack);
|
||||
return rb_hole_size_to_node(best);
|
||||
}
|
||||
|
||||
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
|
||||
{
|
||||
struct drm_mm_node *node = NULL;
|
||||
struct rb_node **link = &mm->holes_addr.rb_node;
|
||||
|
||||
while (*link) {
|
||||
u64 hole_start;
|
||||
|
||||
node = rb_hole_addr_to_node(*link);
|
||||
hole_start = __drm_mm_hole_node_start(node);
|
||||
|
||||
if (addr < hole_start)
|
||||
link = &node->rb_hole_addr.rb_left;
|
||||
else if (addr > hole_start + node->hole_size)
|
||||
link = &node->rb_hole_addr.rb_right;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
node->start = adj_start;
|
||||
node->size = size;
|
||||
node->mm = mm;
|
||||
node->color = color;
|
||||
node->allocated = 1;
|
||||
return node;
|
||||
}
|
||||
|
||||
list_add(&node->node_list, &hole_node->node_list);
|
||||
static struct drm_mm_node *
|
||||
first_hole(struct drm_mm *mm,
|
||||
u64 start, u64 end, u64 size,
|
||||
enum drm_mm_insert_mode mode)
|
||||
{
|
||||
if (RB_EMPTY_ROOT(&mm->holes_size))
|
||||
return NULL;
|
||||
|
||||
drm_mm_interval_tree_add_node(hole_node, node);
|
||||
switch (mode) {
|
||||
default:
|
||||
case DRM_MM_INSERT_BEST:
|
||||
return best_hole(mm, size);
|
||||
|
||||
DRM_MM_BUG_ON(node->start < range_start);
|
||||
DRM_MM_BUG_ON(node->start < adj_start);
|
||||
DRM_MM_BUG_ON(node->start + node->size > adj_end);
|
||||
DRM_MM_BUG_ON(node->start + node->size > range_end);
|
||||
case DRM_MM_INSERT_LOW:
|
||||
return find_hole(mm, start);
|
||||
|
||||
node->hole_follows = 0;
|
||||
if (__drm_mm_hole_node_start(node) < hole_end) {
|
||||
list_add(&node->hole_stack, &mm->hole_stack);
|
||||
node->hole_follows = 1;
|
||||
case DRM_MM_INSERT_HIGH:
|
||||
return find_hole(mm, end);
|
||||
|
||||
case DRM_MM_INSERT_EVICT:
|
||||
return list_first_entry_or_null(&mm->hole_stack,
|
||||
struct drm_mm_node,
|
||||
hole_stack);
|
||||
}
|
||||
}
|
||||
|
||||
save_stack(node);
|
||||
static struct drm_mm_node *
|
||||
next_hole(struct drm_mm *mm,
|
||||
struct drm_mm_node *node,
|
||||
enum drm_mm_insert_mode mode)
|
||||
{
|
||||
switch (mode) {
|
||||
default:
|
||||
case DRM_MM_INSERT_BEST:
|
||||
return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
|
||||
|
||||
case DRM_MM_INSERT_LOW:
|
||||
return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
|
||||
|
||||
case DRM_MM_INSERT_HIGH:
|
||||
return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
|
||||
|
||||
case DRM_MM_INSERT_EVICT:
|
||||
node = list_next_entry(node, hole_stack);
|
||||
return &node->hole_stack == &mm->hole_stack ? NULL : node;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -317,21 +391,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

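As context for the simplified hole lookup above, a caller pre-reserving a fixed range (for example a firmware-initialized scanout buffer) still passes drm_mm_reserve_node() a cleared node with start and size filled in. The sketch below is illustrative only; the function name and size are assumptions.

/* Sketch only: not part of this patch. */
static int foo_reserve_firmware_fb(struct drm_mm *mm, struct drm_mm_node *node)
{
	/* the node must be cleared apart from start, size and color */
	memset(node, 0, sizeof(*node));
	node->start = 0;
	node->size = 8 << 20;	/* hypothetical 8 MiB firmware framebuffer */

	/* returns -ENOSPC if the range already overlaps an allocation */
	return drm_mm_reserve_node(mm, node);
}
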
@ -340,70 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
|
|||
return -ENOSPC;
|
||||
|
||||
node->mm = mm;
|
||||
node->allocated = 1;
|
||||
|
||||
list_add(&node->node_list, &hole->node_list);
|
||||
|
||||
drm_mm_interval_tree_add_node(hole, node);
|
||||
node->allocated = true;
|
||||
node->hole_size = 0;
|
||||
|
||||
if (node->start == hole_start) {
|
||||
hole->hole_follows = 0;
|
||||
list_del(&hole->hole_stack);
|
||||
}
|
||||
|
||||
node->hole_follows = 0;
|
||||
if (end != hole_end) {
|
||||
list_add(&node->hole_stack, &mm->hole_stack);
|
||||
node->hole_follows = 1;
|
||||
}
|
||||
rm_hole(hole);
|
||||
if (node->start > hole_start)
|
||||
add_hole(hole);
|
||||
if (end < hole_end)
|
||||
add_hole(node);
|
||||
|
||||
save_stack(node);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_reserve_node);
|
||||
|
||||
/**
|
||||
* drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
|
||||
* drm_mm_insert_node_in_range - ranged search for space and insert @node
|
||||
* @mm: drm_mm to allocate from
|
||||
* @node: preallocate node to insert
|
||||
* @size: size of the allocation
|
||||
* @alignment: alignment of the allocation
|
||||
* @color: opaque tag value to use for this node
|
||||
* @start: start of the allowed range for this node
|
||||
* @end: end of the allowed range for this node
|
||||
* @sflags: flags to fine-tune the allocation search
|
||||
* @aflags: flags to fine-tune the allocation behavior
|
||||
* @range_start: start of the allowed range for this node
|
||||
* @range_end: end of the allowed range for this node
|
||||
* @mode: fine-tune the allocation search and placement
|
||||
*
|
||||
* The preallocated @node must be cleared to 0.
|
||||
*
|
||||
* Returns:
|
||||
* 0 on success, -ENOSPC if there's no suitable hole.
|
||||
*/
|
||||
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
|
||||
u64 size, u64 alignment,
|
||||
unsigned long color,
|
||||
u64 start, u64 end,
|
||||
enum drm_mm_search_flags sflags,
|
||||
enum drm_mm_allocator_flags aflags)
|
||||
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
|
||||
struct drm_mm_node * const node,
|
||||
u64 size, u64 alignment,
|
||||
unsigned long color,
|
||||
u64 range_start, u64 range_end,
|
||||
enum drm_mm_insert_mode mode)
|
||||
{
|
||||
struct drm_mm_node *hole_node;
|
||||
struct drm_mm_node *hole;
|
||||
u64 remainder_mask;
|
||||
|
||||
if (WARN_ON(size == 0))
|
||||
return -EINVAL;
|
||||
DRM_MM_BUG_ON(range_start >= range_end);
|
||||
|
||||
hole_node = drm_mm_search_free_in_range_generic(mm,
|
||||
size, alignment, color,
|
||||
start, end, sflags);
|
||||
if (!hole_node)
|
||||
if (unlikely(size == 0 || range_end - range_start < size))
|
||||
return -ENOSPC;
|
||||
|
||||
drm_mm_insert_helper(hole_node, node,
|
||||
size, alignment, color,
|
||||
start, end, aflags);
|
||||
return 0;
|
||||
if (alignment <= 1)
|
||||
alignment = 0;
|
||||
|
||||
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
|
||||
for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
|
||||
hole = next_hole(mm, hole, mode)) {
|
||||
u64 hole_start = __drm_mm_hole_node_start(hole);
|
||||
u64 hole_end = hole_start + hole->hole_size;
|
||||
u64 adj_start, adj_end;
|
||||
u64 col_start, col_end;
|
||||
|
||||
if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
|
||||
break;
|
||||
|
||||
if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
|
||||
break;
|
||||
|
||||
col_start = hole_start;
|
||||
col_end = hole_end;
|
||||
if (mm->color_adjust)
|
||||
mm->color_adjust(hole, color, &col_start, &col_end);
|
||||
|
||||
adj_start = max(col_start, range_start);
|
||||
adj_end = min(col_end, range_end);
|
||||
|
||||
if (adj_end <= adj_start || adj_end - adj_start < size)
|
||||
continue;
|
||||
|
||||
if (mode == DRM_MM_INSERT_HIGH)
|
||||
adj_start = adj_end - size;
|
||||
|
||||
if (alignment) {
|
||||
u64 rem;
|
||||
|
||||
if (likely(remainder_mask))
|
||||
rem = adj_start & remainder_mask;
|
||||
else
|
||||
div64_u64_rem(adj_start, alignment, &rem);
|
||||
if (rem) {
|
||||
adj_start -= rem;
|
||||
if (mode != DRM_MM_INSERT_HIGH)
|
||||
adj_start += alignment;
|
||||
|
||||
if (adj_start < max(col_start, range_start) ||
|
||||
min(col_end, range_end) - adj_start < size)
|
||||
continue;
|
||||
|
||||
if (adj_end <= adj_start ||
|
||||
adj_end - adj_start < size)
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
node->mm = mm;
|
||||
node->size = size;
|
||||
node->start = adj_start;
|
||||
node->color = color;
|
||||
node->hole_size = 0;
|
||||
|
||||
list_add(&node->node_list, &hole->node_list);
|
||||
drm_mm_interval_tree_add_node(hole, node);
|
||||
node->allocated = true;
|
||||
|
||||
rm_hole(hole);
|
||||
if (adj_start > hole_start)
|
||||
add_hole(hole);
|
||||
if (adj_start + size < hole_end)
|
||||
add_hole(node);
|
||||
|
||||
save_stack(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -ENOSPC;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
|
||||
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
|
||||
|
||||
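Callers of the reworked allocator now pass a single drm_mm_insert_mode instead of the old search/allocator flag pair. A hedged example of a bottom-up allocation, with an invented wrapper name and an assumed 4 GiB aperture:

/* Sketch only: not part of this patch. */
static int foo_alloc_node(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size,
					   PAGE_SIZE,		/* alignment */
					   0,			/* color */
					   0, 1ULL << 32,	/* allowed range */
					   DRM_MM_INSERT_LOW);	/* prefer low addresses */
}
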
/**
|
||||
* drm_mm_remove_node - Remove a memory node from the allocator.
|
||||
|
@ -421,93 +546,21 @@ void drm_mm_remove_node(struct drm_mm_node *node)
|
|||
DRM_MM_BUG_ON(!node->allocated);
|
||||
DRM_MM_BUG_ON(node->scanned_block);
|
||||
|
||||
prev_node =
|
||||
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
|
||||
prev_node = list_prev_entry(node, node_list);
|
||||
|
||||
if (drm_mm_hole_follows(node)) {
|
||||
DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
|
||||
__drm_mm_hole_node_end(node));
|
||||
list_del(&node->hole_stack);
|
||||
} else {
|
||||
DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
|
||||
__drm_mm_hole_node_end(node));
|
||||
}
|
||||
|
||||
if (!drm_mm_hole_follows(prev_node)) {
|
||||
prev_node->hole_follows = 1;
|
||||
list_add(&prev_node->hole_stack, &mm->hole_stack);
|
||||
} else
|
||||
list_move(&prev_node->hole_stack, &mm->hole_stack);
|
||||
if (drm_mm_hole_follows(node))
|
||||
rm_hole(node);
|
||||
|
||||
drm_mm_interval_tree_remove(node, &mm->interval_tree);
|
||||
list_del(&node->node_list);
|
||||
node->allocated = 0;
|
||||
node->allocated = false;
|
||||
|
||||
if (drm_mm_hole_follows(prev_node))
|
||||
rm_hole(prev_node);
|
||||
add_hole(prev_node);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_remove_node);
|
||||
|
||||
static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
|
||||
{
|
||||
if (end - start < size)
|
||||
return 0;
|
||||
|
||||
if (alignment) {
|
||||
u64 rem;
|
||||
|
||||
div64_u64_rem(start, alignment, &rem);
|
||||
if (rem)
|
||||
start += alignment - rem;
|
||||
}
|
||||
|
||||
return end >= start + size;
|
||||
}
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
|
||||
u64 size,
|
||||
u64 alignment,
|
||||
unsigned long color,
|
||||
u64 start,
|
||||
u64 end,
|
||||
enum drm_mm_search_flags flags)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
struct drm_mm_node *best;
|
||||
u64 adj_start;
|
||||
u64 adj_end;
|
||||
u64 best_size;
|
||||
|
||||
DRM_MM_BUG_ON(mm->scan_active);
|
||||
|
||||
best = NULL;
|
||||
best_size = ~0UL;
|
||||
|
||||
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
|
||||
flags & DRM_MM_SEARCH_BELOW) {
|
||||
u64 hole_size = adj_end - adj_start;
|
||||
|
||||
if (mm->color_adjust) {
|
||||
mm->color_adjust(entry, color, &adj_start, &adj_end);
|
||||
if (adj_end <= adj_start)
|
||||
continue;
|
||||
}
|
||||
|
||||
adj_start = max(adj_start, start);
|
||||
adj_end = min(adj_end, end);
|
||||
|
||||
if (!check_free_hole(adj_start, adj_end, size, alignment))
|
||||
continue;
|
||||
|
||||
if (!(flags & DRM_MM_SEARCH_BEST))
|
||||
return entry;
|
||||
|
||||
if (hole_size < best_size) {
|
||||
best = entry;
|
||||
best_size = hole_size;
|
||||
}
|
||||
}
|
||||
|
||||
return best;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_mm_replace_node - move an allocation from @old to @new
|
||||
* @old: drm_mm_node to remove from the allocator
|
||||
|
@ -521,18 +574,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
|
|||
{
|
||||
DRM_MM_BUG_ON(!old->allocated);
|
||||
|
||||
list_replace(&old->node_list, &new->node_list);
|
||||
list_replace(&old->hole_stack, &new->hole_stack);
|
||||
rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
|
||||
new->hole_follows = old->hole_follows;
|
||||
new->mm = old->mm;
|
||||
new->start = old->start;
|
||||
new->size = old->size;
|
||||
new->color = old->color;
|
||||
new->__subtree_last = old->__subtree_last;
|
||||
*new = *old;
|
||||
|
||||
old->allocated = 0;
|
||||
new->allocated = 1;
|
||||
list_replace(&old->node_list, &new->node_list);
|
||||
rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
|
||||
|
||||
if (drm_mm_hole_follows(old)) {
|
||||
list_replace(&old->hole_stack, &new->hole_stack);
|
||||
rb_replace_node(&old->rb_hole_size,
|
||||
&new->rb_hole_size,
|
||||
&old->mm->holes_size);
|
||||
rb_replace_node(&old->rb_hole_addr,
|
||||
&new->rb_hole_addr,
|
||||
&old->mm->holes_addr);
|
||||
}
|
||||
|
||||
old->allocated = false;
|
||||
new->allocated = true;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_replace_node);
|
||||
|
||||
|
@ -577,7 +635,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
|
|||
* @color: opaque tag value to use for the allocation
|
||||
* @start: start of the allowed range for the allocation
|
||||
* @end: end of the allowed range for the allocation
|
||||
* @flags: flags to specify how the allocation will be performed afterwards
|
||||
* @mode: fine-tune the allocation search and placement
|
||||
*
|
||||
* This simply sets up the scanning routines with the parameters for the desired
|
||||
* hole.
|
||||
|
@ -593,7 +651,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
|
|||
unsigned long color,
|
||||
u64 start,
|
||||
u64 end,
|
||||
unsigned int flags)
|
||||
enum drm_mm_insert_mode mode)
|
||||
{
|
||||
DRM_MM_BUG_ON(start >= end);
|
||||
DRM_MM_BUG_ON(!size || size > end - start);
|
||||
|
@ -608,7 +666,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
|
|||
scan->alignment = alignment;
|
||||
scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
|
||||
scan->size = size;
|
||||
scan->flags = flags;
|
||||
scan->mode = mode;
|
||||
|
||||
DRM_MM_BUG_ON(end <= start);
|
||||
scan->range_start = start;
|
||||
|
@ -667,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
|
|||
if (adj_end <= adj_start || adj_end - adj_start < scan->size)
|
||||
return false;
|
||||
|
||||
if (scan->flags == DRM_MM_CREATE_TOP)
|
||||
if (scan->mode == DRM_MM_INSERT_HIGH)
|
||||
adj_start = adj_end - scan->size;
|
||||
|
||||
if (scan->alignment) {
|
||||
|
@ -679,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
|
|||
div64_u64_rem(adj_start, scan->alignment, &rem);
|
||||
if (rem) {
|
||||
adj_start -= rem;
|
||||
if (scan->flags != DRM_MM_CREATE_TOP)
|
||||
if (scan->mode != DRM_MM_INSERT_HIGH)
|
||||
adj_start += scan->alignment;
|
||||
if (adj_start < max(col_start, scan->range_start) ||
|
||||
min(col_end, scan->range_end) - adj_start < scan->size)
|
||||
|
@ -775,7 +833,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
|
|||
|
||||
hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
|
||||
hole_start = __drm_mm_hole_node_start(hole);
|
||||
hole_end = __drm_mm_hole_node_end(hole);
|
||||
hole_end = hole_start + hole->hole_size;
|
||||
|
||||
DRM_MM_BUG_ON(hole_start > scan->hit_start);
|
||||
DRM_MM_BUG_ON(hole_end < scan->hit_end);
|
||||
|
@ -802,21 +860,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
|
|||
{
|
||||
DRM_MM_BUG_ON(start + size <= start);
|
||||
|
||||
mm->color_adjust = NULL;
|
||||
|
||||
INIT_LIST_HEAD(&mm->hole_stack);
|
||||
mm->scan_active = 0;
|
||||
mm->interval_tree = RB_ROOT;
|
||||
mm->holes_size = RB_ROOT;
|
||||
mm->holes_addr = RB_ROOT;
|
||||
|
||||
/* Clever trick to avoid a special case in the free hole tracking. */
|
||||
INIT_LIST_HEAD(&mm->head_node.node_list);
|
||||
mm->head_node.allocated = 0;
|
||||
mm->head_node.hole_follows = 1;
|
||||
mm->head_node.allocated = false;
|
||||
mm->head_node.mm = mm;
|
||||
mm->head_node.start = start + size;
|
||||
mm->head_node.size = start - mm->head_node.start;
|
||||
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
|
||||
mm->head_node.size = -size;
|
||||
add_hole(&mm->head_node);
|
||||
|
||||
mm->interval_tree = RB_ROOT;
|
||||
|
||||
mm->color_adjust = NULL;
|
||||
mm->scan_active = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_init);
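The reworked drm_mm_init() above only changes the internal hole bookkeeping (a negative-sized head node feeding the new hole rbtrees); the external lifecycle is unchanged. A small self-contained sketch, with an arbitrary 16 MiB range chosen for illustration:

/* Sketch: init a range manager, carve out one node, tear it down again. */
static int example_mm_lifecycle(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};
	int ret;

	drm_mm_init(&mm, 0, SZ_16M);

	ret = drm_mm_insert_node(&mm, &node, SZ_64K);
	if (!ret)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);	/* requires all nodes to have been removed */
	return ret;
}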
|
||||
|
||||
|
@ -837,20 +896,17 @@ EXPORT_SYMBOL(drm_mm_takedown);
|
|||
|
||||
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
|
||||
{
|
||||
u64 hole_start, hole_end, hole_size;
|
||||
u64 start, size;
|
||||
|
||||
if (entry->hole_follows) {
|
||||
hole_start = drm_mm_hole_node_start(entry);
|
||||
hole_end = drm_mm_hole_node_end(entry);
|
||||
hole_size = hole_end - hole_start;
|
||||
drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
|
||||
hole_end, hole_size);
|
||||
return hole_size;
|
||||
size = entry->hole_size;
|
||||
if (size) {
|
||||
start = drm_mm_hole_node_start(entry);
|
||||
drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
|
||||
start, start + size, size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return size;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_mm_print - print allocator state
|
||||
* @mm: drm_mm allocator to print
|
||||
|
|
|
@ -1481,12 +1481,8 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
|
|||
|
||||
mode->type |= DRM_MODE_TYPE_USERDEF;
|
||||
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
|
||||
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
|
||||
mode->hdisplay = 1366;
|
||||
mode->hsync_start--;
|
||||
mode->hsync_end--;
|
||||
drm_mode_set_name(mode);
|
||||
}
|
||||
if (cmd->xres == 1366)
|
||||
drm_mode_fixup_1366x768(mode);
|
||||
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
|
||||
return mode;
|
||||
}
|
||||
|
|
|
@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
goto out_unlock;
}

ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
pages, 0, DRM_MM_SEARCH_DEFAULT);
ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
if (ret)
goto out_unlock;
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
etnaviv-y := \
|
||||
etnaviv_buffer.o \
|
||||
etnaviv_cmd_parser.o \
|
||||
etnaviv_cmdbuf.o \
|
||||
etnaviv_drv.o \
|
||||
etnaviv_dump.o \
|
||||
etnaviv_gem_prime.o \
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
|
@ -125,7 +126,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
|
|||
u32 *ptr = buf->vaddr + off;
|
||||
|
||||
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
|
||||
ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
|
||||
ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
|
||||
|
||||
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
|
||||
ptr, len * 4, 0);
|
||||
|
@ -158,7 +159,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
|
|||
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
|
||||
buffer->user_size = 0;
|
||||
|
||||
return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
|
||||
return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
|
||||
}
|
||||
|
||||
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
|
||||
|
@ -169,7 +170,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
|
|||
buffer->user_size = 0;
|
||||
|
||||
CMD_WAIT(buffer);
|
||||
CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
|
||||
CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
|
||||
buffer->user_size - 4);
|
||||
|
||||
return buffer->user_size / 8;
|
||||
|
@ -261,7 +262,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
|||
if (drm_debug & DRM_UT_DRIVER)
|
||||
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
|
||||
|
||||
link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
|
||||
link_target = etnaviv_cmdbuf_get_va(cmdbuf);
|
||||
link_dwords = cmdbuf->size / 8;
|
||||
|
||||
/*
|
||||
|
@ -355,12 +356,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
|||
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
|
||||
VIVS_GL_EVENT_FROM_PE);
|
||||
CMD_WAIT(buffer);
|
||||
CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
|
||||
CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
|
||||
buffer->user_size - 4);
|
||||
|
||||
if (drm_debug & DRM_UT_DRIVER)
|
||||
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
|
||||
return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
|
||||
return_target, etnaviv_cmdbuf_get_va(cmdbuf),
|
||||
cmdbuf->vaddr);
|
||||
|
||||
if (drm_debug & DRM_UT_DRIVER) {
|
||||
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
|
||||
|
|
|
@ -56,6 +56,8 @@ static const struct {
|
|||
ST(0x0644, 1),
|
||||
ST(0x064c, 1),
|
||||
ST(0x0680, 8),
|
||||
ST(0x086c, 1),
|
||||
ST(0x1028, 1),
|
||||
ST(0x1410, 1),
|
||||
ST(0x1430, 1),
|
||||
ST(0x1458, 1),
|
||||
|
@ -73,8 +75,12 @@ static const struct {
|
|||
ST(0x16c0, 8),
|
||||
ST(0x16e0, 8),
|
||||
ST(0x1740, 8),
|
||||
ST(0x17c0, 8),
|
||||
ST(0x17e0, 8),
|
||||
ST(0x2400, 14 * 16),
|
||||
ST(0x10800, 32 * 16),
|
||||
ST(0x14600, 16),
|
||||
ST(0x14800, 8 * 8),
|
||||
#undef ST
|
||||
};
|
||||
|
||||
|
|
|
@ -0,0 +1,153 @@
|
|||
/*
|
||||
* Copyright (C) 2017 Etnaviv Project
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <drm/drm_mm.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

#define SUBALLOC_SIZE SZ_256K
#define SUBALLOC_GRANULE SZ_4K
#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)

struct etnaviv_cmdbuf_suballoc {
/* suballocated dma buffer properties */
struct etnaviv_gpu *gpu;
void *vaddr;
dma_addr_t paddr;

/* GPU mapping */
u32 iova;
struct drm_mm_node vram_node; /* only used on MMUv2 */

/* allocation management */
struct mutex lock;
DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
int free_space;
wait_queue_head_t free_event;
};
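A quick sanity check of the constants just defined: the 256 KiB pool holds 64 granules of 4 KiB, and etnaviv_cmdbuf_new() below rounds each request up to a power-of-two run of granules. The 12 KiB request in this sketch is an arbitrary example, not a value from the patch:

/* Sketch: a 12 KiB request -> 3 granules -> order 2 -> 4 granules reserved. */
static unsigned int example_granules_for(u32 size)
{
	unsigned int order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) /
					  SUBALLOC_GRANULE);

	return 1U << order;
}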
|
||||
|
||||
struct etnaviv_cmdbuf_suballoc *
|
||||
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
|
||||
{
|
||||
struct etnaviv_cmdbuf_suballoc *suballoc;
|
||||
int ret;
|
||||
|
||||
suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
|
||||
if (!suballoc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
suballoc->gpu = gpu;
|
||||
mutex_init(&suballoc->lock);
|
||||
init_waitqueue_head(&suballoc->free_event);
|
||||
|
||||
suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
|
||||
&suballoc->paddr, GFP_KERNEL);
|
||||
if (!suballoc->vaddr)
|
||||
goto free_suballoc;
|
||||
|
||||
ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
|
||||
&suballoc->vram_node, SUBALLOC_SIZE,
|
||||
&suballoc->iova);
|
||||
if (ret)
|
||||
goto free_dma;
|
||||
|
||||
return suballoc;
|
||||
|
||||
free_dma:
|
||||
dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
|
||||
free_suballoc:
|
||||
kfree(suballoc);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
|
||||
{
|
||||
etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
|
||||
SUBALLOC_SIZE, suballoc->iova);
|
||||
dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
|
||||
suballoc->paddr);
|
||||
kfree(suballoc);
|
||||
}
|
||||
|
||||
struct etnaviv_cmdbuf *
|
||||
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
||||
size_t nr_bos)
|
||||
{
|
||||
struct etnaviv_cmdbuf *cmdbuf;
|
||||
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
|
||||
sizeof(*cmdbuf));
|
||||
int granule_offs, order, ret;
|
||||
|
||||
cmdbuf = kzalloc(sz, GFP_KERNEL);
|
||||
if (!cmdbuf)
|
||||
return NULL;
|
||||
|
||||
cmdbuf->suballoc = suballoc;
|
||||
cmdbuf->size = size;
|
||||
|
||||
order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
|
||||
retry:
|
||||
mutex_lock(&suballoc->lock);
|
||||
granule_offs = bitmap_find_free_region(suballoc->granule_map,
|
||||
SUBALLOC_GRANULES, order);
|
||||
if (granule_offs < 0) {
|
||||
suballoc->free_space = 0;
|
||||
mutex_unlock(&suballoc->lock);
|
||||
ret = wait_event_interruptible_timeout(suballoc->free_event,
|
||||
suballoc->free_space,
|
||||
msecs_to_jiffies(10 * 1000));
|
||||
if (!ret) {
|
||||
dev_err(suballoc->gpu->dev,
|
||||
"Timeout waiting for cmdbuf space\n");
|
||||
return NULL;
|
||||
}
|
||||
goto retry;
|
||||
}
|
||||
mutex_unlock(&suballoc->lock);
|
||||
cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
|
||||
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
|
||||
|
||||
return cmdbuf;
|
||||
}
|
||||
|
||||
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
|
||||
{
|
||||
struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
|
||||
int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
|
||||
SUBALLOC_GRANULE);
|
||||
|
||||
mutex_lock(&suballoc->lock);
|
||||
bitmap_release_region(suballoc->granule_map,
|
||||
cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
|
||||
order);
|
||||
suballoc->free_space = 1;
|
||||
mutex_unlock(&suballoc->lock);
|
||||
wake_up_all(&suballoc->free_event);
|
||||
kfree(cmdbuf);
|
||||
}
|
||||
|
||||
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
|
||||
{
|
||||
return buf->suballoc->iova + buf->suballoc_offset;
|
||||
}
|
||||
|
||||
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
|
||||
{
|
||||
return buf->suballoc->paddr + buf->suballoc_offset;
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Copyright (C) 2017 Etnaviv Project
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ETNAVIV_CMDBUF_H__
|
||||
#define __ETNAVIV_CMDBUF_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct etnaviv_gpu;
|
||||
struct etnaviv_cmdbuf_suballoc;
|
||||
|
||||
struct etnaviv_cmdbuf {
|
||||
/* suballocator this cmdbuf is allocated from */
|
||||
struct etnaviv_cmdbuf_suballoc *suballoc;
|
||||
/* user context key, must be unique between all active users */
|
||||
struct etnaviv_file_private *ctx;
|
||||
/* cmdbuf properties */
|
||||
int suballoc_offset;
|
||||
void *vaddr;
|
||||
u32 size;
|
||||
u32 user_size;
|
||||
/* fence after which this buffer is to be disposed */
|
||||
struct dma_fence *fence;
|
||||
/* target exec state */
|
||||
u32 exec_state;
|
||||
/* per GPU in-flight list */
|
||||
struct list_head node;
|
||||
/* BOs attached to this command buffer */
|
||||
unsigned int nr_bos;
|
||||
struct etnaviv_vram_mapping *bo_map[0];
|
||||
};
|
||||
|
||||
struct etnaviv_cmdbuf_suballoc *
|
||||
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
|
||||
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
|
||||
|
||||
struct etnaviv_cmdbuf *
|
||||
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
|
||||
size_t nr_bos);
|
||||
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
|
||||
|
||||
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
|
||||
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
|
||||
|
||||
#endif /* __ETNAVIV_CMDBUF_H__ */
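Putting the new suballocator interface together: a rough caller-side sketch modelled on the call sites converted later in this patch (error handling abbreviated, the wrapper function itself is hypothetical):

/* Sketch: carve a PAGE_SIZE command buffer out of the per-GPU pool. */
static struct etnaviv_cmdbuf *example_alloc_cmdbuf(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *cmdbuf;

	cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
	if (!cmdbuf)
		return NULL;

	/* the GPU-visible address comes from the pool's single mapping */
	dev_dbg(gpu->dev, "cmdbuf at GPU address 0x%08x\n",
		etnaviv_cmdbuf_get_va(cmdbuf));

	return cmdbuf;	/* released with etnaviv_cmdbuf_free() on retire */
}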
|
|
@ -18,11 +18,11 @@
|
|||
#include <linux/of_platform.h>
|
||||
#include <drm/drm_of.h>
|
||||
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_drv.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
|
||||
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
|
||||
static bool reglog;
|
||||
|
@ -177,7 +177,8 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
|
|||
u32 i;
|
||||
|
||||
seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
|
||||
buf->vaddr, (u64)buf->paddr, size - buf->user_size);
|
||||
buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
|
||||
size - buf->user_size);
|
||||
|
||||
for (i = 0; i < size / 4; i++) {
|
||||
if (i && !(i % 4))
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/devcoredump.h>
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_dump.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
|
@ -177,12 +178,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
|
|||
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
|
||||
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
|
||||
gpu->buffer->size,
|
||||
etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
|
||||
etnaviv_cmdbuf_get_va(gpu->buffer));
|
||||
|
||||
list_for_each_entry(cmd, &gpu->active_cmd_list, node)
|
||||
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
|
||||
cmd->size,
|
||||
etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
|
||||
cmd->size, etnaviv_cmdbuf_get_va(cmd));
|
||||
|
||||
/* Reserve space for the bomap */
|
||||
if (n_bomap_pages) {
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/reservation.h>
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_drv.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
|
@ -332,8 +333,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|||
bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
|
||||
relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
|
||||
stream = drm_malloc_ab(1, args->stream_size);
|
||||
cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
|
||||
args->nr_bos);
|
||||
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
|
||||
ALIGN(args->stream_size, 8) + 8,
|
||||
args->nr_bos);
|
||||
if (!bos || !relocs || !stream || !cmdbuf) {
|
||||
ret = -ENOMEM;
|
||||
goto err_submit_cmds;
|
||||
|
@ -422,7 +424,7 @@ err_submit_objects:
|
|||
err_submit_cmds:
|
||||
/* if we still own the cmdbuf */
|
||||
if (cmdbuf)
|
||||
etnaviv_gpu_cmdbuf_free(cmdbuf);
|
||||
etnaviv_cmdbuf_free(cmdbuf);
|
||||
if (stream)
|
||||
drm_free_large(stream);
|
||||
if (bos)
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
#include <linux/dma-fence.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_dump.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_gem.h"
|
||||
|
@ -546,6 +548,37 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
|
|||
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
|
||||
}
|
||||
|
||||
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
/*
* Base value for VIVS_PM_PULSE_EATER register on models where it
* cannot be read, extracted from vivante kernel driver.
*/
u32 pulse_eater = 0x01590880;

if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
pulse_eater |= BIT(23);

}

if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
pulse_eater &= ~BIT(16);
pulse_eater |= BIT(17);
}

if ((gpu->identity.revision > 0x5420) &&
(gpu->identity.features & chipFeatures_PIPE_3D))
{
/* Performance fix: disable internal DFS */
pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
pulse_eater |= BIT(18);
}

gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}
|
||||
|
||||
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
|
||||
{
|
||||
u16 prefetch;
|
||||
|
@ -586,6 +619,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
|
|||
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
|
||||
}
|
||||
|
||||
/* setup the pulse eater */
|
||||
etnaviv_gpu_setup_pulse_eater(gpu);
|
||||
|
||||
/* setup the MMU */
|
||||
etnaviv_iommu_restore(gpu);
|
||||
|
||||
|
@ -593,7 +629,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
|
|||
prefetch = etnaviv_buffer_init(gpu);
|
||||
|
||||
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
|
||||
etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
|
||||
etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
|
||||
prefetch);
|
||||
}
|
||||
|
||||
|
@ -658,8 +694,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
goto fail;
|
||||
}
|
||||
|
||||
gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
|
||||
if (IS_ERR(gpu->cmdbuf_suballoc)) {
|
||||
dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
|
||||
ret = PTR_ERR(gpu->cmdbuf_suballoc);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Create buffer: */
|
||||
gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
|
||||
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
|
||||
if (!gpu->buffer) {
|
||||
ret = -ENOMEM;
|
||||
dev_err(gpu->dev, "could not create command buffer\n");
|
||||
|
@ -667,7 +710,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
}
|
||||
|
||||
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
|
||||
gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
|
||||
etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
|
||||
ret = -EINVAL;
|
||||
dev_err(gpu->dev,
|
||||
"command buffer outside valid memory window\n");
|
||||
|
@ -694,7 +737,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|||
return 0;
|
||||
|
||||
free_buffer:
|
||||
etnaviv_gpu_cmdbuf_free(gpu->buffer);
|
||||
etnaviv_cmdbuf_free(gpu->buffer);
|
||||
gpu->buffer = NULL;
|
||||
destroy_iommu:
|
||||
etnaviv_iommu_destroy(gpu->mmu);
|
||||
|
@ -1117,41 +1160,6 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
|
|||
* Cmdstream submission/retirement:
|
||||
*/
|
||||
|
||||
struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
|
||||
size_t nr_bos)
|
||||
{
|
||||
struct etnaviv_cmdbuf *cmdbuf;
|
||||
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
|
||||
sizeof(*cmdbuf));
|
||||
|
||||
cmdbuf = kzalloc(sz, GFP_KERNEL);
|
||||
if (!cmdbuf)
|
||||
return NULL;
|
||||
|
||||
if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
|
||||
size = ALIGN(size, SZ_4K);
|
||||
|
||||
cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
|
||||
GFP_KERNEL);
|
||||
if (!cmdbuf->vaddr) {
|
||||
kfree(cmdbuf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
cmdbuf->gpu = gpu;
|
||||
cmdbuf->size = size;
|
||||
|
||||
return cmdbuf;
|
||||
}
|
||||
|
||||
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
|
||||
{
|
||||
etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
|
||||
dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
|
||||
cmdbuf->paddr);
|
||||
kfree(cmdbuf);
|
||||
}
|
||||
|
||||
static void retire_worker(struct work_struct *work)
|
||||
{
|
||||
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
|
||||
|
@ -1177,7 +1185,7 @@ static void retire_worker(struct work_struct *work)
|
|||
etnaviv_gem_mapping_unreference(mapping);
|
||||
}
|
||||
|
||||
etnaviv_gpu_cmdbuf_free(cmdbuf);
|
||||
etnaviv_cmdbuf_free(cmdbuf);
|
||||
/*
|
||||
* We need to balance the runtime PM count caused by
|
||||
* each submission. Upon submission, we increment
|
||||
|
@ -1593,10 +1601,15 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
|
|||
#endif
|
||||
|
||||
if (gpu->buffer) {
|
||||
etnaviv_gpu_cmdbuf_free(gpu->buffer);
|
||||
etnaviv_cmdbuf_free(gpu->buffer);
|
||||
gpu->buffer = NULL;
|
||||
}
|
||||
|
||||
if (gpu->cmdbuf_suballoc) {
|
||||
etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
|
||||
gpu->cmdbuf_suballoc = NULL;
|
||||
}
|
||||
|
||||
if (gpu->mmu) {
|
||||
etnaviv_iommu_destroy(gpu->mmu);
|
||||
gpu->mmu = NULL;
|
||||
|
|
|
@ -92,6 +92,7 @@ struct etnaviv_event {
|
|||
struct dma_fence *fence;
|
||||
};
|
||||
|
||||
struct etnaviv_cmdbuf_suballoc;
|
||||
struct etnaviv_cmdbuf;
|
||||
|
||||
struct etnaviv_gpu {
|
||||
|
@ -135,6 +136,7 @@ struct etnaviv_gpu {
|
|||
int irq;
|
||||
|
||||
struct etnaviv_iommu *mmu;
|
||||
struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
|
||||
|
||||
/* Power Control: */
|
||||
struct clk *clk_bus;
|
||||
|
@ -150,29 +152,6 @@ struct etnaviv_gpu {
|
|||
struct work_struct recover_work;
|
||||
};
|
||||
|
||||
struct etnaviv_cmdbuf {
|
||||
/* device this cmdbuf is allocated for */
|
||||
struct etnaviv_gpu *gpu;
|
||||
/* user context key, must be unique between all active users */
|
||||
struct etnaviv_file_private *ctx;
|
||||
/* cmdbuf properties */
|
||||
void *vaddr;
|
||||
dma_addr_t paddr;
|
||||
u32 size;
|
||||
u32 user_size;
|
||||
/* vram node used if the cmdbuf is mapped through the MMUv2 */
|
||||
struct drm_mm_node vram_node;
|
||||
/* fence after which this buffer is to be disposed */
|
||||
struct dma_fence *fence;
|
||||
/* target exec state */
|
||||
u32 exec_state;
|
||||
/* per GPU in-flight list */
|
||||
struct list_head node;
|
||||
/* BOs attached to this command buffer */
|
||||
unsigned int nr_bos;
|
||||
struct etnaviv_vram_mapping *bo_map[0];
|
||||
};
|
||||
|
||||
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
|
||||
{
|
||||
etnaviv_writel(data, gpu->mmio + reg);
|
||||
|
@ -211,9 +190,6 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
|
|||
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
|
||||
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
|
||||
struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
|
||||
u32 size, size_t nr_bos);
|
||||
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
|
||||
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
|
||||
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
|
||||
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
|
||||
|
|
|
@ -184,7 +184,7 @@ static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
|
|||
memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
|
||||
}
|
||||
|
||||
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
.ops = {
|
||||
.domain_free = etnaviv_domain_free,
|
||||
.map = etnaviv_iommuv1_map,
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <linux/dma-mapping.h>
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
#include "etnaviv_mmu.h"
|
||||
#include "etnaviv_iommu.h"
|
||||
|
@ -229,7 +230,7 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
|
|||
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
|
||||
}
|
||||
|
||||
static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
||||
.ops = {
|
||||
.domain_free = etnaviv_iommuv2_domain_free,
|
||||
.map = etnaviv_iommuv2_map,
|
||||
|
@ -254,7 +255,8 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
|
|||
prefetch = etnaviv_buffer_config_mmuv2(gpu,
|
||||
(u32)etnaviv_domain->mtlb_dma,
|
||||
(u32)etnaviv_domain->bad_page_dma);
|
||||
etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
|
||||
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
|
||||
prefetch);
|
||||
etnaviv_gpu_wait_idle(gpu, 100);
|
||||
|
||||
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*/
|
||||
|
||||
#include "common.xml.h"
|
||||
#include "etnaviv_cmdbuf.h"
|
||||
#include "etnaviv_drv.h"
|
||||
#include "etnaviv_gem.h"
|
||||
#include "etnaviv_gpu.h"
|
||||
|
@ -107,6 +108,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;

lockdep_assert_held(&mmu->lock);

@ -117,15 +119,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;

/*
* XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
* drm_mm into giving out a low IOVA after address space
* rollover. This needs a proper fix.
*/
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

size, 0, 0,
mmu->last_iova, U64_MAX,
mode);
if (ret != -ENOSPC)
break;
|
||||
|
||||
|
@ -140,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
|
|||
}
|
||||
|
||||
/* Try to retire some entries */
|
||||
drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
|
||||
drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
|
||||
|
||||
found = 0;
|
||||
INIT_LIST_HEAD(&list);
|
||||
|
@ -192,13 +189,12 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
|
|||
list_del_init(&m->scan_node);
|
||||
}
|
||||
|
||||
mode = DRM_MM_INSERT_EVICT;
|
||||
|
||||
/*
|
||||
* We removed enough mappings so that the new allocation will
|
||||
* succeed. Ensure that the MMU will be flushed before the
|
||||
* associated commit requesting this mapping, and retry the
|
||||
* allocation one more time.
|
||||
* succeed, retry the allocation one more time.
|
||||
*/
|
||||
mmu->need_flush = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -250,6 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
|
|||
}
|
||||
|
||||
list_add_tail(&mapping->mmu_node, &mmu->mappings);
|
||||
mmu->need_flush = true;
|
||||
mutex_unlock(&mmu->lock);
|
||||
|
||||
return ret;
|
||||
|
@ -267,6 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
|
|||
etnaviv_iommu_remove_mapping(mmu, mapping);
|
||||
|
||||
list_del(&mapping->mmu_node);
|
||||
mmu->need_flush = true;
|
||||
mutex_unlock(&mmu->lock);
|
||||
}
|
||||
|
||||
|
@ -322,55 +320,50 @@ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
|
|||
etnaviv_iommuv2_restore(gpu);
|
||||
}
|
||||
|
||||
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_cmdbuf *buf)
|
||||
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
|
||||
struct drm_mm_node *vram_node, size_t size,
|
||||
u32 *iova)
|
||||
{
|
||||
struct etnaviv_iommu *mmu = gpu->mmu;
|
||||
|
||||
if (mmu->version == ETNAVIV_IOMMU_V1) {
|
||||
return buf->paddr - gpu->memory_base;
|
||||
*iova = paddr - gpu->memory_base;
|
||||
return 0;
|
||||
} else {
|
||||
int ret;
|
||||
|
||||
if (buf->vram_node.allocated)
|
||||
return (u32)buf->vram_node.start;
|
||||
|
||||
mutex_lock(&mmu->lock);
|
||||
ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
|
||||
buf->size + SZ_64K);
|
||||
ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&mmu->lock);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
|
||||
buf->size, IOMMU_READ);
|
||||
ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
|
||||
IOMMU_READ);
|
||||
if (ret < 0) {
|
||||
drm_mm_remove_node(&buf->vram_node);
|
||||
drm_mm_remove_node(vram_node);
|
||||
mutex_unlock(&mmu->lock);
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* At least on GC3000 the FE MMU doesn't properly flush old TLB
|
||||
* entries. Make sure to space the command buffers out in a way
|
||||
* that the FE MMU prefetch won't load invalid entries.
|
||||
*/
|
||||
mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
|
||||
mmu->last_iova = vram_node->start + size;
|
||||
gpu->mmu->need_flush = true;
|
||||
mutex_unlock(&mmu->lock);
|
||||
|
||||
return (u32)buf->vram_node.start;
|
||||
*iova = (u32)vram_node->start;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_cmdbuf *buf)
|
||||
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
|
||||
struct drm_mm_node *vram_node, size_t size,
|
||||
u32 iova)
|
||||
{
|
||||
struct etnaviv_iommu *mmu = gpu->mmu;
|
||||
|
||||
if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
|
||||
if (mmu->version == ETNAVIV_IOMMU_V2) {
|
||||
mutex_lock(&mmu->lock);
|
||||
iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
|
||||
drm_mm_remove_node(&buf->vram_node);
|
||||
iommu_unmap(mmu->domain, iova, size);
|
||||
drm_mm_remove_node(vram_node);
|
||||
mutex_unlock(&mmu->lock);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -62,10 +62,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
|
|||
struct etnaviv_vram_mapping *mapping);
|
||||
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
|
||||
|
||||
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_cmdbuf *buf);
|
||||
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
|
||||
struct etnaviv_cmdbuf *buf);
|
||||
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
|
||||
struct drm_mm_node *vram_node, size_t size,
|
||||
u32 *iova);
|
||||
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
|
||||
struct drm_mm_node *vram_node, size_t size,
|
||||
u32 iova);
|
||||
|
||||
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
|
||||
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
|
||||
|
|
|
@ -13,9 +13,11 @@
|
|||
#include <linux/platform_device.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/component.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regmap.h>
|
||||
|
||||
#include <video/exynos5433_decon.h>
|
||||
|
||||
|
@ -25,6 +27,9 @@
|
|||
#include "exynos_drm_plane.h"
|
||||
#include "exynos_drm_iommu.h"
|
||||
|
||||
#define DSD_CFG_MUX 0x1004
|
||||
#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
|
||||
|
||||
#define WINDOWS_NR 3
|
||||
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
|
||||
|
||||
|
@ -57,6 +62,7 @@ struct decon_context {
|
|||
struct exynos_drm_plane planes[WINDOWS_NR];
|
||||
struct exynos_drm_plane_config configs[WINDOWS_NR];
|
||||
void __iomem *addr;
|
||||
struct regmap *sysreg;
|
||||
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
|
||||
int pipe;
|
||||
unsigned long flags;
|
||||
|
@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
|
|||
|
||||
static void decon_setup_trigger(struct decon_context *ctx)
|
||||
{
|
||||
u32 val = !(ctx->out_type & I80_HW_TRG)
|
||||
? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
|
||||
TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
|
||||
: TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
|
||||
TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
|
||||
writel(val, ctx->addr + DECON_TRIGCON);
|
||||
if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
|
||||
return;
|
||||
|
||||
if (!(ctx->out_type & I80_HW_TRG)) {
|
||||
writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
|
||||
| TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
|
||||
ctx->addr + DECON_TRIGCON);
|
||||
return;
|
||||
}
|
||||
|
||||
writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK
|
||||
| TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON);
|
||||
|
||||
if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
|
||||
DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
|
||||
DRM_ERROR("Cannot update sysreg.\n");
|
||||
}
|
||||
|
||||
static void decon_commit(struct exynos_drm_crtc *crtc)
|
||||
{
|
||||
struct decon_context *ctx = crtc->ctx;
|
||||
struct drm_display_mode *m = &crtc->base.mode;
|
||||
bool interlaced = false;
|
||||
u32 val;
|
||||
|
||||
if (test_bit(BIT_SUSPENDED, &ctx->flags))
|
||||
|
@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
|
|||
m->crtc_hsync_end = m->crtc_htotal - 92;
|
||||
m->crtc_vsync_start = m->crtc_vdisplay + 1;
|
||||
m->crtc_vsync_end = m->crtc_vsync_start + 1;
|
||||
if (m->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
interlaced = true;
|
||||
}
|
||||
|
||||
if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
|
||||
decon_setup_trigger(ctx);
|
||||
decon_setup_trigger(ctx);
|
||||
|
||||
/* lcd on and use command if */
|
||||
val = VIDOUT_LCD_ON;
|
||||
if (interlaced)
|
||||
val |= VIDOUT_INTERLACE_EN_F;
|
||||
if (ctx->out_type & IFTYPE_I80) {
|
||||
val |= VIDOUT_COMMAND_IF;
|
||||
} else {
|
||||
|
@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
|
|||
|
||||
writel(val, ctx->addr + DECON_VIDOUTCON0);
|
||||
|
||||
val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
|
||||
VIDTCON2_HOZVAL(m->hdisplay - 1);
|
||||
if (interlaced)
|
||||
val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) |
|
||||
VIDTCON2_HOZVAL(m->hdisplay - 1);
|
||||
else
|
||||
val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
|
||||
VIDTCON2_HOZVAL(m->hdisplay - 1);
|
||||
writel(val, ctx->addr + DECON_VIDTCON2);
|
||||
|
||||
if (!(ctx->out_type & IFTYPE_I80)) {
|
||||
val = VIDTCON00_VBPD_F(
|
||||
m->crtc_vtotal - m->crtc_vsync_end - 1) |
|
||||
VIDTCON00_VFPD_F(
|
||||
m->crtc_vsync_start - m->crtc_vdisplay - 1);
|
||||
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
|
||||
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
|
||||
|
||||
if (interlaced)
|
||||
vbp = vbp / 2 - 1;
|
||||
val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1);
|
||||
writel(val, ctx->addr + DECON_VIDTCON00);
|
||||
|
||||
val = VIDTCON01_VSPW_F(
|
||||
|
@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
|
|||
if (test_bit(BIT_SUSPENDED, &ctx->flags))
|
||||
return;
|
||||
|
||||
val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxA(win));
|
||||
if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
|
||||
val = COORDINATE_X(state->crtc.x) |
|
||||
COORDINATE_Y(state->crtc.y / 2);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxA(win));
|
||||
|
||||
val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
|
||||
COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxB(win));
|
||||
val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
|
||||
COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxB(win));
|
||||
} else {
|
||||
val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxA(win));
|
||||
|
||||
val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) |
|
||||
COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
|
||||
writel(val, ctx->addr + DECON_VIDOSDxB(win));
|
||||
}
|
||||
|
||||
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
|
||||
VIDOSD_Wx_ALPHA_B_F(0x0);
|
||||
|
@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx)
|
|||
udelay(10);
|
||||
}
|
||||
|
||||
WARN(tries == 0, "failed to disable DECON\n");
|
||||
|
||||
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
|
||||
for (tries = 2000; tries; --tries) {
|
||||
if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
|
||||
|
@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
|
|||
|
||||
if (val) {
|
||||
writel(val, ctx->addr + DECON_VIDINTCON1);
|
||||
if (ctx->out_type & IFTYPE_HDMI) {
|
||||
val = readl(ctx->addr + DECON_VIDOUTCON0);
|
||||
val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F;
|
||||
if (val ==
|
||||
(VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
drm_crtc_handle_vblank(&ctx->crtc->base);
|
||||
}
|
||||
|
||||
|
@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
|
|||
ctx->out_type |= IFTYPE_I80;
|
||||
}
|
||||
|
||||
if (ctx->out_type | I80_HW_TRG) {
|
||||
ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
|
||||
"samsung,disp-sysreg");
|
||||
if (IS_ERR(ctx->sysreg)) {
|
||||
dev_err(dev, "failed to get system register\n");
|
||||
return PTR_ERR(ctx->sysreg);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
|
||||
struct clk *clk;
|
||||
|
||||
|
|
|
@ -208,7 +208,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
|
|||
struct exynos_drm_fbdev *fbdev;
|
||||
struct exynos_drm_private *private = dev->dev_private;
|
||||
struct drm_fb_helper *helper;
|
||||
unsigned int num_crtc;
|
||||
int ret;
|
||||
|
||||
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
|
||||
|
@ -225,9 +224,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
|
|||
|
||||
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
|
||||
|
||||
num_crtc = dev->mode_config.num_crtc;
|
||||
|
||||
ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
|
||||
ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to initialize drm fb helper.\n");
|
||||
goto err_init;
|
||||
|
|
|
@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = {
|
|||
.timing_base = 0x20000,
|
||||
.lcdblk_offset = 0x210,
|
||||
.lcdblk_bypass_shift = 1,
|
||||
.trg_type = I80_HW_TRG,
|
||||
.has_shadowcon = 1,
|
||||
.has_vidoutcon = 1,
|
||||
.has_trigger_per_te = 1,
|
||||
};
|
||||
|
||||
static struct fimd_driver_data exynos4_fimd_driver_data = {
|
||||
|
|
|
@ -1683,7 +1683,7 @@ struct platform_driver g2d_driver = {
|
|||
.probe = g2d_probe,
|
||||
.remove = g2d_remove,
|
||||
.driver = {
|
||||
.name = "s5p-g2d",
|
||||
.name = "exynos-drm-g2d",
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &g2d_pm_ops,
|
||||
.of_match_table = exynos_g2d_match,
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_graph.h>
|
||||
#include <linux/hdmi.h>
|
||||
#include <linux/component.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
|
@ -133,6 +134,7 @@ struct hdmi_context {
|
|||
struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)];
|
||||
struct regulator *reg_hdmi_en;
|
||||
struct exynos_drm_clk phy_clk;
|
||||
struct drm_bridge *bridge;
|
||||
};
|
||||
|
||||
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
|
||||
|
@ -509,9 +511,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
|
|||
{
|
||||
.pixel_clock = 27000000,
|
||||
.conf = {
|
||||
0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
|
||||
0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
|
||||
0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
|
||||
0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02,
|
||||
0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
|
||||
0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
|
||||
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
|
||||
},
|
||||
},
|
||||
|
@ -519,9 +521,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
|
|||
.pixel_clock = 27027000,
|
||||
.conf = {
|
||||
0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
|
||||
0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
|
||||
0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
|
||||
0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
|
||||
0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac,
|
||||
0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
|
||||
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -587,6 +589,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = {
|
|||
0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
|
||||
},
|
||||
},
|
||||
{
|
||||
.pixel_clock = 297000000,
|
||||
.conf = {
|
||||
0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2,
|
||||
0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
|
||||
0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
|
||||
0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static const char * const hdmi_clk_gates4[] = {
|
||||
|
@ -788,7 +799,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
|
|||
sizeof(buf));
|
||||
if (ret > 0) {
|
||||
hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC);
|
||||
hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret);
|
||||
hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3);
|
||||
hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
|
||||
}
|
||||
|
||||
ret = hdmi_audio_infoframe_init(&frm.audio);
|
||||
|
@ -912,7 +924,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
|
|||
drm_connector_register(connector);
|
||||
drm_mode_connector_attach_encoder(connector, encoder);
|
||||
|
||||
return 0;
|
||||
if (hdata->bridge) {
|
||||
encoder->bridge = hdata->bridge;
|
||||
hdata->bridge->encoder = encoder;
|
||||
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to attach bridge\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool hdmi_mode_fixup(struct drm_encoder *encoder,
|
||||
|
@ -1581,6 +1601,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
|
|||
hdmiphy_disable(hdata);
|
||||
}
|
||||
|
||||
static int hdmi_bridge_init(struct hdmi_context *hdata)
|
||||
{
|
||||
struct device *dev = hdata->dev;
|
||||
struct device_node *ep, *np;
|
||||
|
||||
ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
|
||||
if (!ep)
|
||||
return 0;
|
||||
|
||||
np = of_graph_get_remote_port_parent(ep);
|
||||
of_node_put(ep);
|
||||
if (!np) {
|
||||
DRM_ERROR("failed to get remote port parent");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hdata->bridge = of_drm_find_bridge(np);
|
||||
of_node_put(np);
|
||||
|
||||
if (!hdata->bridge)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hdmi_resources_init(struct hdmi_context *hdata)
|
||||
{
|
||||
struct device *dev = hdata->dev;
|
||||
|
@ -1620,17 +1665,18 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
|
|||
|
||||
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
|
||||
|
||||
if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV)
|
||||
return 0;
|
||||
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
|
||||
if (IS_ERR(hdata->reg_hdmi_en))
|
||||
return PTR_ERR(hdata->reg_hdmi_en);
|
||||
|
||||
if (IS_ERR(hdata->reg_hdmi_en))
|
||||
return PTR_ERR(hdata->reg_hdmi_en);
|
||||
ret = regulator_enable(hdata->reg_hdmi_en);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to enable hdmi-en regulator\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = regulator_enable(hdata->reg_hdmi_en);
|
||||
if (ret)
|
||||
DRM_ERROR("failed to enable hdmi-en regulator\n");
|
||||
|
||||
return ret;
|
||||
return hdmi_bridge_init(hdata);
|
||||
}
|
||||
|
||||
static struct of_device_id hdmi_match_types[] = {
|
||||
|
|
|
@ -94,7 +94,7 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
|
|||
"Invalid legacyfb_depth. Defaulting to 24bpp\n");
|
||||
legacyfb_depth = 24;
|
||||
}
|
||||
fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1, 1);
|
||||
fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
|
||||
if (IS_ERR(fsl_dev->fbdev)) {
|
||||
ret = PTR_ERR(fsl_dev->fbdev);
|
||||
fsl_dev->fbdev = NULL;
|
||||
|
|
|
@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
|
|||
return NULL;
|
||||
|
||||
tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
|
||||
if (!tcon) {
|
||||
ret = -ENOMEM;
|
||||
if (!tcon)
|
||||
goto err_node_put;
|
||||
}
|
||||
|
||||
ret = fsl_tcon_init_regmap(dev, tcon, np);
|
||||
if (ret) {
|
||||
|
@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
|
|||
goto err_node_put;
|
||||
}
|
||||
|
||||
of_node_put(np);
|
||||
clk_prepare_enable(tcon->ipg_clk);
|
||||
ret = clk_prepare_enable(tcon->ipg_clk);
|
||||
if (ret) {
|
||||
dev_err(dev, "Couldn't enable the TCON clock\n");
|
||||
goto err_node_put;
|
||||
}
|
||||
|
||||
of_node_put(np);
|
||||
dev_info(dev, "Using TCON in bypass mode\n");
|
||||
|
||||
return tcon;
|
||||
|
|
|
@ -564,7 +564,7 @@ int psb_fbdev_init(struct drm_device *dev)
|
|||
drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
|
||||
dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
|
||||
INTELFB_CONN_LIMIT);
|
||||
if (ret)
|
||||
goto free;
|
||||
|
||||
|
|
|
@ -200,8 +200,7 @@ int hibmc_fbdev_init(struct hibmc_drm_private *priv)
|
|||
&hibmc_fbdev_helper_funcs);
|
||||
|
||||
/* Now just one crtc and one channel */
|
||||
ret = drm_fb_helper_init(priv->dev,
|
||||
&hifbdev->helper, 1, 1);
|
||||
ret = drm_fb_helper_init(priv->dev, &hifbdev->helper, 1);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize fb helper: %d\n", ret);
|
||||
return ret;
|
||||
|
|
|
@ -59,8 +59,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
|
|||
drm_fbdev_cma_hotplug_event(priv->fbdev);
|
||||
} else {
|
||||
priv->fbdev = drm_fbdev_cma_init(dev, 32,
|
||||
dev->mode_config.num_crtc,
|
||||
dev->mode_config.num_connector);
|
||||
dev->mode_config.num_connector);
|
||||
if (IS_ERR(priv->fbdev))
|
||||
priv->fbdev = NULL;
|
||||
}
|
||||
|
|
|
@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
size, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
|
|||
}, **phase;
|
||||
struct i915_vma *vma, *next;
|
||||
struct drm_mm_node *node;
|
||||
enum drm_mm_insert_mode mode;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&vm->i915->drm.struct_mutex);
|
||||
|
@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
|
|||
* On each list, the oldest objects lie at the HEAD with the freshest
|
||||
* object on the TAIL.
|
||||
*/
|
||||
mode = DRM_MM_INSERT_BEST;
|
||||
if (flags & PIN_HIGH)
|
||||
mode = DRM_MM_INSERT_HIGH;
|
||||
if (flags & PIN_MAPPABLE)
|
||||
mode = DRM_MM_INSERT_LOW;
|
||||
drm_mm_scan_init_with_range(&scan, &vm->mm,
|
||||
min_size, alignment, cache_level,
|
||||
start, end,
|
||||
flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
|
||||
start, end, mode);
|
||||
|
||||
/* Retire before we search the active list. Although we have
|
||||
* reasonable accuracy in our retirement lists, we may have
|
||||
|
|
|
@ -437,12 +437,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
|
|||
PIN_MAPPABLE | PIN_NONBLOCK);
|
||||
if (IS_ERR(vma)) {
|
||||
memset(&cache->node, 0, sizeof(cache->node));
|
||||
ret = drm_mm_insert_node_in_range_generic
|
||||
ret = drm_mm_insert_node_in_range
|
||||
(&ggtt->base.mm, &cache->node,
|
||||
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
|
||||
0, ggtt->mappable_end,
|
||||
DRM_MM_SEARCH_DEFAULT,
|
||||
DRM_MM_CREATE_DEFAULT);
|
||||
DRM_MM_INSERT_LOW);
|
||||
if (ret) /* no inactive aperture space, use cpu reloc */
|
||||
return NULL;
|
||||
} else {
|
||||
|
|
|
@ -2754,12 +2754,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;

/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ggtt->error_capture,
PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
0, 0);
ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
if (ret)
return ret;
|
||||
|
||||
|
@ -3669,7 +3667,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
|
|||
u64 size, u64 alignment, unsigned long color,
|
||||
u64 start, u64 end, unsigned int flags)
|
||||
{
|
||||
u32 search_flag, alloc_flag;
|
||||
enum drm_mm_insert_mode mode;
|
||||
u64 offset;
|
||||
int err;
|
||||
|
||||
|
@ -3690,13 +3688,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
|
|||
if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
|
||||
return -ENOSPC;
|
||||
|
||||
if (flags & PIN_HIGH) {
|
||||
search_flag = DRM_MM_SEARCH_BELOW;
|
||||
alloc_flag = DRM_MM_CREATE_TOP;
|
||||
} else {
|
||||
search_flag = DRM_MM_SEARCH_DEFAULT;
|
||||
alloc_flag = DRM_MM_CREATE_DEFAULT;
|
||||
}
|
||||
mode = DRM_MM_INSERT_BEST;
|
||||
if (flags & PIN_HIGH)
|
||||
mode = DRM_MM_INSERT_HIGH;
|
||||
if (flags & PIN_MAPPABLE)
|
||||
mode = DRM_MM_INSERT_LOW;
|
||||
|
||||
/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
|
||||
* so we know that we always have a minimum alignment of 4096.
|
||||
|
@@ -3708,10 +3704,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  search_flag, alloc_flag);
	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

@@ -3749,9 +3744,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
	if (err)
		return err;

	search_flag = DRM_MM_SEARCH_DEFAULT;
	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
						   size, alignment, color,
						   start, end,
						   search_flag, alloc_flag);
	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
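A note on the mode selection used throughout the hunks above: the old drm_mm API took a separate search flag and allocation flag (DRM_MM_SEARCH_*/DRM_MM_CREATE_*), while the new drm_mm_insert_node_in_range() takes a single enum drm_mm_insert_mode. The following standalone C sketch (not kernel code; the enum is redeclared locally and the PIN_* bits are placeholder values, not the real i915 flags) shows the mapping the i915 hunks implement: PIN_HIGH selects DRM_MM_INSERT_HIGH, PIN_MAPPABLE overrides it with DRM_MM_INSERT_LOW, everything else uses DRM_MM_INSERT_BEST, and the last-resort path after eviction uses DRM_MM_INSERT_EVICT.

#include <stdio.h>

/* local stand-in for the kernel enum; numeric values are illustrative only */
enum drm_mm_insert_mode {
	DRM_MM_INSERT_BEST,
	DRM_MM_INSERT_LOW,
	DRM_MM_INSERT_HIGH,
	DRM_MM_INSERT_EVICT,
};

#define PIN_MAPPABLE (1u << 0)	/* placeholder bit, not the i915 value */
#define PIN_HIGH     (1u << 1)	/* placeholder bit, not the i915 value */

static enum drm_mm_insert_mode pick_mode(unsigned int flags)
{
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;

	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)	/* mappable wins, as in the hunks above */
		mode = DRM_MM_INSERT_LOW;
	return mode;
}

int main(void)
{
	printf("default=%d high=%d mappable=%d\n",
	       pick_mode(0), pick_mode(PIN_HIGH), pick_mode(PIN_MAPPABLE));
	return 0;
}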
@@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
@@ -713,8 +713,7 @@ int intel_fbdev_init(struct drm_device *dev)
	if (!intel_fbdev_init_bios(dev, ifbdev))
		ifbdev->preferred_bpp = 32;

	ret = drm_fb_helper_init(dev, &ifbdev->helper,
				 INTEL_INFO(dev_priv)->num_pipes, 4);
	ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
	if (ret) {
		kfree(ifbdev);
		return ret;
@@ -389,8 +389,7 @@ static int imx_drm_bind(struct device *dev)
		dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
		legacyfb_depth = 16;
	}
	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
					      drm->mode_config.num_crtc, MAX_CRTC);
	imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
	if (IS_ERR(imxdrm->fbhelper)) {
		ret = PTR_ERR(imxdrm->fbhelper);
		imxdrm->fbhelper = NULL;
@@ -1,4 +1,4 @@
meson-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
meson-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o

obj-$(CONFIG_DRM_MESON) += meson.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
@@ -279,7 +279,6 @@ static int meson_drv_probe(struct platform_device *pdev)
	drm->mode_config.funcs = &meson_mode_config_funcs;

	priv->fbdev = drm_fbdev_cma_init(drm, 32,
					 drm->mode_config.num_crtc,
					 drm->mode_config.num_connector);
	if (IS_ERR(priv->fbdev)) {
		ret = PTR_ERR(priv->fbdev);
@@ -329,8 +328,7 @@ static struct platform_driver meson_drm_platform_driver = {
	.probe     = meson_drv_probe,
	.remove    = meson_drv_remove,
	.driver    = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME,
		.name = "meson-drm",
		.of_match_table = dt_match,
	},
};
@@ -286,7 +286,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);

	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
				 MGAG200FB_CONN_LIMIT);
	if (ret)
		goto err_fb_helper;

@@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY
	help
	  Choose this option if the 28nm DSI PHY 8960 variant is used on the
	  platform.

config DRM_MSM_DSI_14NM_PHY
	bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
	depends on DRM_MSM_DSI
	default y
	help
	  Choose this option if DSI PHY on 8996 is used on the platform.
@@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o

ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
endif

obj-$(CONFIG_DRM_MSM) += msm.o
@@ -12,6 +12,7 @@
 */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
@@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
@@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
@@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu)
	return true;
}

static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;
	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
@@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

	return gpu;
}
@@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = {
		.gmem  = (SZ_1M + SZ_512K),
		.init  = a4xx_gpu_init,
	}, {
		.rev = ADRENO_REV(5, 3, 0, ANY_ID),
		.rev = ADRENO_REV(5, 3, 0, 2),
		.revn = 530,
		.name = "A530",
		.pm4fw = "a530_pm4.fw",
		.pfpfw = "a530_pfp.fw",
		.gmem = SZ_1M,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			ADRENO_QUIRK_FAULT_DETECT_MASK,
		.init = a5xx_gpu_init,
		.gpmufw = "a530v3_gpmu.fw2",
	},
@@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev,
	priv->gpu_pdev = pdev;
}

static const struct {
	const char *str;
	uint32_t flag;
} quirks[] = {
	{ "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
	{ "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};
static int find_chipid(struct device *dev, u32 *chipid)
{
	struct device_node *node = dev->of_node;
	const char *compat;
	int ret;

	/* first search the compat strings for qcom,adreno-XYZ.W: */
	ret = of_property_read_string_index(node, "compatible", 0, &compat);
	if (ret == 0) {
		unsigned rev, patch;

		if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
			*chipid = 0;
			*chipid |= (rev / 100) << 24;  /* core */
			rev %= 100;
			*chipid |= (rev / 10) << 16;   /* major */
			rev %= 10;
			*chipid |= rev << 8;           /* minor */
			*chipid |= patch;

			return 0;
		}
	}

	/* and if that fails, fall back to legacy "qcom,chipid" property: */
	ret = of_property_read_u32(node, "qcom,chipid", chipid);
	if (ret)
		return ret;

	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
		(*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
		(*chipid >> 8) & 0xff, *chipid & 0xff);

	return 0;
}

static int adreno_bind(struct device *dev, struct device *master, void *data)
{
	static struct adreno_platform_config config = {};
	struct device_node *child, *node = dev->of_node;
	u32 val;
	int ret, i;
	int ret;

	ret = of_property_read_u32(node, "qcom,chipid", &val);
	ret = find_chipid(dev, &val);
	if (ret) {
		dev_err(dev, "could not find chipid: %d\n", ret);
		return ret;
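For readers unfamiliar with the chipid layout, the find_chipid() hunk above packs the "qcom,adreno-XYZ.W" compatible string into one u32 as core (bits 31:24), major (23:16), minor (15:8) and patch (7:0). The standalone userspace program below (illustration only, not driver code) reproduces that arithmetic: "qcom,adreno-320.2" becomes 0x03020002, which is also the value the legacy qcom,chipid warning would print back as qcom,adreno-320.2.

#include <stdio.h>
#include <stdint.h>

static int parse_compat(const char *compat, uint32_t *chipid)
{
	unsigned rev, patch;

	if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) != 2)
		return -1;

	*chipid = 0;
	*chipid |= (rev / 100) << 24;	/* core */
	rev %= 100;
	*chipid |= (rev / 10) << 16;	/* major */
	rev %= 10;
	*chipid |= rev << 8;		/* minor */
	*chipid |= patch;
	return 0;
}

int main(void)
{
	uint32_t chipid;

	if (parse_compat("qcom,adreno-320.2", &chipid) == 0)
		printf("chipid = 0x%08x -> qcom,adreno-%u%u%u.%u\n", chipid,
		       (chipid >> 24) & 0xff, (chipid >> 16) & 0xff,
		       (chipid >> 8) & 0xff, chipid & 0xff);
	return 0;
}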
@@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
	}

	if (!config.fast_rate) {
		dev_err(dev, "could not find clk rates\n");
		return -ENXIO;
		dev_warn(dev, "could not find clk rates\n");
		/* This is a safe low speed for all devices: */
		config.fast_rate = 200000000;
		config.slow_rate = 27000000;
	}

	for (i = 0; i < ARRAY_SIZE(quirks); i++)
		if (of_property_read_bool(node, quirks[i].str))
			config.quirks |= quirks[i].flag;

	dev->platform_data = &config;
	set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
	return 0;
@@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev)
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
@@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;
	adreno_gpu->quirks = config->quirks;

	gpu->fast_rate = config->fast_rate;
	gpu->slow_rate = config->slow_rate;
@@ -75,6 +75,7 @@ struct adreno_info {
	const char *pm4fw, *pfpfw;
	const char *gpmufw;
	uint32_t gmem;
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
};

@@ -116,8 +117,6 @@ struct adreno_gpu {
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;

	uint32_t quirks;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

@@ -128,7 +127,6 @@ struct adreno_platform_config {
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	struct msm_bus_scale_pdata *bus_scale_table;
#endif
	uint32_t quirks;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
		return NULL;

	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
	return msm_dsi->encoder;
}

static int dsi_get_phy(struct msm_dsi *msm_dsi)
@@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void)
}

int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
			 struct drm_encoder *encoder)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_bridge *ext_bridge;
	int ret, i;
	int ret;

	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
		    !encoders[MSM_DSI_CMD_ENCODER_ID]))
	if (WARN_ON(!encoder))
		return -EINVAL;

	msm_dsi->dev = dev;
@@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
		goto fail;
	}

	msm_dsi->encoder = encoder;

	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
	if (IS_ERR(msm_dsi->bridge)) {
		ret = PTR_ERR(msm_dsi->bridge);
@@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
		goto fail;
	}

	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
		encoders[i]->bridge = msm_dsi->bridge;
		msm_dsi->encoders[i] = encoders[i];
	}

	/*
	 * check if the dsi encoder output is connected to a panel or an
	 * external bridge. We create a connector only if we're connected to a
@@ -27,14 +27,24 @@
#define DSI_1 1
#define DSI_MAX 2

struct msm_dsi_phy_shared_timings;
struct msm_dsi_phy_clk_request;

enum msm_dsi_phy_type {
	MSM_DSI_PHY_28NM_HPM,
	MSM_DSI_PHY_28NM_LP,
	MSM_DSI_PHY_20NM,
	MSM_DSI_PHY_28NM_8960,
	MSM_DSI_PHY_14NM,
	MSM_DSI_PHY_MAX
};

enum msm_dsi_phy_usecase {
	MSM_DSI_PHY_STANDALONE,
	MSM_DSI_PHY_MASTER,
	MSM_DSI_PHY_SLAVE,
};

#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4

@@ -73,8 +83,8 @@ struct msm_dsi {
	struct device *phy_dev;
	bool phy_enabled;

	/* the encoders we are hooked to (outside of dsi block) */
	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
	/* the encoder we are hooked to (outside of dsi block) */
	struct drm_encoder *encoder;

	int id;
};
@@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_phy_enable(int id,
		const unsigned long bit_rate, const unsigned long esc_rate,
		u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);

@@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
			    enum msm_dsi_phy_usecase uc);
#else
static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
			 enum msm_dsi_phy_type type, int id) {
@@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
{
	return 0;
}
static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
					  enum msm_dsi_phy_usecase uc)
{
	return -ENODEV;
}
#endif

/* dsi host */
@@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
					u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
			struct msm_dsi_phy_shared_timings *phy_shared_timings);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
					struct drm_display_mode *mode);
@@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
			struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
			struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
					struct drm_device *dev);
@@ -164,14 +182,27 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);

/* dsi phy */
struct msm_dsi_phy;
struct msm_dsi_phy_shared_timings {
	u32 clk_post;
	u32 clk_pre;
	bool clk_pre_inc_by_2;
};

struct msm_dsi_phy_clk_request {
	unsigned long bitclk_rate;
	unsigned long escclk_rate;
};

void msm_dsi_phy_driver_register(void);
void msm_dsi_phy_driver_unregister(void);
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
	const unsigned long bit_rate, const unsigned long esc_rate);
	struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
					u32 *clk_pre, u32 *clk_post);
void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
			struct msm_dsi_phy_shared_timings *shared_timing);
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
			     enum msm_dsi_phy_usecase uc);

#endif /* __DSI_CONNECTOR_H__ */

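The header changes above replace the pair of scalar rate arguments to msm_dsi_phy_enable() with a msm_dsi_phy_clk_request, and the clk_pre/clk_post out-parameters with a msm_dsi_phy_shared_timings that the host fetches separately. A minimal userspace sketch of that data flow is shown below; the two structs are copied from the hunk (u32/bool spelled via <stdint.h>/<stdbool.h>), while fill_request() and the sample rates are invented here purely for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct msm_dsi_phy_shared_timings {
	uint32_t clk_post;
	uint32_t clk_pre;
	bool clk_pre_inc_by_2;
};

struct msm_dsi_phy_clk_request {
	unsigned long bitclk_rate;
	unsigned long escclk_rate;
};

/* hypothetical helper, standing in for the host-side request filling */
static void fill_request(struct msm_dsi_phy_clk_request *req,
			 unsigned long bitclk, unsigned long escclk)
{
	req->bitclk_rate = bitclk;
	req->escclk_rate = escclk;
}

int main(void)
{
	struct msm_dsi_phy_clk_request req;

	fill_request(&req, 800000000UL, 19200000UL);	/* example rates only */
	printf("bitclk=%lu escclk=%lu\n", req.bitclk_rate, req.escclk_rate);
	return 0;
}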
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19)
- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)

Copyright (C) 2013-2015 by the following authors:
Copyright (C) 2013-2017 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)

@@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)

#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018

#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000

#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004

#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008

#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c

#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4
static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
}
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
}

#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001

#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004

#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c

#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020

#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024

#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028

#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c

#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030

#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034

#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038

#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c

#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040

#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044

#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001

#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001

static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
}
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
}

static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }

static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }

#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000

#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004

#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010

#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c

#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028

#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c

#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030

#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034

#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038

#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c

#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040

#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044

#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048

#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c

#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c

#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058

#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c

#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070

#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074

#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078

#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c

#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080

#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084

#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088

#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c

#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090

#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094

#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098

#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c

#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0

#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4

#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8

#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac

#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4

#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8

#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc

#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0

#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4

#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc

#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8

#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0

#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4

#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8

#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc

#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100

#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104

#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108


#endif /* DSI_XML */
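As a quick illustration of how the generated helpers in the 14nm PHY block above compose: REG_DSI_14nm_PHY_LN_CFG0(lane) returns the per-lane register offset (lanes are 0x80 bytes apart) and DSI_14nm_PHY_LN_CFG0_PREPARE_DLY() shifts a field value into bits [7:6] and masks it. The small userspace program below copies those two definitions verbatim from the header; only main() is made up for demonstration.

#include <stdio.h>
#include <stdint.h>

#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6

static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }

static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
{
	return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
}

int main(void)
{
	uint32_t lane = 2;					/* example lane index */
	uint32_t off = REG_DSI_14nm_PHY_LN_CFG0(lane);		/* 0x100 */
	uint32_t val = DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(1);	/* 0x40 */

	printf("lane %u CFG0 offset 0x%03x value 0x%02x\n", lane, off, val);
	return 0;
}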
@@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
	.num_dsi = 2,
};

/*
 * TODO: core_mmss_clk fails to enable for some reason, but things work fine
 * without it too. Figure out why it doesn't enable and uncomment below
 */
static const char * const dsi_8996_bus_clk_names[] = {
	"mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
};

static const struct msm_dsi_config msm8996_dsi_cfg = {
	.io_offset = DSI_6G_REG_SHIFT,
	.reg_cfg = {
		.num = 2,
		.regs = {
			{"vdda", 18160, 1 },	/* 1.25 V */
			{"vcca", 17000, 32 },	/* 0.925 V */
			{"vddio", 100000, 100 },/* 1.8 V */
		},
	},
	.bus_clk_names = dsi_8996_bus_clk_names,
	.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
	.io_start = { 0x994000, 0x996000 },
	.num_dsi = 2,
};

static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
	{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
						&msm8974_apq8084_dsi_cfg},
	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
};

const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
@@ -24,6 +24,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001

#define MSM_DSI_V2_VER_MINOR_8064 0x0

Some files were not shown because too many files changed in this diff.