Merge tag 'drm-fixes-2020-03-06' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes round, looks like a few people woke up, got a bunch of
  fixes across the drivers. Bit bigger than I'd like but they all seem
  fine and hopefully it quiets down now.

  sun4i, kirin, mediatek and exynos on the ARM side. virtio-gpu and
  core have some mmap fixes, there is a dma-buf leak fix, and one ttm
  fence leak is also fixed.

  Otherwise it's mostly amdgpu and i915.

  One of the i915 fixes is for a very long latency I was seeing (using
  latencytop) when running gnome-shell locally with firefox eating
  nearly all my RAM. It really helps with desktop responsiveness,
  especially when firefox is chewing a lot of memory (the pattern is
  sketched just after this quoted summary).

  dma-buf:
   - fix memory leak

  core:
   - shmem object mmap fix.

  ttm:
   - Fix fence leak in ttm_buffer_object_transfer().

  amdgpu:
   - Gfx reset fix for gfx9, 10
   - Fix for gfx10
   - DP MST fix
   - DCC fix
   - Renoir power fixes
   - Navi power fix

  i915:
   - Break up long lists of object reclaim with cond_resched()
   - PSR probe fix
   - TGL workarounds
   - Selftest return value fix
   - Drop timeline mutex while waiting for retirement
   - Wait for OA configuration completion before writes to OA buffer

  virtio:
   - Fix resource id creation race in virtio.
   - mmap fixes

  sun4i:
   - Fixes for sun4i VI layer format support.

  kirin:
   - kirin: Revert "Fix for hikey620 display offset problem"

  exynos:
   - fix a kernel oops when the driver is loaded as a module.
   - fix a regulator warning when the I2C DDC adapter cannot be acquired.
   - print an error message only for errors other than -EPROBE_DEFER.

  mediatek:
   - overlay, cursor and gce fixes"

* tag 'drm-fixes-2020-03-06' of git://anongit.freedesktop.org/drm/drm: (38 commits)
  drm/amdgpu/display: navi1x copy dcn watermark clock settings to smu resume from s3 (v2)
  drm/amd/powerplay: map mclk to fclk for COMBINATIONAL_BYPASS case
  drm/amd/powerplay: fix pre-check condition for setting clock range
  drm/amd/display: fix dcc swath size calculations on dcn1
  drm/amd/display: Clear link settings on MST disable connector
  drm/amdgpu: disable 3D pipe 1 on Navi1x
  drm/amdgpu: clean wptr on wb when gpu recovery
  drm: kirin: Revert "Fix for hikey620 display offset problem"
  drm/i915/gt: Drop the timeline->mutex as we wait for retirement
  drm/i915/perf: Reintroduce wait on OA configuration completion
  drm/sun4i: Fix DE2 VI layer format support
  drm/sun4i: Add separate DE3 VI layer formats
  drm/sun4i: de2/de3: Remove unsupported VI layer formats
  drm/i915/selftests: Fix return in assert_mmap_offset()
  drm/i915: Protect i915_request_await_start from early waits
  drm/i915/tgl: Add Wa_1608008084
  drm/i915/tgl: Add Wa_22010178259:tgl
  drm/i915: Program MBUS with rmw during initialization
  drm/i915/psr: Force PSR probe only after full initialization
  drm/i915/gem: Break up long lists of object reclaim
  ...
Linus Torvalds 2020-03-06 06:45:20 -06:00
Parents: 9f65ed5fe4 2ac4853e29
Commit: ba0ae9ac46
38 changed files: 526 additions and 218 deletions


@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
return 0;
}
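
Note: dmabuf->name is the optional debug name userspace can attach to a
buffer (allocated when the name is set); it was never freed on release,
so adding the kfree() here is the dma-buf memory leak fix from the
summary.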


@ -52,7 +52,7 @@
* 1. Primary ring
* 2. Async ring
*/
#define GFX10_NUM_GFX_RINGS 2
#define GFX10_NUM_GFX_RINGS_NV1X 1
#define GFX10_MEC_HPD_SIZE 2048
#define F32_CE_PROGRAM_RAM_SIZE 65536
@ -1304,7 +1304,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@ -2710,18 +2710,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_commit(ring);
/* submit cs packet to copy state 0 to next available state */
ring = &adev->gfx.gfx_ring[1];
r = amdgpu_ring_alloc(ring, 2);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
if (adev->gfx.num_gfx_rings > 1) {
/* maximum supported gfx ring is 2 */
ring = &adev->gfx.gfx_ring[1];
r = amdgpu_ring_alloc(ring, 2);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_commit(ring);
}
amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_commit(ring);
return 0;
}
@ -2818,39 +2820,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
ring = &adev->gfx.gfx_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
/* Initialize the ring buffer's write pointers */
ring->wptr = 0;
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
/* Set the wb address wether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
lower_32_bits(wptr_gpu_addr));
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
upper_32_bits(wptr_gpu_addr));
if (adev->gfx.num_gfx_rings > 1) {
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
/* maximum supported gfx ring is 2 */
ring = &adev->gfx.gfx_ring[1];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
/* Initialize the ring buffer's write pointers */
ring->wptr = 0;
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
/* Set the wb address wether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
lower_32_bits(wptr_gpu_addr));
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
mdelay(1);
WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
rb_addr = ring->gpu_addr >> 8;
WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
mutex_unlock(&adev->srbm_mutex);
rb_addr = ring->gpu_addr >> 8;
WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
mutex_unlock(&adev->srbm_mutex);
}
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@ -3513,6 +3517,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@ -3966,7 +3971,8 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_set_kiq_pm4_funcs(adev);


@ -3663,6 +3663,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
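
Note: together, the gfx10 hunks switch Navi1x to a single gfx ring
(GFX10_NUM_GFX_RINGS_NV1X) and guard all second-ring setup behind
adev->gfx.num_gfx_rings > 1, while the kcq_init_queue hunks for both
gfx10 and gfx9 also clear the ring buffer when a compute queue is
reinitialized after reset, so stale packets are not replayed ("clean
wptr on wb when gpu recovery" in the shortlog).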


@ -1422,6 +1422,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
int ret = 0;
if (!is_support_sw_smu(adev))
return 0;
/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
* on window driver dc implementation.
* For Navi1x, clock settings of dcn watermarks are fixed. the settings
* should be passed to smu during boot up and resume from s3.
* boot up: dc calculate dcn watermark clock settings within dc_create,
* dcn20_resource_construct
* then call pplib functions below to pass the settings to smu:
* smu_set_watermarks_for_clock_ranges
* smu_set_watermarks_table
* navi10_set_watermarks_table
* smu_write_watermarks_table
*
* For Renoir, clock settings of dcn watermark are also fixed values.
* dc has implemented different flow for window driver:
* dc_hardware_init / dc_set_power_state
* dcn10_init_hw
* notify_wm_ranges
* set_wm_ranges
* -- Linux
* smu_set_watermarks_for_clock_ranges
* renoir_set_watermarks_table
* smu_write_watermarks_table
*
* For Linux,
* dc_hardware_init -> amdgpu_dm_init
* dc_set_power_state --> dm_resume
*
* therefore, this function apply to navi10/12/14 but not Renoir
* *
*/
switch(adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
break;
default:
return 0;
}
mutex_lock(&smu->mutex);
/* pass data to smu controller */
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
mutex_unlock(&smu->mutex);
DRM_ERROR("Failed to update WMTABLE!\n");
return ret;
}
smu->watermarks_bitmap |= WATERMARKS_LOADED;
}
mutex_unlock(&smu->mutex);
return 0;
}
/**
* dm_hw_init() - Initialize DC device
* @handle: The base driver device containing the amdgpu_dm device.
@ -1700,6 +1767,8 @@ static int dm_resume(void *handle)
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
return 0;
}


@ -451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);


@ -840,8 +840,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
swath_bytes_horz_wc = height * blk256_height * bpe;
swath_bytes_vert_wc = width * blk256_width * bpe;
swath_bytes_horz_wc = width * blk256_height * bpe;
swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */
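
Note: the DCC fix above un-swaps the operands: the horizontal
write-combine swath scales with the surface width (times the 256B
block height), and the vertical swath with the height, not the other
way around.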


@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
{
int ret = 0;
if (min <= 0 && max <= 0)
if (min < 0 && max < 0)
return -EINVAL;
if (!smu_clk_dpm_is_enabled(smu, clk_type))


@ -111,8 +111,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
CLK_MAP(SCLK, CLOCK_GFXCLK),
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
CLK_MAP(UCLK, CLOCK_UMCCLK),
CLK_MAP(MCLK, CLOCK_UMCCLK),
CLK_MAP(UCLK, CLOCK_FCLK),
CLK_MAP(MCLK, CLOCK_FCLK),
};
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@ -280,7 +280,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
count = NUM_MEMCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
case SMU_DCEFCLK:
count = NUM_DCFCLK_DPM_LEVELS;


@ -458,9 +458,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
{
int ret = 0;
if (max < min)
return -EINVAL;
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:


@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
dpcd[0] = dp_bw;
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)


@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (ret)
goto err_zero_use;
if (obj->import_attach)
if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
else
} else {
pgprot_t prot = PAGE_KERNEL;
if (!shmem->map_cached)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
VM_MAP, prot);
}
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (!shmem->map_cached)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
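
Note: the shmem helper now keys both vmap() and mmap() off a per-object
map_cached flag: cached mappings when the flag is set, write-combined
otherwise. virtio-gpu sets bo->base.map_cached = true in its hunk near
the end, which is the substance of the virtio mmap fixes.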


@ -1773,8 +1773,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
dev_info(dev, "failed to get regulators: %d\n", ret);
return -EPROBE_DEFER;
if (ret != -EPROBE_DEFER)
dev_info(dev, "failed to get regulators: %d\n", ret);
return ret;
}
dsi->clks = devm_kcalloc(dev,
@ -1787,9 +1788,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
i--;
continue;
dsi->clks[i] = devm_clk_get(dev,
OLD_SCLK_MIPI_CLK_NAME);
if (!IS_ERR(dsi->clks[i]))
continue;
}
dev_info(dev, "failed to get the clock: %s\n",


@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hdmi-en regulator\n");
return ret;
}
}
return hdmi_bridge_init(hdata);
}
@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
if (!IS_ERR(hdata->reg_hdmi_en)) {
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to enable hdmi-en regulator\n");
goto err_hdmiphy;
}
}
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@ -2047,7 +2048,8 @@ err_unregister_audio:
err_rpm_disable:
pm_runtime_disable(dev);
if (!IS_ERR(hdata->reg_hdmi_en))
regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
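
Note (one reading of these hunks): the DSI probe now propagates the
regulator core's own error and defers quietly on -EPROBE_DEFER, and it
retries the legacy clock name with a second devm_clk_get() instead of
strcpy()ing into the (read-only) name table, which is the likely source
of the module-load oops; the HDMI hunks move enabling the hdmi-en
regulator out of hdmi_resources_init() into hdmi_probe() with proper
error unwinding.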


@ -83,7 +83,6 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)


@ -46,7 +46,6 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
struct work_struct display_reset_wq;
bool power_on;
int irq;
@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
static void drm_underflow_wq(struct work_struct *work)
{
struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
display_reset_wq);
struct drm_device *drm_dev = ctx->crtc->dev;
struct drm_atomic_state *state;
state = drm_atomic_helper_suspend(drm_dev);
drm_atomic_helper_resume(drm_dev, state);
}
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
MASK(1), 1);
DRM_ERROR("LDI underflow!");
schedule_work(&ctx->display_reset_wq);
}
return IRQ_HANDLED;
}
@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
if (ret)
return ERR_PTR(-EIO);
INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
ctx->crtc = crtc;
return ctx;
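
Note: the kirin hunks are the revert itself: the LDI underflow
interrupt enable, its handler branch and the display reset worker are
all removed again.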


@ -4466,13 +4466,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
u32 val;
u32 mask, val;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
MBUS_ABOX_BW_CREDIT_MASK;
val = I915_READ(MBUS_ABOX_CTL);
val &= ~mask;
val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
I915_WRITE(MBUS_ABOX_CTL, val);
}
@ -4968,8 +4974,21 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
} else {
u32 val;
I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
/* Wa_22010178259:tgl */
val = I915_READ(BW_BUDDY1_CTL);
val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
I915_WRITE(BW_BUDDY1_CTL, val);
val = I915_READ(BW_BUDDY2_CTL);
val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
I915_WRITE(BW_BUDDY2_CTL, val);
}
}


@ -852,10 +852,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_psr)
if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
return;
if (WARN_ON(!CAN_PSR(dev_priv)))
dev_priv->psr.force_mode_changed = false;
if (!crtc_state->has_psr)
return;
WARN_ON(dev_priv->drrs.dp);
@ -1009,6 +1011,8 @@ void intel_psr_update(struct intel_dp *intel_dp,
if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
return;
dev_priv->psr.force_mode_changed = false;
mutex_lock(&dev_priv->psr.lock);
enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
@ -1534,7 +1538,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_crtc_state *crtc_state;
if (!CAN_PSR(dev_priv) || !new_state->crtc ||
dev_priv->psr.initially_probed)
!dev_priv->psr.force_mode_changed)
return;
intel_connector = to_intel_connector(connector);
@ -1545,5 +1549,18 @@ void intel_psr_atomic_check(struct drm_connector *connector,
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
crtc_state->mode_changed = true;
dev_priv->psr.initially_probed = true;
}
void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv;
if (!intel_dp)
return;
dev_priv = dp_to_i915(intel_dp);
if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
return;
dev_priv->psr.force_mode_changed = true;
}


@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */


@ -225,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}


@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
return false;
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);


@ -147,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
interruptible,
timeout);
dma_fence_put(fence);
/* Retirement is best effort */
if (!mutex_trylock(&tl->mutex)) {
active_count++;
goto out_active;
}
}
}
if (!retire_requests(tl) || flush_submission(gt))
active_count++;
mutex_unlock(&tl->mutex);
spin_lock(&timelines->lock);
out_active: spin_lock(&timelines->lock);
/* Resume iteration after dropping lock */
/* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
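
Note: this is the "drop timeline mutex while waiting for retirement"
fix: the wait on the last request happens with tl->mutex dropped, and
the lock is reacquired with mutex_trylock(); since retirement is best
effort, a failed trylock just counts the timeline as still active and
moves on instead of blocking.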


@ -575,24 +575,19 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
u32 val;
/* Wa_1409142259:tgl */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* Wa_1604555607:tgl */
val = intel_uncore_read(engine->uncore, FF_MODE2);
val &= ~FF_MODE2_TDS_TIMER_MASK;
val |= FF_MODE2_TDS_TIMER_128;
/*
* FIXME: FF_MODE2 register is not readable till TGL B0. We can
* enable verification of WA from the later steppings, which enables
* the read of FF_MODE2.
* Wa_1604555607:gen12 and Wa_1608008084:gen12
* FF_MODE2 register will return the wrong value when read. The default
* value for this register is zero for all fields and there are no bit
* masks. So instead of doing a RMW we should just write the TDS timer
* value for Wa_1604555607.
*/
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
FF_MODE2_TDS_TIMER_MASK);
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128, 0);
}
static void


@ -56,6 +56,7 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_psr.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@ -330,6 +331,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
intel_init_ipc(i915);
intel_psr_set_force_mode_changed(i915->psr.dp);
return 0;
cleanup_gem:


@ -505,7 +505,7 @@ struct i915_psr {
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work idle_work;
bool initially_probed;
bool force_mode_changed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)


@ -1954,9 +1954,10 @@ out:
return i915_vma_get(oa_bo->vma);
}
static int emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
struct intel_context *ce)
static struct i915_request *
emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
struct intel_context *ce)
{
struct i915_request *rq;
struct i915_vma *vma;
@ -1964,7 +1965,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
return PTR_ERR(vma);
return ERR_CAST(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@ -1989,13 +1990,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
err = rq->engine->emit_bb_start(rq,
vma->node.start, 0,
I915_DISPATCH_SECURE);
if (err)
goto err_add_request;
i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
return err;
return err ? ERR_PTR(err) : rq;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@ -2003,7 +2008,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
static struct i915_request *
hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@ -2406,7 +2412,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
}
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
static struct i915_request *
gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@ -2448,7 +2455,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
return ERR_PTR(ret);
return emit_oa_config(stream, oa_config, oa_context(stream));
}
@ -2460,7 +2467,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static int gen12_enable_metric_set(struct i915_perf_stream *stream)
static struct i915_request *
gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@ -2491,7 +2499,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = gen12_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
return ERR_PTR(ret);
/*
* For Gen12, performance counters are context
@ -2501,7 +2509,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
if (stream->ctx) {
ret = gen12_configure_oar_context(stream, true);
if (ret)
return ret;
return ERR_PTR(ret);
}
return emit_oa_config(stream, oa_config, oa_context(stream));
@ -2696,6 +2704,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.read = i915_oa_read,
};
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
struct i915_request *rq;
rq = stream->perf->ops.enable_metric_set(stream);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
return 0;
}
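
Note: enable_metric_set now returns the i915_request that emits the OA
configuration, and i915_perf_stream_enable_sync() waits for it to
complete before the stream is reported enabled, so nothing touches the
OA buffer before the configuration has actually landed.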
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@ -2829,7 +2851,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->ops = &i915_oa_stream_ops;
perf->exclusive_stream = stream;
ret = perf->ops.enable_metric_set(stream);
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@ -3147,7 +3169,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
int err;
struct i915_request *rq;
/*
* If OA is bound to a specific context, emit the
@ -3158,11 +3180,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
err = emit_oa_config(stream, config, oa_context(stream));
if (err == 0)
rq = emit_oa_config(stream, config, oa_context(stream));
if (!IS_ERR(rq)) {
config = xchg(&stream->oa_config, config);
else
ret = err;
i915_request_put(rq);
} else {
ret = PTR_ERR(rq);
}
}
i915_oa_config_put(config);


@ -339,7 +339,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
int (*enable_metric_set)(struct i915_perf_stream *stream);
struct i915_request *
(*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using


@ -7757,6 +7757,7 @@ enum {
#define BW_BUDDY1_CTL _MMIO(0x45140)
#define BW_BUDDY2_CTL _MMIO(0x45150)
#define BW_BUDDY_DISABLE REG_BIT(31)
#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)


@ -275,7 +275,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
list_del(&rq->link);
list_del_rcu(&rq->link);
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@ -721,6 +721,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
intel_context_mark_active(ce);
list_add_tail_rcu(&rq->link, &tl->requests);
return rq;
err_unwind:
@ -777,13 +779,23 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
GEM_BUG_ON(i915_request_timeline(rq) ==
rcu_access_pointer(signal->timeline));
if (i915_request_started(signal))
return 0;
fence = NULL;
rcu_read_lock();
spin_lock_irq(&signal->lock);
if (!i915_request_started(signal) &&
!list_is_first(&signal->link,
&rcu_dereference(signal->timeline)->requests)) {
struct i915_request *prev = list_prev_entry(signal, link);
do {
struct list_head *pos = READ_ONCE(signal->link.prev);
struct i915_request *prev;
/* Confirm signal has not been retired, the link is valid */
if (unlikely(i915_request_started(signal)))
break;
/* Is signal the earliest request on its timeline? */
if (pos == &rcu_dereference(signal->timeline)->requests)
break;
/*
* Peek at the request before us in the timeline. That
@ -791,13 +803,18 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
* after acquiring a reference to it, confirm that it is
* still part of the signaler's timeline.
*/
if (i915_request_get_rcu(prev)) {
if (list_next_entry(prev, link) == signal)
fence = &prev->fence;
else
i915_request_put(prev);
prev = list_entry(pos, typeof(*prev), link);
if (!i915_request_get_rcu(prev))
break;
/* After the strong barrier, confirm prev is still attached */
if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
i915_request_put(prev);
break;
}
}
fence = &prev->fence;
} while (0);
spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
@ -1242,8 +1259,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
list_add_tail(&rq->link, &timeline->requests);
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
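
Note: i915_request_await_start() now peeks at the previous request
locklessly: it reads signal->link.prev with READ_ONCE(), takes a
reference via i915_request_get_rcu(), and only trusts the result after
re-checking that prev->link.next still points at signal; any failed
check simply skips the wait. To make that walk safe, requests are added
and removed from the timeline list with list_add_tail_rcu() and
list_del_rcu().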


@ -486,6 +486,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
@ -636,10 +637,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
struct drm_plane *primary,
struct drm_plane *cursor, unsigned int pipe)
unsigned int pipe)
{
int ret;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
int i, ret;
for (i = 0; i < mtk_crtc->layer_nr; i++) {
if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
primary = &mtk_crtc->planes[i];
else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
cursor = &mtk_crtc->planes[i];
}
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@ -689,11 +698,12 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
else if (plane_idx == 1)
else if (plane_idx == (num_planes - 1))
return DRM_PLANE_TYPE_CURSOR;
else
return DRM_PLANE_TYPE_OVERLAY;
@ -712,7 +722,8 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
num_planes),
mtk_ddp_comp_supported_rotations(comp));
if (ret)
return ret;
@ -807,9 +818,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return ret;
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
NULL, pipe);
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
if (ret < 0)
return ret;
@ -828,7 +837,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
&mtk_crtc->cmdq_event);
if (ret)


@ -471,6 +471,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
type != MTK_DISP_OVL_2L &&
type != MTK_DISP_RDMA &&
type != MTK_DISP_WDMA)
return 0;


@ -80,6 +80,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
int ret;
if (plane != state->crtc->cursor)
return -EINVAL;
@ -90,6 +91,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
ret = mtk_drm_crtc_plane_check(state->crtc, plane,
to_mtk_plane_state(state));
if (ret)
return ret;
if (state->state)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
@ -115,6 +121,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);
state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
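
Note: the mediatek hunks make the cursor the topmost plane instead of
hard-coded index 1, run the full plane check in the async (cursor)
path, flush stale CMDQ commands with mbox_flush() before building a
new packet, and read the "mediatek,gce-events" property from the mutex
node (priv->mutex_node) rather than the crtc device node.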


@ -601,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
source_id = (fault_status >> 16);
/* Page fault only */
if ((status & mask) == BIT(i)) {
WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
ret = -1;
if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
if (!ret) {
mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
status &= ~mask;
continue;
}
}
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
i, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(pfdev, exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
if (ret)
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
i, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(pfdev, exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);


@ -106,48 +106,128 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
@ -196,12 +276,6 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
.rgb = true,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
@ -220,12 +294,6 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
.rgb = true,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
.rgb = false,
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
};
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)


@ -93,6 +93,10 @@
#define SUN8I_MIXER_FBFMT_ABGR1555 17
#define SUN8I_MIXER_FBFMT_RGBA5551 18
#define SUN8I_MIXER_FBFMT_BGRA5551 19
#define SUN8I_MIXER_FBFMT_ARGB2101010 20
#define SUN8I_MIXER_FBFMT_ABGR2101010 21
#define SUN8I_MIXER_FBFMT_RGBA1010102 22
#define SUN8I_MIXER_FBFMT_BGRA1010102 23
#define SUN8I_MIXER_FBFMT_YUYV 0
#define SUN8I_MIXER_FBFMT_UYVY 1
@ -109,6 +113,13 @@
/* format 12 is semi-planar YUV411 UVUV */
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
/* format 15 doesn't exist */
/* format 16 is P010 YVU */
#define SUN8I_MIXER_FBFMT_P010_YUV 17
/* format 18 is P210 YVU */
#define SUN8I_MIXER_FBFMT_P210_YUV 19
/* format 20 is packed YVU444 10-bit */
/* format 21 is packed YUV444 10-bit */
/*
* Sub-engines listed bellow are unused for now. The EN registers are here only


@ -398,26 +398,26 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
};
/*
* While all RGB formats are supported, VI planes don't support
* alpha blending, so there is no point having formats with alpha
* channel if their opaque analog exist.
* While DE2 VI layer supports same RGB formats as UI layer, alpha
* channel is ignored. This structure lists all unique variants
* where alpha channel is replaced with "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_ABGR1555,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_BGRX5551,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_RGBX5551,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_XBGR4444,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV16,
@ -431,11 +431,53 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV444,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
};
static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_NV16,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_P010,
DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
u32 supported_encodings, supported_ranges;
unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
unsigned int plane_cnt;
const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
if (mixer->cfg->is_de3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
formats = sun8i_vi_layer_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
}
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
sun8i_vi_layer_formats,
ARRAY_SIZE(sun8i_vi_layer_formats),
formats, format_count,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");


@ -515,6 +515,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);


@ -42,8 +42,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
* "f91a9dd35715 Fix unlinking resources from hash
* table." (Feb 2019) fixes the bug.
*/
static int handle;
handle++;
static atomic_t seqno = ATOMIC_INIT(0);
int handle = atomic_inc_return(&seqno);
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@ -99,6 +99,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
return NULL;
bo->base.base.funcs = &virtio_gpu_gem_funcs;
bo->base.map_cached = true;
return &bo->base.base;
}
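
Note: the old "static int handle; handle++" is a non-atomic
read-modify-write, so two contexts creating resources concurrently
could be handed the same id; atomic_inc_return() makes the increment a
single atomic operation, closing the resource-id race mentioned in the
summary.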


@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
* The address are un-mapped when the count reaches zero.
*/
unsigned int vmap_use_count;
/**
* @map_cached: map object cached (instead of using writecombine).
*/
bool map_cached;
};
#define to_drm_gem_shmem_obj(obj) \