Merge tag 'drm-fixes-2020-02-14' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "The core has a build fix for edid code on certain compilers/arches/,
  one MST fix and one vgem fix. Regular amdgpu fixes, and a couple of
  small driver fixes.

  The i915 fixes are a bit larger than normal for this stage, but they
  were having CI issues last week and hadn't sent any fixes then
  because of that.

  core:
   - edid build fix

  mst:
   - fix NULL ptr deref

  vgem:
   - fix close after free

  msm:
   - better dma-api usage

  sun4i:
   - disable allow_fb_modifiers

  amdgpu:
   - Additional OD fixes for navi
   - Misc display fixes
   - VCN 2.5 DPG fix
   - Prevent build errors on PowerPC on some configs
   - GDS EDC fix

  i915:
   - dsi/acpi fixes
   - gvt locking and allocation fixes
   - gem/gt fixes
   - bios timing parameters fix"

* tag 'drm-fixes-2020-02-14' of git://anongit.freedesktop.org/drm/drm: (50 commits)
  drm/i915: Mark the removal of the i915_request from the sched.link
  drm/i915/execlists: Reclaim the hanging virtual request
  drm/i915/execlists: Take a reference while capturing the guilty request
  drm/i915/execlists: Offline error capture
  drm/i915/gt: Allow temporary suspension of inflight requests
  drm/i915: Keep track of request among the scheduling lists
  drm/i915/gem: Tighten checks and acquiring the mmap object
  drm/i915: Fix preallocated barrier list append
  drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
  drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release
  drm/i915: Stub out i915_gpu_coredump_put
  drm/amdgpu:/navi10: use the ODCAP enum to index the caps array
  drm/amdgpu: update smu_v11_0_pptable.h
  drm/amdgpu: correct comment to clear up the confusion
  drm/amd/display: DCN2.x Do not program DPPCLK if same value
  drm/amd/display: Don't map ATOM_ENABLE to ATOM_INIT
  drm/amdgpu/vcn2.5: fix warning
  drm/amdgpu: limit GDS clearing workaround in cold boot sequence
  drm/amdgpu: fix amdgpu pmu to use hwc->config instead of hwc->conf
  amdgpu: Prevent build errors regarding soft/hard-float FP ABI tags
  ...
Linus Torvalds 2020-02-14 12:23:16 -08:00
Parents: b19e8c6847 6f4134b30b
Commit: 3f0d329371
51 changed files, 1132 additions and 252 deletions


@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
hwc->conf = event->attr.config;
hwc->config = event->attr.config;
return 0;
}
@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@ -101,7 +101,7 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
@ -126,7 +126,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
@ -156,7 +156,8 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
retval = pe->adev->df.funcs->pmc_start(pe->adev,
hwc->config, 1);
break;
default:
return 0;
@ -184,7 +185,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;


@ -179,6 +179,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
@ -190,8 +191,6 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
struct dpg_pause_state pause_state;
bool indirect_sram;
uint8_t num_vcn_inst;


@ -4374,9 +4374,17 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
/*
* Temp workaround to fix the issue that CP firmware fails to
* update read pointer when CPDMA is writing clearing operation
* to GDS in suspend/resume sequence on several cards. So just
* limit this operation in cold boot sequence.
*/
if (!adev->in_suspend) {
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
}
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);


@ -1207,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
adev->vcn.inst[inst_idx].pause_state.fw_based,
adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@ -1258,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
adev->vcn.pause_state.fw_based = new_state->fw_based;
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
adev->vcn.inst[inst_idx].pause_state.fw_based,
adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@ -1318,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
adev->vcn.pause_state.jpeg = new_state->jpeg;
adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;


@ -1137,9 +1137,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.pause_state.fw_based, new_state->fw_based);
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@ -1185,7 +1185,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
adev->vcn.pause_state.fw_based = new_state->fw_based;
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;


@ -1367,9 +1367,9 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.pause_state.fw_based, new_state->fw_based);
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@ -1407,14 +1407,14 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
/* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
}
adev->vcn.pause_state.fw_based = new_state->fw_based;
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;


@ -8408,7 +8408,6 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
/* Calculate number of static frames before generating interrupt to
* enter PSR.
*/
unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
@ -8423,8 +8422,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
* Calculate number of frames such that at least 30 ms of time has
* passed.
*/
if (vsync_rate_hz != 0)
if (vsync_rate_hz != 0) {
unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
num_frames_static = (30000 / frame_time_microsec) + 1;
}
params.triggers.cursor_update = true;
params.triggers.overlay_update = true;
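
The change above (one of the "Misc display fixes") moves the frame-time division inside the vsync_rate_hz check, so a panel reporting a 0 Hz refresh rate no longer divides by zero and simply keeps the 2-frame fail-safe. As a rough sanity check of the arithmetic (illustrative values, not from the patch), for a 60 Hz panel:

	unsigned int vsync_rate_hz = 60;                                     /* hypothetical panel */
	unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;         /* 16666 us per frame */
	unsigned int num_frames_static = (30000 / frame_time_microsec) + 1; /* 1 + 1 = 2 frames, ~33 ms >= 30 ms */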


@ -711,10 +711,6 @@ static void enable_disp_power_gating_dmcub(
power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
power_gating.power_gating.pwr = *pwr;
/* ATOM_ENABLE is old API in DMUB */
if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
power_gating.power_gating.pwr.enable = ATOM_INIT;
dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);


@ -87,6 +87,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
# prevent build errors regarding soft-float vs hard-float FP ABI tags
# this code is currently unused on ppc64, as it applies to Renoir APUs only
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)


@ -117,7 +117,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
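
This hunk is the "DCN2.x Do not program DPPCLK if same value" fix from the shortlog: with the old test, any update pass with safe_to_lower set would rewrite the DPP DTO even when the clock had not changed. A condensed comparison of the two conditions (sketch, not from the patch):

	/* old: reprogram whenever lowering is allowed, even if prev == new */
	if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
		clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz);
	/* new: reprogram only when the value actually changes in a permitted direction */
	if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz)
		clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz);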


@ -151,6 +151,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
@ -412,19 +418,19 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
/* We will not select WM based on dcfclk, so leave it as unconstrained */
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* fclk wil be used to select WM*/
/* We will not select WM based on fclk, so leave it as unconstrained */
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
/* dcfclk wil be used to select WM*/
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */


@ -400,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
if (!is_engine_available(engine))
if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,


@ -572,7 +572,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,


@ -60,6 +60,7 @@
#include "dcn20/dcn20_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce110/dce110_resource.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@ -856,6 +857,7 @@ static const struct dc_debug_options debug_defaults_diags = {
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_TOTAL_DCN21
};
@ -1718,6 +1720,10 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;


@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
enum SMU_11_0_ODFEATURE_CAP {
SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
SMU_11_0_ODCAP_GFXCLK_CURVE,
SMU_11_0_ODCAP_UCLK_MAX,
SMU_11_0_ODCAP_POWER_LIMIT,
SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
SMU_11_0_ODCAP_FAN_SPEED_MIN,
SMU_11_0_ODCAP_TEMPERATURE_FAN,
SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
SMU_11_0_ODCAP_AUTO_UV_ENGINE,
SMU_11_0_ODCAP_AUTO_OC_ENGINE,
SMU_11_0_ODCAP_AUTO_OC_MEMORY,
SMU_11_0_ODCAP_FAN_CURVE,
SMU_11_0_ODCAP_COUNT,
};
enum SMU_11_0_ODFEATURE_ID {
SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features


@ -736,9 +736,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
return od_table->cap[feature];
return od_table->cap[cap];
}
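
Together with the header change above, this is the "use the ODCAP enum to index the caps array" fix: od_table->cap[] is bounded by SMU_11_0_MAX_ODFEATURE (32, per the define above), so indexing it with the ODFEATURE_ID bit-mask values reads far past the array, while the new ODCAP ordinals (0..13) stay in range. A condensed before/after (sketch, not from the patch):

	od_table->cap[SMU_11_0_ODFEATURE_FAN_CURVE];  /* old: index is 1 << 13 = 8192, well past cap[32] */
	od_table->cap[SMU_11_0_ODCAP_FAN_CURVE];      /* new: index is 13, within cap[32] */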
static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
@ -846,7 +846,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
@ -854,7 +854,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
@ -862,7 +862,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
@ -887,7 +887,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
size = sprintf(buf, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
@ -896,14 +896,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
min_value, max_value);
}
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
&min_value, &max_value);
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
&min_value, &max_value);
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
@ -2056,7 +2056,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
@ -2102,7 +2102,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
@ -2143,7 +2143,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}


@ -3838,7 +3838,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
if (guid)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}


@ -3211,7 +3211,7 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);


@ -357,14 +357,16 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
panel_fixed_mode->hdisplay + dtd->hfront_porch;
panel_fixed_mode->hsync_end =
panel_fixed_mode->hsync_start + dtd->hsync;
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
panel_fixed_mode->htotal =
panel_fixed_mode->hdisplay + dtd->hblank;
panel_fixed_mode->vdisplay = dtd->vactive;
panel_fixed_mode->vsync_start =
panel_fixed_mode->vdisplay + dtd->vfront_porch;
panel_fixed_mode->vsync_end =
panel_fixed_mode->vsync_start + dtd->vsync;
panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
panel_fixed_mode->vtotal =
panel_fixed_mode->vdisplay + dtd->vblank;
panel_fixed_mode->clock = dtd->pixel_clock;
panel_fixed_mode->width_mm = dtd->width_mm;
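
This hunk is the "bios timing parameters fix" from the summary: the generic DTD supplies active, front porch, sync width and total blanking separately, so htotal/vtotal have to be derived from active + blank; the old htotal = hsync_end silently dropped the back porch. The resulting relationships, restated from the hunk (horizontal shown, vertical is analogous):

	hsync_start = hdisplay + hfront_porch;
	hsync_end   = hsync_start + hsync;
	htotal      = hdisplay + hblank;   /* hblank covers front porch + sync + back porch */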


@ -12366,6 +12366,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
linked_state->view = plane_state->view;
memcpy(linked_state->color_plane, plane_state->color_plane,
sizeof(linked_state->color_plane));
@ -14476,37 +14477,23 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
return 0;
}
static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
enum transcoder transcoder)
static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
u8 transcoders)
{
struct intel_crtc_state *new_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->cpu_transcoder == transcoder)
return needs_modeset(new_crtc_state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->hw.enable &&
transcoders & BIT(new_crtc_state->cpu_transcoder) &&
needs_modeset(new_crtc_state))
return true;
}
return false;
}
static void
intel_modeset_synced_crtcs(struct intel_atomic_state *state,
u8 transcoders)
{
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
}
}
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
@ -14662,15 +14649,20 @@ static int intel_atomic_check(struct drm_device *dev,
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
if (intel_cpu_transcoder_needs_modeset(state, master)) {
if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
} else if (is_trans_port_sync_mode(new_crtc_state)) {
}
if (is_trans_port_sync_mode(new_crtc_state)) {
u8 trans = new_crtc_state->sync_mode_slaves_mask |
BIT(new_crtc_state->master_transcoder);
intel_modeset_synced_crtcs(state, trans);
if (intel_cpu_transcoders_need_modeset(state, trans)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
}
}


@ -384,6 +384,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
#ifdef CONFIG_ACPI
static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
{
struct i2c_adapter_lookup *lookup = data;
@ -393,8 +394,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
acpi_handle adapter_handle;
acpi_status status;
if (intel_dsi->i2c_bus_num >= 0 ||
!i2c_acpi_get_i2c_resource(ares, &sb))
if (!i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
if (lookup->slave_addr != sb->slave_address)
@ -413,14 +413,41 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
return 1;
}
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 slave_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct device *dev = &drm_dev->pdev->dev;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
acpi_dev = ACPI_COMPANION(dev);
if (acpi_dev) {
memset(&lookup, 0, sizeof(lookup));
lookup.slave_addr = slave_addr;
lookup.intel_dsi = intel_dsi;
lookup.dev_handle = acpi_device_handle(acpi_dev);
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(acpi_dev, &resource_list,
i2c_adapter_lookup,
&lookup);
acpi_dev_free_resource_list(&resource_list);
}
}
#else
static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 slave_addr)
{
}
#endif
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct device *dev = &drm_dev->pdev->dev;
struct i2c_adapter *adapter;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
struct i2c_msg msg;
int ret;
u8 vbt_i2c_bus_num = *(data + 2);
@ -431,20 +458,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
if (intel_dsi->i2c_bus_num < 0) {
intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
acpi_dev = ACPI_COMPANION(dev);
if (acpi_dev) {
memset(&lookup, 0, sizeof(lookup));
lookup.slave_addr = slave_addr;
lookup.intel_dsi = intel_dsi;
lookup.dev_handle = acpi_device_handle(acpi_dev);
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(acpi_dev, &resource_list,
i2c_adapter_lookup,
&lookup);
acpi_dev_free_resource_list(&resource_list);
}
i2c_acpi_find_adapter(intel_dsi, slave_addr);
}
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);


@ -1981,9 +1981,20 @@ static int __eb_parse(struct dma_fence_work *work)
pw->trampoline);
}
static void __eb_parse_release(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
if (pw->trampoline)
i915_active_release(&pw->trampoline->active);
i915_active_release(&pw->shadow->active);
i915_active_release(&pw->batch->active);
}
static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
.release = __eb_parse_release,
};
static int eb_parse_pipeline(struct i915_execbuffer *eb,
@ -1997,6 +2008,20 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
err = i915_active_acquire(&eb->batch->active);
if (err)
goto err_free;
err = i915_active_acquire(&shadow->active);
if (err)
goto err_batch;
if (trampoline) {
err = i915_active_acquire(&trampoline->active);
if (err)
goto err_shadow;
}
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
@ -2006,7 +2031,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
dma_resv_lock(pw->batch->resv, NULL);
err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
if (err)
goto err_trampoline;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
@ -2034,6 +2061,14 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
err_batch_unlock:
dma_resv_unlock(pw->batch->resv);
err_trampoline:
if (trampoline)
i915_active_release(&trampoline->active);
err_shadow:
i915_active_release(&shadow->active);
err_batch:
i915_active_release(&eb->batch->active);
err_free:
kfree(pw);
return err;
}


@ -455,10 +455,11 @@ out:
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo;
struct i915_mmap_offset *mmo, *mn;
spin_lock(&obj->mmo.lock);
list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
@ -487,6 +488,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
i915_gem_object_release_mmap_offset(obj);
}
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type)
{
struct rb_node *rb;
spin_lock(&obj->mmo.lock);
rb = obj->mmo.offsets.rb_node;
while (rb) {
struct i915_mmap_offset *mmo =
rb_entry(rb, typeof(*mmo), offset);
if (mmo->mmap_type == mmap_type) {
spin_unlock(&obj->mmo.lock);
return mmo;
}
if (mmo->mmap_type < mmap_type)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
spin_unlock(&obj->mmo.lock);
return NULL;
}
static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
struct rb_node *rb, **p;
spin_lock(&obj->mmo.lock);
rb = NULL;
p = &obj->mmo.offsets.rb_node;
while (*p) {
struct i915_mmap_offset *pos;
rb = *p;
pos = rb_entry(rb, typeof(*pos), offset);
if (pos->mmap_type == mmo->mmap_type) {
spin_unlock(&obj->mmo.lock);
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
return pos;
}
if (pos->mmap_type < mmo->mmap_type)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&mmo->offset, rb, p);
rb_insert_color(&mmo->offset, &obj->mmo.offsets);
spin_unlock(&obj->mmo.lock);
return mmo;
}
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
mmo = lookup_mmo(obj, mmap_type);
if (mmo)
goto out;
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
mmo->obj = obj;
mmo->dev = obj->base.dev;
mmo->file = file;
mmo->mmap_type = mmap_type;
drm_vma_node_reset(&mmo->vma_node);
err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
obj->base.size / PAGE_SIZE);
err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
&mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
goto out;
goto insert;
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto err;
i915_gem_drain_freed_objects(i915);
err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
obj->base.size / PAGE_SIZE);
err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
&mmo->vma_node, obj->base.size / PAGE_SIZE);
if (err)
goto err;
insert:
mmo = insert_mmo(obj, mmo);
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
spin_lock(&obj->mmo.lock);
list_add(&mmo->offset, &obj->mmo.offsets);
spin_unlock(&obj->mmo.lock);
return mmo;
err:
@ -745,60 +807,43 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_i915_gem_object *obj = NULL;
struct i915_mmap_offset *mmo = NULL;
struct drm_gem_object *obj = NULL;
struct file *anon;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (likely(node)) {
mmo = container_of(node, struct i915_mmap_offset,
vma_node);
/*
* In our dependency chain, the drm_vma_offset_node
* depends on the validity of the mmo, which depends on
* the gem object. However the only reference we have
* at this point is the mmo (as the parent of the node).
* Try to check if the gem object was at least cleared.
*/
if (!mmo || !mmo->obj) {
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
return -EINVAL;
}
if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as it is in the process of being
* destroyed and will be invalid when the vma manager lock
* is released.
*/
obj = &mmo->obj->base;
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
if (!obj)
return -EINVAL;
return node ? -EACCES : -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_put_unlocked(obj);
return -EACCES;
}
if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
drm_gem_object_put_unlocked(obj);
i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
anon = mmap_singleton(to_i915(obj->dev));
anon = mmap_singleton(to_i915(dev));
if (IS_ERR(anon)) {
drm_gem_object_put_unlocked(obj);
i915_gem_object_put(obj);
return PTR_ERR(anon);
}


@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
INIT_LIST_HEAD(&obj->mmo.offsets);
obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
if (mmo->file != file)
continue;
spin_unlock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
drm_vma_node_revoke(&mmo->vma_node, file);
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
i915_gem_object_release_mmap(obj);
list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets,
offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
INIT_LIST_HEAD(&obj->mmo.offsets);
obj->mmo.offsets = RB_ROOT;
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);


@ -69,6 +69,15 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
return idr_find(&file->object_idr, handle);
}
static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
if (obj && !kref_get_unless_zero(&obj->base.refcount))
obj = NULL;
return obj;
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
@ -76,8 +85,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle)
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
if (obj && !kref_get_unless_zero(&obj->base.refcount))
obj = NULL;
obj = i915_gem_object_get_rcu(obj);
rcu_read_unlock();
return obj;


@ -71,13 +71,11 @@ enum i915_mmap_type {
};
struct i915_mmap_offset {
struct drm_device *dev;
struct drm_vma_offset_node vma_node;
struct drm_i915_gem_object *obj;
struct drm_file *file;
enum i915_mmap_type mmap_type;
struct list_head offset;
struct rb_node offset;
};
struct drm_i915_gem_object {
@ -137,7 +135,7 @@ struct drm_i915_gem_object {
struct {
spinlock_t lock; /* Protects access to mmo offsets */
struct list_head offsets;
struct rb_root offsets;
} mmo;
I915_SELFTEST_DECLARE(struct list_head st_link);


@ -67,21 +67,18 @@ static int intel_context_active_acquire(struct intel_context *ce)
{
int err;
err = i915_active_acquire(&ce->active);
if (err)
return err;
__i915_active_acquire(&ce->active);
if (intel_context_is_barrier(ce))
return 0;
/* Preallocate tracking nodes */
if (!intel_context_is_barrier(ce)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err) {
i915_active_release(&ce->active);
return err;
}
}
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err)
i915_active_release(&ce->active);
return 0;
return err;
}
static void intel_context_active_release(struct intel_context *ce)
@ -101,13 +98,19 @@ int __intel_context_do_pin(struct intel_context *ce)
return err;
}
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
err = i915_active_acquire(&ce->active);
if (err)
return err;
if (likely(!atomic_read(&ce->pin_count))) {
if (mutex_lock_interruptible(&ce->pin_mutex)) {
err = -EINTR;
goto out_release;
}
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
goto err;
goto out_unlock;
err = ce->ops->pin(ce);
if (unlikely(err))
@ -117,18 +120,19 @@ int __intel_context_do_pin(struct intel_context *ce)
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
atomic_inc(&ce->pin_count);
}
atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
mutex_unlock(&ce->pin_mutex);
return 0;
GEM_BUG_ON(i915_active_is_idle(&ce->active));
goto out_unlock;
err_active:
intel_context_active_release(ce);
err:
out_unlock:
mutex_unlock(&ce->pin_mutex);
out_release:
i915_active_release(&ce->active);
return err;
}


@ -671,6 +671,7 @@ void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
INIT_LIST_HEAD(&engine->active.hold);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
@ -1422,6 +1423,17 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
static unsigned long list_count(struct list_head *list)
{
struct list_head *pos;
unsigned long count = 0;
list_for_each(pos, list)
count++;
return count;
}
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@ -1491,6 +1503,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);


@ -295,6 +295,7 @@ struct intel_engine_cs {
struct {
spinlock_t lock;
struct list_head requests;
struct list_head hold; /* ready requests, but on hold */
} active;
struct llist_head barrier_tasks;


@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
@ -1535,7 +1537,8 @@ static bool can_merge_rq(const struct i915_request *prev,
return true;
if (unlikely((prev->fence.flags ^ next->fence.flags) &
(I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
if (!can_merge_ctx(prev->context, next->context))
@ -1632,8 +1635,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
if (list_empty(&w->sched.link))
continue; /* Not yet submitted; unready */
if (!i915_request_is_ready(w))
continue;
if (rq_prio(w) < rq_prio(rq))
continue;
@ -2351,6 +2354,310 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
static void __execlists_hold(struct i915_request *rq)
{
LIST_HEAD(list);
do {
struct i915_dependency *p;
if (i915_request_is_active(rq))
__i915_request_unsubmit(rq);
RQ_TRACE(rq, "on hold\n");
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
list_move_tail(&rq->sched.link, &rq->engine->active.hold);
i915_request_set_hold(rq);
list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
if (!i915_request_is_ready(w))
continue;
if (i915_request_completed(w))
continue;
if (i915_request_on_hold(rq))
continue;
list_move_tail(&w->sched.link, &list);
}
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
}
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
spin_lock_irq(&engine->active.lock);
if (i915_request_completed(rq)) { /* too late! */
rq = NULL;
goto unlock;
}
if (rq->engine != engine) { /* preempted virtual engine */
struct virtual_engine *ve = to_virtual_engine(rq->engine);
/*
* intel_context_inflight() is only protected by virtue
* of process_csb() being called only by the tasklet (or
* directly from inside reset while the tasklet is suspended).
* Assert that neither of those are allowed to run while we
* poke at the request queues.
*/
GEM_BUG_ON(!reset_in_progress(&engine->execlists));
/*
* An unsubmitted request along a virtual engine will
* remain on the active (this) engine until we are able
* to process the context switch away (and so mark the
* context as no longer in flight). That cannot have happened
* yet, otherwise we would not be hanging!
*/
spin_lock(&ve->base.active.lock);
GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
GEM_BUG_ON(ve->request != rq);
ve->request = NULL;
spin_unlock(&ve->base.active.lock);
i915_request_put(rq);
rq->engine = engine;
}
/*
* Transfer this request onto the hold queue to prevent it
* being resumbitted to HW (and potentially completed) before we have
* released it. Since we may have already submitted following
* requests, we need to remove those as well.
*/
GEM_BUG_ON(i915_request_on_hold(rq));
GEM_BUG_ON(rq->engine != engine);
__execlists_hold(rq);
unlock:
spin_unlock_irq(&engine->active.lock);
return rq;
}
static bool hold_request(const struct i915_request *rq)
{
struct i915_dependency *p;
/*
* If one of our ancestors is on hold, we must also be on hold,
* otherwise we will bypass it and execute before it.
*/
list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
if (s->engine != rq->engine)
continue;
if (i915_request_on_hold(s))
return true;
}
return false;
}
static void __execlists_unhold(struct i915_request *rq)
{
LIST_HEAD(list);
do {
struct i915_dependency *p;
GEM_BUG_ON(!i915_request_on_hold(rq));
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
i915_request_clear_hold(rq);
list_move_tail(&rq->sched.link,
i915_sched_lookup_priolist(rq->engine,
rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
RQ_TRACE(rq, "hold release\n");
/* Also release any children on this engine that are ready */
list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
if (w->engine != rq->engine)
continue;
if (!i915_request_on_hold(rq))
continue;
/* Check that no other parents are also on hold */
if (hold_request(rq))
continue;
list_move_tail(&w->sched.link, &list);
}
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
}
static void execlists_unhold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
spin_lock_irq(&engine->active.lock);
/*
* Move this request back to the priority queue, and all of its
* children and grandchildren that were suspended along with it.
*/
__execlists_unhold(rq);
if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
engine->execlists.queue_priority_hint = rq_prio(rq);
tasklet_hi_schedule(&engine->execlists.tasklet);
}
spin_unlock_irq(&engine->active.lock);
}
struct execlists_capture {
struct work_struct work;
struct i915_request *rq;
struct i915_gpu_coredump *error;
};
static void execlists_capture_work(struct work_struct *work)
{
struct execlists_capture *cap = container_of(work, typeof(*cap), work);
const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
struct intel_engine_cs *engine = cap->rq->engine;
struct intel_gt_coredump *gt = cap->error->gt;
struct intel_engine_capture_vma *vma;
/* Compress all the objects attached to the request, slow! */
vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
if (vma) {
struct i915_vma_compress *compress =
i915_vma_capture_prepare(gt);
intel_engine_coredump_add_vma(gt->engine, vma, compress);
i915_vma_capture_finish(gt, compress);
}
gt->simulated = gt->engine->simulated;
cap->error->simulated = gt->simulated;
/* Publish the error state, and announce it to the world */
i915_error_state_store(cap->error);
i915_gpu_coredump_put(cap->error);
/* Return this request and all that depend upon it for signaling */
execlists_unhold(engine, cap->rq);
i915_request_put(cap->rq);
kfree(cap);
}
static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
{
const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
struct execlists_capture *cap;
cap = kmalloc(sizeof(*cap), gfp);
if (!cap)
return NULL;
cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
if (!cap->error)
goto err_cap;
cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
if (!cap->error->gt)
goto err_gpu;
cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
if (!cap->error->gt->engine)
goto err_gt;
return cap;
err_gt:
kfree(cap->error->gt);
err_gpu:
kfree(cap->error);
err_cap:
kfree(cap);
return NULL;
}
static bool execlists_capture(struct intel_engine_cs *engine)
{
struct execlists_capture *cap;
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
return true;
/*
* We need to _quickly_ capture the engine state before we reset.
* We are inside an atomic section (softirq) here and we are delaying
* the forced preemption event.
*/
cap = capture_regs(engine);
if (!cap)
return true;
cap->rq = execlists_active(&engine->execlists);
GEM_BUG_ON(!cap->rq);
rcu_read_lock();
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
cap->rq = i915_request_get_rcu(cap->rq);
rcu_read_unlock();
if (!cap->rq)
goto err_free;
/*
* Remove the request from the execlists queue, and take ownership
* of the request. We pass it to our worker who will _slowly_ compress
* all the pages the _user_ requested for debugging their batch, after
* which we return it to the queue for signaling.
*
* By removing them from the execlists queue, we also remove the
* requests from being processed by __unwind_incomplete_requests()
* during the intel_engine_reset(), and so they will *not* be replayed
* afterwards.
*
* Note that because we have not yet reset the engine at this point,
* it is possible for the request that we have identified as being
* guilty, did in fact complete and we will then hit an arbitration
* point allowing the outstanding preemption to succeed. The likelihood
* of that is very low (as capturing of the engine registers should be
* fast enough to run inside an irq-off atomic section!), so we will
* simply hold that request accountable for being non-preemptible
* long enough to force the reset.
*/
if (!execlists_hold(engine, cap->rq))
goto err_rq;
INIT_WORK(&cap->work, execlists_capture_work);
schedule_work(&cap->work);
return true;
err_rq:
i915_request_put(cap->rq);
err_free:
i915_gpu_coredump_put(cap->error);
kfree(cap);
return false;
}
static noinline void preempt_reset(struct intel_engine_cs *engine)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
@ -2368,7 +2675,12 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
READ_ONCE(engine->props.preempt_timeout_ms),
jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
intel_engine_reset(engine, "preemption time out");
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
intel_engine_reset(engine, "preemption time out");
else
ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@ -2430,11 +2742,12 @@ static void execlists_preempt(struct timer_list *timer)
}
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
struct i915_request *rq)
{
GEM_BUG_ON(!list_empty(&node->link));
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
GEM_BUG_ON(!list_empty(&rq->sched.link));
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(engine, rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@ -2462,6 +2775,13 @@ static void submit_queue(struct intel_engine_cs *engine,
__submit_queue_imm(engine);
}
static bool ancestor_on_hold(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
GEM_BUG_ON(i915_request_on_hold(rq));
return !list_empty(&engine->active.hold) && hold_request(rq);
}
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@ -2470,12 +2790,17 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
if (unlikely(ancestor_on_hold(engine, request))) {
list_add_tail(&request->sched.link, &engine->active.hold);
i915_request_set_hold(request);
} else {
queue_request(engine, request);
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
submit_queue(engine, request);
submit_queue(engine, request);
}
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@ -2531,7 +2856,6 @@ static void execlists_context_unpin(struct intel_context *ce)
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
@ -3325,6 +3649,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
/* On-hold requests will be flushed to timeline upon their release */
list_for_each_entry(rq, &engine->active.hold, sched.link)
mark_eio(rq);
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =


@ -59,11 +59,26 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
ring->vma = i915_vma_alloc();
if (!ring->vma) {
kfree(ring);
return NULL;
}
i915_active_init(&ring->vma->active, NULL, NULL);
intel_ring_update_space(ring);
return ring;
}
static void mock_ring_free(struct intel_ring *ring)
{
i915_active_fini(&ring->vma->active);
i915_vma_free(ring->vma);
kfree(ring);
}
static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@ -121,7 +136,7 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
kfree(ce->ring);
mock_ring_free(ce->ring);
mock_timeline_unpin(ce->timeline);
}


@ -285,6 +285,107 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
/*
* In order to support offline error capture for fast preempt reset,
* we need to decouple the guilty request and ensure that it and its
* descendents are not executed while the capture is in progress.
*/
if (!intel_has_reset_engine(gt))
return 0;
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
unsigned long heartbeat;
struct i915_request *rq;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
engine_heartbeat_disable(engine, &heartbeat);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(gt);
err = -ETIME;
goto out;
}
/* We have our request executing, now remove it and reset */
if (test_and_set_bit(I915_RESET_ENGINE + id,
&gt->reset.flags)) {
intel_gt_set_wedged(gt);
err = -EBUSY;
goto out;
}
tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.func(engine->execlists.tasklet.data);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
intel_engine_reset(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + id,
&gt->reset.flags);
/* Check that we do not resubmit the held request */
if (!i915_request_wait(rq, 0, HZ / 5)) {
pr_err("%s: on hold request completed!\n",
engine->name);
i915_request_put(rq);
err = -EIO;
goto out;
}
GEM_BUG_ON(!i915_request_on_hold(rq));
/* But is resubmitted on release */
execlists_unhold(engine, rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: held request did not complete!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -ETIME;
}
i915_request_put(rq);
out:
engine_heartbeat_enable(engine, heartbeat);
intel_context_put(ce);
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -3309,12 +3410,168 @@ static int live_virtual_bond(void *arg)
return 0;
}
static int reset_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
struct intel_engine_cs *engine;
struct intel_context *ve;
unsigned long *heartbeat;
struct igt_spinner spin;
struct i915_request *rq;
unsigned int n;
int err = 0;
/*
* In order to support offline error capture for fast preempt reset,
* we need to decouple the guilty request and ensure that it and its
* descendents are not executed while the capture is in progress.
*/
heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
if (!heartbeat)
return -ENOMEM;
if (igt_spinner_init(&spin, gt)) {
err = -ENOMEM;
goto out_free;
}
ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_spin;
}
for (n = 0; n < nsibling; n++)
engine_heartbeat_disable(siblings[n], &heartbeat[n]);
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_heartbeat;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(gt);
err = -ETIME;
goto out_heartbeat;
}
engine = rq->engine;
GEM_BUG_ON(engine == ve->engine);
/* Take ownership of the reset and tasklet */
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags)) {
intel_gt_set_wedged(gt);
err = -EBUSY;
goto out_heartbeat;
}
tasklet_disable(&engine->execlists.tasklet);
engine->execlists.tasklet.func(engine->execlists.tasklet.data);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
spin_lock_irq(&engine->active.lock);
__unwind_incomplete_requests(engine);
spin_unlock_irq(&engine->active.lock);
GEM_BUG_ON(rq->engine != ve->engine);
/* Reset the engine while keeping our active request on hold */
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
intel_engine_reset(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
/* Check that we do not resubmit the held request */
i915_request_get(rq);
if (!i915_request_wait(rq, 0, HZ / 5)) {
pr_err("%s: on hold request completed!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
GEM_BUG_ON(!i915_request_on_hold(rq));
/* But is resubmitted on release */
execlists_unhold(engine, rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: held request did not complete!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -ETIME;
}
out_rq:
i915_request_put(rq);
out_heartbeat:
for (n = 0; n < nsibling; n++)
engine_heartbeat_enable(siblings[n], heartbeat[n]);
intel_context_put(ve);
out_spin:
igt_spinner_fini(&spin);
out_free:
kfree(heartbeat);
return err;
}
static int live_virtual_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class, inst;
/*
* Check that we handle a reset event within a virtual engine.
* Only the physical engine is reset, but we have to check the flow
* of the virtual requests around the reset, and make sure it is not
* forgotten.
*/
if (USES_GUC_SUBMISSION(gt->i915))
return 0;
if (!intel_has_reset_engine(gt))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
if (!gt->engine_class[class][inst])
continue;
siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
err = reset_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
return 0;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
SUBTEST(live_hold_reset),
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
@@ -3333,6 +3590,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))


@@ -146,7 +146,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
kfree(gvt->firmware.mmio);
vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
@@ -229,7 +229,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
mem = kmalloc(info->mmio_size, GFP_KERNEL);
mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);


@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);


@@ -416,13 +416,15 @@ int i915_active_acquire(struct i915_active *ref)
if (err)
return err;
if (!atomic_read(&ref->count) && ref->active)
err = ref->active(ref);
if (!err) {
spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
debug_active_activate(ref);
atomic_inc(&ref->count);
spin_unlock_irq(&ref->tree_lock);
if (likely(!i915_active_acquire_if_busy(ref))) {
if (ref->active)
err = ref->active(ref);
if (!err) {
spin_lock_irq(&ref->tree_lock); /* __active_retire() */
debug_active_activate(ref);
atomic_inc(&ref->count);
spin_unlock_irq(&ref->tree_lock);
}
}
mutex_unlock(&ref->mutex);
@@ -605,7 +607,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
struct llist_node *pos = NULL, *next;
struct llist_node *first = NULL, *last = NULL;
struct intel_gt *gt = engine->gt;
int err;
@@ -623,6 +625,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
*/
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct llist_node *prev = first;
struct active_node *node;
node = reuse_idle_barrier(ref, idx);
@@ -656,23 +659,23 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
next = barrier_to_ll(node);
next->next = pos;
if (!pos)
pos = next;
first = barrier_to_ll(node);
first->next = prev;
if (!last)
last = first;
intel_engine_pm_get(engine);
}
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
llist_add_batch(next, pos, &ref->preallocated_barriers);
llist_add_batch(first, last, &ref->preallocated_barriers);
return 0;
unwind:
while (pos) {
struct active_node *node = barrier_from_ll(pos);
while (first) {
struct active_node *node = barrier_from_ll(first);
pos = pos->next;
first = first->next;
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
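The hunk above rebuilds the preallocated barrier chain with explicit first/last bookkeeping so that llist_add_batch() is handed a consistent head and tail for the whole chain. A minimal standalone sketch of that head/tail pattern, with illustrative names only (struct node and build_batch are not driver types):

#include <stddef.h>

struct node {
        struct node *next;
};

/*
 * Push each new node onto the front of a private chain while remembering
 * the very first node pushed; the pair (*first, *last) then describes the
 * whole chain for a single batched splice (llist_add_batch() in the kernel).
 */
static void build_batch(struct node *nodes, size_t count,
                        struct node **first, struct node **last)
{
        *first = NULL;
        *last = NULL;

        for (size_t i = 0; i < count; i++) {
                nodes[i].next = *first;
                *first = &nodes[i];
                if (!*last)
                        *last = &nodes[i];
        }
}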


@@ -188,6 +188,12 @@ int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
static inline void __i915_active_acquire(struct i915_active *ref)
{
GEM_BUG_ON(!atomic_read(&ref->count));
atomic_inc(&ref->count);
}
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{


@@ -265,7 +265,10 @@ i915_gem_dumb_create(struct drm_file *file,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
args->size = args->pitch * args->height;
if (args->pitch < args->width)
return -EINVAL;
args->size = mul_u32_u32(args->pitch, args->height);
mem_type = INTEL_MEMORY_SYSTEM;
if (HAS_LMEM(to_i915(dev)))
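The dumb-buffer hunk above now rejects a pitch smaller than the width and computes the size with a widening multiply, so a large pitch * height cannot wrap a 32-bit value. A hedged standalone equivalent of that arithmetic (dumb_size is an illustrative name; mul_u32_u32() is the kernel helper it mirrors):

#include <stdint.h>

/* Widen both operands before multiplying, as mul_u32_u32() does. */
static uint64_t dumb_size(uint32_t pitch, uint32_t height)
{
        return (uint64_t)pitch * height;
}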


@@ -1681,7 +1681,7 @@ static const char *error_msg(struct i915_gpu_coredump *error)
"GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
generate_ecode(first));
if (first) {
if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,


@@ -314,8 +314,11 @@ i915_vma_capture_finish(struct intel_gt_coredump *gt,
}
static inline void
i915_error_state_store(struct drm_i915_private *i915,
struct i915_gpu_coredump *error)
i915_error_state_store(struct i915_gpu_coredump *error)
{
}
static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}


@@ -637,8 +637,10 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref;
unsigned long flags;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -648,6 +650,14 @@ static void i915_pmu_enable(struct perf_event *event)
BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
if (pmu->enable_count[bit] == 0 &&
config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -688,6 +698,8 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)


@@ -221,6 +221,8 @@ static void remove_from_engine(struct i915_request *rq)
locked = engine;
}
list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
spin_unlock_irq(&locked->active.lock);
}
@@ -408,8 +410,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
}
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&


@@ -70,6 +70,18 @@ enum {
*/
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
* I915_FENCE_FLAG_PQUEUE - this request is ready for execution
*
* Using the scheduler, when a request is ready for execution it is put
* into the priority queue, and removed from that queue when transferred
* to the HW runlists. We want to track its membership within the
* priority queue so that we can easily check before rescheduling.
*
* See i915_request_in_priority_queue()
*/
I915_FENCE_FLAG_PQUEUE,
/*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
@@ -78,6 +90,13 @@ enum {
*/
I915_FENCE_FLAG_SIGNAL,
/*
* I915_FENCE_FLAG_HOLD - this request is currently on hold
*
* This request has been suspended, pending an ongoing investigation.
*/
I915_FENCE_FLAG_HOLD,
/*
* I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
*
@@ -361,6 +380,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
/**
* Returns true if seq1 is later than seq2.
*/
@@ -454,6 +478,27 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
return __i915_request_has_started(rq);
}
/**
* i915_request_is_ready - check if the request is ready for execution
* @rq: the request
*
* Upon construction, the request is instructed to wait upon various
* signals before it is ready to be executed by the HW. That is, we do
* not want to start execution and read data before it is written. In practice,
* this is controlled with a mixture of interrupts and semaphores. Once
* the submit fence is completed, the backend scheduler will place the
* request into its queue and from there submit it for execution. So we
* can detect when a request is eligible for execution (and is under control
* of the scheduler) by querying where it is in any of the scheduler's lists.
*
* Returns true if the request is ready for execution (it may be inflight),
* false otherwise.
*/
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
return !list_empty(&rq->sched.link);
}
static inline bool i915_request_completed(const struct i915_request *rq)
{
if (i915_request_signaled(rq))
@@ -483,6 +528,21 @@ static inline bool i915_request_has_sentinel(const struct i915_request *rq)
return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}
static inline bool i915_request_on_hold(const struct i915_request *rq)
{
return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}
static inline void i915_request_set_hold(struct i915_request *rq)
{
set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
static inline void i915_request_clear_hold(struct i915_request *rq)
{
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{


@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio;
if (list_empty(&node->link)) {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
* to our preemption decisions. On the other hand,
* if the request is on the HW, it too is not in the
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
/*
* Once the request is ready, it will be placed into the
* priority lists and then onto the HW runlist. Before the
* request is ready, it does not contribute to our preemption
* decisions and we can safely ignore it, as it will, and
* any preemption required, be dealt with upon submission.
* See engine->submit_request()
*/
if (list_empty(&node->link))
continue;
}
if (!intel_engine_is_virtual(engine) &&
!i915_request_is_active(node_to_request(node))) {
if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,


@@ -1202,16 +1202,26 @@ int __i915_vma_unbind(struct i915_vma *vma)
if (ret)
return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
GEM_BUG_ON(i915_vma_is_active(vma));
/*
* After confirming that no one else is pinning this vma, wait for
* any laggards who may have crept in during the wait (through
* a residual pin skipping the vm->mutex) to complete.
*/
ret = i915_vma_sync(vma);
if (ret)
return ret;
if (!drm_mm_node_allocated(&vma->node))
return 0;
GEM_BUG_ON(i915_vma_is_pinned(vma));
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT


@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {


@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
break;
}
atomic_inc(&bo->gpu_usecount);
job->mappings[i] = mapping;
}


@@ -30,6 +30,12 @@ struct panfrost_gem_object {
struct mutex lock;
} mappings;
/*
* Count the number of jobs referencing this BO so we don't let the
* shrinker reclaim this object prematurely.
*/
atomic_t gpu_usecount;
bool noexec :1;
bool is_heap :1;
};


@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
if (atomic_read(&bo->gpu_usecount))
return false;
if (!mutex_trylock(&shmem->pages_lock))
return false;


@@ -269,8 +269,13 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->render_done_fence);
if (job->mappings) {
for (i = 0; i < job->bo_count; i++)
for (i = 0; i < job->bo_count; i++) {
if (!job->mappings[i])
break;
atomic_dec(&job->mappings[i]->obj->gpu_usecount);
panfrost_gem_mapping_put(job->mappings[i]);
}
kvfree(job->mappings);
}
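Taken together, the panfrost hunks above add a per-object GPU use count: queuing a job bumps the counter on each mapped object, the shrinker refuses to purge while the counter is non-zero, and job cleanup drops it again. A rough standalone sketch of that gating pattern, with names that are illustrative rather than the driver's:

#include <stdatomic.h>
#include <stdbool.h>

struct gpu_object {
        atomic_int gpu_usecount;
};

static void job_take(struct gpu_object *obj)
{
        atomic_fetch_add(&obj->gpu_usecount, 1);        /* taken when the job is queued */
}

static void job_release(struct gpu_object *obj)
{
        atomic_fetch_sub(&obj->gpu_usecount, 1);        /* dropped at job cleanup */
}

static bool can_purge(struct gpu_object *obj)
{
        /* the shrinker must skip objects still referenced by queued jobs */
        return atomic_load(&obj->gpu_usecount) == 0;
}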


@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {


@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
drm_gem_object_put_unlocked(&obj->base);
if (ret)
if (ret) {
drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
}
return &obj->base;
}
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
DRM_DEBUG("Created object of size %lld\n", size);
drm_gem_object_put_unlocked(gem_object);
DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
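Finally, the vgem change stops dropping the creator's reference before the caller is done with the object: vgem_gem_create() now puts the reference only on its error path, and vgem_gem_dumb_create() puts it once it has recorded the size. A simplified sketch of that ownership rule, using hypothetical helpers (object_create/object_put are not the DRM API):

#include <stdlib.h>

struct object {
        int refcount;
        unsigned long long size;
};

static void object_put(struct object *obj)
{
        if (--obj->refcount == 0)
                free(obj);
}

static struct object *object_create(unsigned long long size)
{
        struct object *obj = calloc(1, sizeof(*obj));

        if (obj) {
                obj->refcount = 1;      /* reference handed to the caller */
                obj->size = size;
        }
        return obj;
}

static int dumb_create(unsigned long long size, unsigned long long *out_size)
{
        struct object *obj = object_create(size);

        if (!obj)
                return -1;

        *out_size = obj->size;  /* safe: we still hold our reference */
        object_put(obj);        /* drop it only after we are done */
        return 0;
}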