drm/i915: Markup paired operations on wakerefs
The majority of runtime-pm operations are bounded and scoped within a
function; for these it is easy to verify that the wakerefs are handled
correctly. We can employ the compiler to help us, and reduce the number
of wakerefs tracked when debugging, by passing around cookies provided
by the various rpm_get functions to their rpm_put counterpart. This
makes the pairing explicit, and given the required wakeref cookie the
compiler can verify that we pass an initialised value to the rpm_put
(quite handy for double checking error paths).

For regular builds, the compiler should be able to eliminate the unused
local variables and the program growth should be minimal. Fwiw, it came
out as a net improvement as gcc was able to refactor rpm_get and
rpm_get_if_in_use together.

v2: Just s/rpm_put/rpm_put_unchecked/ everywhere, leaving the manual
markup for smaller, more targeted patches.
v3: Mention the cookie in Returns

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-2-chris@chris-wilson.co.uk
This commit is contained in:
Parent: bd780f37a3
Commit: 16e4dd0342
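As context for the diff below: once callers are converted in later patches, each rpm_get is meant to return an intel_wakeref_t cookie that is handed back to the matching put. A minimal sketch of that calling convention, using the prototypes added to intel_drv.h further down; the surrounding caller and its do_hw_access() helper are hypothetical and only illustrate the pairing:

static int example_hw_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_runtime_pm_get(i915);	/* get returns the cookie */

	err = do_hw_access(i915);		/* hypothetical helper */

	intel_runtime_pm_put(i915, wakeref);	/* checked, paired release */
	return err;
}

This patch itself only renames the existing releases to intel_runtime_pm_put_unchecked(); the per-caller cookie conversion is deferred, per the v2 note above.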
@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 	}
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)

@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 	_clear_vgpu_fence(vgpu);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return 0;
 out_free_fence:
 	gvt_vgpu_err("Failed to alloc fences\n");

@@ -219,7 +219,7 @@ out_free_fence:
 		vgpu->fence.regs[i] = NULL;
 	}
 	mutex_unlock(&dev_priv->drm.struct_mutex);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return -ENOSPC;
 }
 

@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 
 	intel_runtime_pm_get(dev_priv);
 	_clear_vgpu_fence(vgpu);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 /**
@@ -597,7 +597,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 
 static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
 {
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 /**

@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }

@@ -997,7 +997,7 @@ complete:
 		intel_uncore_forcewake_put(gvt->dev_priv,
					   FORCEWAKE_ALL);
 
-		intel_runtime_pm_put(gvt->dev_priv);
+		intel_runtime_pm_put_unchecked(gvt->dev_priv);
 		if (ret && (vgpu_is_vm_unhealthy(ret)))
 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 	}

@@ -1451,7 +1451,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		mutex_lock(&dev_priv->drm.struct_mutex);
 		ret = intel_gvt_scan_and_shadow_workload(workload);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 	}
 
 	if (ret && (vgpu_is_vm_unhealthy(ret))) {
@@ -877,7 +877,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		}
 	}
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -953,7 +953,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 
 	intel_runtime_pm_get(i915);
 	gpu = i915_capture_gpu_state(i915);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	if (IS_ERR(gpu))
 		return PTR_ERR(gpu);
 

@@ -1226,7 +1226,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return ret;
 }
 

@@ -1292,7 +1292,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",

@@ -1579,7 +1579,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	else
 		err = ironlake_drpc_info(m);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return err;
 }

@@ -1632,7 +1632,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	}
 
 	mutex_unlock(&fbc->lock);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -1695,7 +1695,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 			seq_puts(m, "Currently: disabled\n");
 	}
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -1723,7 +1723,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
 

@@ -1756,7 +1756,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	seq_printf(m, "GFX power: %ld\n", gfx);
 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -1805,7 +1805,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	mutex_unlock(&dev_priv->pcu_lock);
 
 out:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return ret;
 }
 

@@ -2017,7 +2017,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 		seq_puts(m, "L-shaped memory detected\n");
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }
@@ -2067,7 +2067,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
 		}
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 	}
 
 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);

@@ -2160,7 +2160,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -2192,7 +2192,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 	for (i = 0; i < 16; i++)
 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -2601,7 +2601,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 			   dev_priv->psr.last_exit);
 	}
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return 0;
 }
 

@@ -2632,7 +2632,7 @@ retry:
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return ret;
 }

@@ -2665,7 +2665,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 	intel_runtime_pm_get(dev_priv);
 
 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 		return -ENODEV;
 	}
 

@@ -2673,7 +2673,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 	power = I915_READ(MCH_SECP_NRG_STTS);
 	power = (1000000 * power) >> units; /* convert to uJ */
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	seq_printf(m, "%llu", power);
 

@@ -2775,7 +2775,7 @@ out:
 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -3114,7 +3114,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	drm_connector_list_iter_end(&conn_iter);
 	mutex_unlock(&dev->mode_config.mutex);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -3139,7 +3139,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	for_each_engine(engine, dev_priv, id)
 		intel_engine_dump(engine, &p, "%s\n", engine->name);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return 0;
 }

@@ -3265,7 +3265,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
 	dev_priv->wm.distrust_bios_wm = true;
 	dev_priv->ipc_enabled = enable;
 	intel_enable_ipc(dev_priv);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return len;
 }

@@ -4090,7 +4090,7 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_drain_freed_objects(i915);
 
 out:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return ret;
 }

@@ -4112,7 +4112,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 

@@ -4140,7 +4140,7 @@ i915_cache_sharing_set(void *data, u64 val)
 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return 0;
 }
 

@@ -4388,7 +4388,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 			gen10_sseu_device_status(dev_priv, &sseu);
 	}
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	i915_print_sseu_info(m, false, &sseu);
 

@@ -4416,7 +4416,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 		return 0;
 
 	intel_uncore_forcewake_user_put(i915);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return 0;
 }
@@ -131,6 +131,8 @@ bool i915_error_injected(void);
 	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)
 
+typedef depot_stack_handle_t intel_wakeref_t;
+
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_TV = HPD_NONE, /* TV is known to be unreliable */

@@ -175,7 +175,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return i915->gt.epoch;
 }

@@ -814,7 +814,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 		POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
 
 		spin_unlock_irq(&dev_priv->uncore.lock);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 	}
 
 static void

@@ -1149,7 +1149,7 @@ out_unpin:
 		i915_vma_unpin(vma);
 	}
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return ret;

@@ -1356,7 +1356,7 @@ out_unpin:
 		i915_vma_unpin(vma);
 	}
 out_rpm:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 out_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;

@@ -1968,7 +1968,7 @@ err_unpin:
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	i915_gem_object_unpin_pages(obj);
 err:
 	switch (ret) {

@@ -2068,7 +2068,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	wmb();
 
 out:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)

@@ -4765,7 +4765,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (on)
 			cond_resched();
 	}
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)

@@ -4901,7 +4901,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 		intel_engines_sanitize(i915, false);
 
 	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	i915_gem_contexts_lost(i915);
 	mutex_unlock(&i915->drm.struct_mutex);

@@ -4965,12 +4965,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	if (WARN_ON(!intel_engines_are_idle(i915)))
 		i915_gem_set_wedged(i915); /* no hope, discard everything */
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	return 0;
 
 err_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	return ret;
 }
 
@@ -2424,7 +2424,7 @@ err_vma:
 	eb_release_vmas(&eb);
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_runtime_pm_put(eb.i915);
+	intel_runtime_pm_put_unchecked(eb.i915);
 	i915_gem_context_put(eb.ctx);
 err_destroy:
 	eb_destroy(&eb);

@@ -258,7 +258,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 	 */
 	if (intel_runtime_pm_get_if_in_use(fence->i915)) {
 		fence_write(fence, vma);
-		intel_runtime_pm_put(fence->i915);
+		intel_runtime_pm_put_unchecked(fence->i915);
 	}
 
 	if (vma) {

@@ -2536,7 +2536,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 

@@ -2556,7 +2556,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 
 	intel_runtime_pm_get(i915);
 	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,

@@ -2590,7 +2590,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_GLOBAL_BIND) {
 		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 	}
 
 	return 0;

@@ -2603,7 +2603,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 	if (vma->flags & I915_VMA_GLOBAL_BIND) {
 		intel_runtime_pm_get(i915);
 		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 	}
 
 	if (vma->flags & I915_VMA_LOCAL_BIND) {

@@ -265,7 +265,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	}
 
 	if (flags & I915_SHRINK_BOUND)
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 
 	i915_retire_requests(i915);
 

@@ -299,7 +299,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND |
 				I915_SHRINK_ACTIVE);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return freed;
 }

@@ -377,7 +377,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 I915_SHRINK_ACTIVE |
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 	}
 
 	shrinker_unlock(i915, unlock);

@@ -397,7 +397,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	freed_pages = i915_gem_shrink(i915, -1UL, NULL,
				      I915_SHRINK_BOUND |
				      I915_SHRINK_UNBOUND);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	/* Because we may be allocating inside our own driver, we cannot
 	 * assert that there are no objects with pinned pages that are not

@@ -451,7 +451,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_VMAPS);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	/* We also want to clear any cached iomaps as they wrap vmap */
 	list_for_each_entry_safe(vma, next,
@@ -3374,7 +3374,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	wake_up_all(&dev_priv->gpu_error.reset_queue);
 
 out:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which

@@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	free_oa_buffer(dev_priv);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	if (stream->ctx)
 		oa_put_render_ctx_id(stream);

@@ -2123,7 +2123,7 @@ err_oa_buf_alloc:
 	put_oa_config(dev_priv, stream->oa_config);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 err_config:
 	if (stream->ctx)

@@ -210,7 +210,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 	if (fw)
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 static void

@@ -231,7 +231,7 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 		    intel_runtime_pm_get_if_in_use(dev_priv)) {
 			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
-			intel_runtime_pm_put(dev_priv);
+			intel_runtime_pm_put_unchecked(dev_priv);
 		}
 
 		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],

@@ -448,7 +448,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
 
 	if (intel_runtime_pm_get_if_in_use(i915)) {
 		val = __get_rc6(i915);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 
 		/*
 		 * If we are coming back from being runtime suspended we must

@@ -46,7 +46,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
 
 	intel_runtime_pm_get(dev_priv);
 	res = intel_rc6_residency_us(dev_priv, reg);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return DIV_ROUND_CLOSEST_ULL(res, 1000);
 }

@@ -274,7 +274,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 	}
 	mutex_unlock(&dev_priv->pcu_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
 }

@@ -371,7 +371,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	    val > rps->max_freq ||
 	    val < rps->min_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 		return -EINVAL;
 	}
 

@@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	mutex_unlock(&dev_priv->pcu_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return ret ?: count;
 }

@@ -429,7 +429,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	    val > rps->max_freq ||
 	    val > rps->max_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 		return -EINVAL;
 	}
 

@@ -446,7 +446,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	mutex_unlock(&dev_priv->pcu_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return ret ?: count;
 }
@@ -2101,7 +2101,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 err:
 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	return vma;
 }
 

@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/hdmi.h>
 #include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include <drm/drm_crtc.h>
@@ -2182,10 +2183,16 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
 	atomic_dec(&i915->runtime_pm.wakeref_count);
 }
 
-void intel_runtime_pm_get(struct drm_i915_private *i915);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-void intel_runtime_pm_put(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
+#endif
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
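A hedged illustration of the error-path benefit the commit message mentions: because the checked intel_runtime_pm_put() consumes the cookie, the wakeref local must be initialised on every path that reaches the release, which lets the compiler flag paths that skip the get. The caller and its contents below are hypothetical; under !CONFIG_DRM_I915_DEBUG_RUNTIME_PM the macro above simply discards the cookie, so carrying it costs nothing in regular builds.

static int example_with_error_path(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	void *scratch;

	scratch = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;	/* bail out before the get: no wakeref to release */

	wakeref = intel_runtime_pm_get(i915);
	/* ... hardware access using scratch ... */
	intel_runtime_pm_put(i915, wakeref);	/* must see an initialised cookie */

	kfree(scratch);
	return 0;
}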
@@ -928,7 +928,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
 		idle = false;
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return idle;
 }

@@ -1485,7 +1485,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
 		intel_engine_print_registers(engine, m);
-		intel_runtime_pm_put(engine->i915);
+		intel_runtime_pm_put_unchecked(engine->i915);
 	} else {
 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 	}

@@ -276,7 +276,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	ifbdev->vma = vma;
 	ifbdev->vma_flags = flags;
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(pdev, info);
 	return 0;

@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_unpin:
 	intel_unpin_fb_vma(vma, flags);
 out_unlock:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }

@@ -445,7 +445,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
 	 */
 	intel_runtime_pm_get(dev_priv);
 	guc_action_flush_log_complete(guc);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 int intel_guc_log_create(struct intel_guc_log *log)

@@ -528,7 +528,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
 	ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
				     GUC_LOG_LEVEL_IS_ENABLED(level),
				     GUC_LOG_LEVEL_TO_VERBOSITY(level));
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
 		goto out_unlock;

@@ -610,7 +610,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 
 	intel_runtime_pm_get(i915);
 	guc_action_flush_log(guc);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	/* GuC would have updated log buffer by now, so capture it */
 	guc_log_capture_logs(log);

@@ -261,7 +261,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 bool intel_encoder_hotplug(struct intel_encoder *encoder,

@@ -122,7 +122,7 @@ int intel_huc_check_status(struct intel_huc *huc)
 
 	intel_runtime_pm_get(dev_priv);
 	status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return status;
 }

@@ -1213,7 +1213,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 	ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return ret;
 }
@@ -94,7 +94,7 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 	spin_lock_init(&rpm->debug.lock);
 }
 
-static noinline void
+static noinline depot_stack_handle_t
 track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
 	struct i915_runtime_pm *rpm = &i915->runtime_pm;

@@ -105,11 +105,11 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 	assert_rpm_wakelock_held(i915);
 
 	if (!HAS_RUNTIME_PM(i915))
-		return;
+		return -1;
 
 	stack = __save_depot_stack();
 	if (!stack)
-		return;
+		return -1;
 
 	spin_lock_irqsave(&rpm->debug.lock, flags);
 

@@ -122,9 +122,57 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 	if (stacks) {
 		stacks[rpm->debug.count++] = stack;
 		rpm->debug.owners = stacks;
+	} else {
+		stack = -1;
 	}
 
 	spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+	return stack;
 }
 
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+					    depot_stack_handle_t stack)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	unsigned long flags, n;
+	bool found = false;
+
+	if (unlikely(stack == -1))
+		return;
+
+	spin_lock_irqsave(&rpm->debug.lock, flags);
+	for (n = rpm->debug.count; n--; ) {
+		if (rpm->debug.owners[n] == stack) {
+			memmove(rpm->debug.owners + n,
+				rpm->debug.owners + n + 1,
+				(--rpm->debug.count - n) * sizeof(stack));
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+	if (WARN(!found,
+		 "Unmatched wakeref (tracking %lu), count %u\n",
+		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+		char *buf;
+
+		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		if (!buf)
+			return;
+
+		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
+		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+		stack = READ_ONCE(rpm->debug.last_release);
+		if (stack) {
+			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
+			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+		}
+
+		kfree(buf);
+	}
+}
+
 static int cmphandle(const void *_a, const void *_b)

@@ -249,10 +297,12 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
 }
 
-static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
 	atomic_inc(&i915->runtime_pm.wakeref_count);
 	assert_rpm_wakelock_held(i915);
+	return -1;
 }
 
 static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
@@ -1852,7 +1902,7 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 	mutex_unlock(&power_domains->lock);
 
 	if (!is_enabled)
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put_unchecked(dev_priv);
 
 	return is_enabled;
 }

@@ -1886,7 +1936,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
 	mutex_unlock(&power_domains->lock);
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 #define I830_PIPES_POWER_DOMAINS ( \

@@ -3994,7 +4044,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
 {
 	/* Keep the power well enabled, but cancel its rpm wakeref. */
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	/* Remove the refcount we took to keep power well support disabled. */
 	if (!i915_modparams.disable_power_well)

@@ -4207,8 +4257,10 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
 {
 	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;

@@ -4217,7 +4269,7 @@ void intel_runtime_pm_get(struct drm_i915_private *i915)
 	ret = pm_runtime_get_sync(kdev);
 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-	track_intel_runtime_pm_wakeref(i915);
+	return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
@@ -4231,9 +4283,10 @@ void intel_runtime_pm_get(struct drm_i915_private *i915)
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
  *
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
  */
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 {
 	if (IS_ENABLED(CONFIG_PM)) {
 		struct pci_dev *pdev = i915->drm.pdev;

@@ -4246,12 +4299,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 		 * atm to the late/early system suspend/resume handlers.
 		 */
 		if (pm_runtime_get_if_in_use(kdev) <= 0)
-			return false;
+			return 0;
 	}
 
-	track_intel_runtime_pm_wakeref(i915);
-
-	return true;
+	return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
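For the conditional variant just above, the returned cookie doubles as the acquired/not-acquired test (the Returns note says it evaluates as true only if the wakeref was taken). A minimal, hypothetical sketch of a caller in that shape; read_hw_counter() is an assumed placeholder, not a real helper:

static u32 example_sample(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (wakeref) {
		val = read_hw_counter(i915);	/* hypothetical helper */
		intel_runtime_pm_put(i915, wakeref);
	}

	return val;
}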
@@ -4270,8 +4321,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
 	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;

@@ -4279,7 +4332,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 	assert_rpm_wakelock_held(i915);
 	pm_runtime_get_noresume(kdev);
 
-	track_intel_runtime_pm_wakeref(i915);
+	return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**

@@ -4290,7 +4343,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
-void intel_runtime_pm_put(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
 {
 	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;

@@ -4301,6 +4354,14 @@ void intel_runtime_pm_put(struct drm_i915_private *i915)
 	pm_runtime_put_autosuspend(kdev);
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+	cancel_intel_runtime_pm_wakeref(i915, wref);
+	intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
 /**
  * intel_runtime_pm_enable - enable runtime pm
  * @i915: i915 device instance

@@ -1709,7 +1709,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 		reg->val = I915_READ8(entry->offset_ldw);
 	else
 		ret = -EINVAL;
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 
 	return ret;
 }
@@ -1785,7 +1785,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 	err = i915_subtests(tests, ctx);
 
 out_unlock:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	mock_file_free(dev_priv, file);

@@ -32,7 +32,7 @@ static int switch_to_context(struct drm_i915_private *i915,
 		i915_request_add(rq);
 	}
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return err;
 }

@@ -76,7 +76,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
 	 */
 	trash_stolen(i915);
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static int pm_prepare(struct drm_i915_private *i915)

@@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915)
 	i915_gem_suspend_gtt_mappings(i915);
 	i915_gem_suspend_late(i915);
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static void pm_hibernate(struct drm_i915_private *i915)

@@ -110,7 +110,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
 	i915_gem_freeze(i915);
 	i915_gem_freeze_late(i915);
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static void pm_resume(struct drm_i915_private *i915)

@@ -125,7 +125,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	i915_gem_sanitize(i915);
 	i915_gem_resume(i915);
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 }
 
 static int igt_gem_suspend(void *arg)

@@ -376,7 +376,7 @@ static int igt_gem_coherency(void *arg)
 		}
 	}
 unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	kfree(offsets);
 	return err;

@@ -243,7 +243,7 @@ static int live_nop_switch(void *arg)
 	}
 
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	mock_file_free(i915, file);
 	return err;

@@ -609,7 +609,7 @@ static int igt_ctx_exec(void *arg)
 
 			intel_runtime_pm_get(i915);
 			err = gpu_fill(obj, ctx, engine, dw);
-			intel_runtime_pm_put(i915);
+			intel_runtime_pm_put_unchecked(i915);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),

@@ -715,7 +715,7 @@ static int igt_ctx_readonly(void *arg)
 
 			intel_runtime_pm_get(i915);
 			err = gpu_fill(obj, ctx, engine, dw);
-			intel_runtime_pm_put(i915);
+			intel_runtime_pm_put_unchecked(i915);
 			if (err) {
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),

@@ -1067,7 +1067,7 @@ static int igt_vm_isolation(void *arg)
 		 count, RUNTIME_INFO(i915)->num_rings);
 
 out_rpm:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 out_unlock:
 	if (end_live_test(&t))
 		err = -EIO;

@@ -1200,7 +1200,7 @@ out_unlock:
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	kernel_context_close(ctx);

@@ -464,7 +464,7 @@ out_locked:
 	}
 	if (drm_mm_node_allocated(&hole))
 		drm_mm_remove_node(&hole);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
@@ -295,7 +295,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 
 			intel_runtime_pm_get(i915);
 			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-			intel_runtime_pm_put(i915);
+			intel_runtime_pm_put_unchecked(i915);
 		}
 		count = n;
 

@@ -1216,7 +1216,7 @@ static int igt_ggtt_page(void *arg)
 	kfree(order);
 out_remove:
 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	drm_mm_remove_node(&tmp);
 out_unpin:
 	i915_gem_object_unpin_pages(obj);

@@ -444,7 +444,7 @@ next_tiling: ;
 	}
 
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
 out:

@@ -508,7 +508,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 	if (!i915->gt.active_requests++) {
 		intel_runtime_pm_get(i915);
 		i915_gem_unpark(i915);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 	}
 	mutex_unlock(&i915->drm.struct_mutex);
 	cancel_delayed_work_sync(&i915->gt.retire_work);

@@ -590,7 +590,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 		mutex_lock(&i915->drm.struct_mutex);
 		intel_runtime_pm_get(i915);
 		err = make_obj_busy(obj);
-		intel_runtime_pm_put(i915);
+		intel_runtime_pm_put_unchecked(i915);
 		mutex_unlock(&i915->drm.struct_mutex);
 		if (err) {
 			pr_err("[loop %d] Failed to busy the object\n", loop);

@@ -403,7 +403,7 @@ static int live_nop_request(void *arg)
 	}
 
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -553,7 +553,7 @@ out_batch:
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -731,7 +731,7 @@ out_request:
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -860,7 +860,7 @@ out_request:
 		i915_request_put(request[id]);
 	}
 out_unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -225,7 +225,7 @@ out:
 	guc_clients_create(guc);
 	guc_clients_enable(guc);
 unlock:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }

@@ -337,7 +337,7 @@ out:
 		guc_client_free(clients[i]);
 	}
 unlock:
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }

@@ -402,7 +402,7 @@ static int igt_wedged_reset(void *arg)
 	i915_reset(i915, ALL_ENGINES, NULL);
 	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
 
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 
@@ -1636,7 +1636,7 @@ out:
 	force_reset(i915);
 
 unlock:
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 

@@ -1679,7 +1679,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	i915_modparams.enable_hangcheck = saved_hangcheck;
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	return err;
 }

@@ -65,7 +65,7 @@ err_spin:
 	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -158,7 +158,7 @@ err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -251,7 +251,7 @@ err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 

@@ -374,7 +374,7 @@ err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }

@@ -627,7 +627,7 @@ err_ctx:
 err_batch:
 	i915_gem_object_put(smoke.batch);
 err_unlock:
-	intel_runtime_pm_put(smoke.i915);
+	intel_runtime_pm_put_unchecked(smoke.i915);
 	mutex_unlock(&smoke.i915->drm.struct_mutex);
 	kfree(smoke.contexts);
 

@@ -94,7 +94,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 
 	intel_runtime_pm_get(engine->i915);
 	rq = i915_request_alloc(engine, ctx);
-	intel_runtime_pm_put(engine->i915);
+	intel_runtime_pm_put_unchecked(engine->i915);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_pin;

@@ -241,7 +241,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	else
 		rq = i915_request_alloc(engine, ctx);
 
-	intel_runtime_pm_put(engine->i915);
+	intel_runtime_pm_put_unchecked(engine->i915);
 
 	kernel_context_close(ctx);
 

@@ -300,7 +300,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 
 	intel_runtime_pm_get(i915);
 	err = reset(engine);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 
 	if (want_spin) {
 		igt_spinner_end(&spin);

@@ -414,7 +414,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
 
 out:
 	reference_lists_fini(i915, &lists);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	igt_global_reset_unlock(i915);
 
 	return ok ? 0 : -ESRCH;

@@ -496,7 +496,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 
 err:
 	reference_lists_fini(i915, &lists);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put_unchecked(i915);
 	igt_global_reset_unlock(i915);
 	kernel_context_close(ctx);
 