Merge tag 'drm-intel-fixes-2017-07-27' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
i915 fixes for -rc3

Bit more than usual since we missed -rc2. 4x cc: stable, 2 gvt patches,
but all fairly minor stuff. The last-minute rebase was to add a few
missing cc: stable tags; I had already prepped the pull this morning and
made sure CI approves.

* tag 'drm-intel-fixes-2017-07-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Fix bad comparison in skl_compute_plane_wm.
  drm/i915: Force CPU synchronisation even if userspace requests ASYNC
  drm/i915: Only skip updating execobject.offset after error
  drm/i915: Only mark the execobject as pinned on success
  drm/i915: Remove assertion from raw __i915_vma_unpin()
  drm/i915/cnl: Fix loadgen select programming on ddi vswing sequence
  drm/i915: Fix scaler init during CRTC HW state readout
  drm/i915/selftests: Fix an error handling path in 'mock_gem_device()'
  drm/i915: Unbreak gpu reset vs. modeset locking
  drm/i915: Fix cursor updates on some platforms
  drm/i915: Fix user ptr check size in eb_relocate_vma()
  drm/i915/gvt: Extend KBL platform support in GVT-g
  drm/i915/gvt: Fix the vblank timer close issue after shutdown VMs in reverse
Commit d5bcd1113b
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
 {
 	struct intel_gvt_irq *irq = &gvt->irq;
 	struct intel_vgpu *vgpu;
-	bool have_enabled_pipe = false;
 	int pipe, id;
 
 	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
 		return;
 
-	hrtimer_cancel(&irq->vblank_timer.timer);
-
 	for_each_active_vgpu(gvt, vgpu, id) {
 		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
-			have_enabled_pipe =
-				pipe_is_enabled(vgpu, pipe);
-			if (have_enabled_pipe)
-				break;
+			if (pipe_is_enabled(vgpu, pipe))
+				goto out;
 		}
 	}
 
-	if (have_enabled_pipe)
-		hrtimer_start(&irq->vblank_timer.timer,
-			ktime_add_ns(ktime_get(), irq->vblank_timer.period),
-			HRTIMER_MODE_ABS);
+	/* all the pipes are disabled */
+	hrtimer_cancel(&irq->vblank_timer.timer);
+	return;
+
+out:
+	hrtimer_start(&irq->vblank_timer.timer,
+		ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+		HRTIMER_MODE_ABS);
 }
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
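This is the vblank-timer close fix: the old scan cancelled the timer up front and relied on a have_enabled_pipe flag, but the flag was overwritten on every inner iteration and the break only left the inner loop, so only the last vgpu's pipes decided whether the timer was restarted. A minimal userspace sketch of that flag bug and the goto-based fix (hypothetical data, not GVT-g code):

#include <stdbool.h>
#include <stdio.h>

static bool pipes[2][3] = {
	{ true,  false, false },	/* vgpu 0: one pipe enabled */
	{ false, false, false },	/* vgpu 1: all pipes disabled */
};

int main(void)
{
	bool have_enabled_pipe = false;
	int vgpu, pipe;

	/* Buggy pattern: the flag is reassigned on every iteration and
	 * the break only exits the inner loop, so vgpu 1 erases the hit
	 * that vgpu 0 produced. */
	for (vgpu = 0; vgpu < 2; vgpu++) {
		for (pipe = 0; pipe < 3; pipe++) {
			have_enabled_pipe = pipes[vgpu][pipe];
			if (have_enabled_pipe)
				break;
		}
	}
	printf("buggy flag: %d\n", have_enabled_pipe);	/* prints 0 */

	/* Fixed pattern: goto exits both loops on the first hit. */
	for (vgpu = 0; vgpu < 2; vgpu++)
		for (pipe = 0; pipe < 3; pipe++)
			if (pipes[vgpu][pipe])
				goto out;
	printf("no pipe enabled\n");
	return 0;
out:
	printf("enabled pipe found: vgpu %d pipe %d\n", vgpu, pipe);
	return 0;
}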
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
 	return NOTIFY_DONE;
 }
 
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 			     unsigned int flags)
 {
 	struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 */
 	if (!i915_gem_object_has_struct_page(obj)) {
 		obj->cache_dirty = false;
-		return;
+		return false;
 	}
 
 	/* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 * tracking.
 	 */
 	if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
-		return;
+		return false;
 
 	trace_i915_gem_object_clflush(obj);
 
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	}
 
 	obj->cache_dirty = false;
+	return true;
 }
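With these hunks, i915_gem_clflush_object() now reports whether it actually flushed anything; the eb_move_to_gpu() hunk further down uses that to drop EXEC_OBJECT_ASYNC only when a flush really happened. A minimal sketch of the pattern (hypothetical names, not the i915 API): a maintenance helper returns whether it did work, so the caller adjusts its bookkeeping only when needed.

#include <stdbool.h>
#include <stdio.h>

struct object {
	bool cache_dirty;
	bool cache_coherent;
	bool async;		/* stand-in for EXEC_OBJECT_ASYNC */
};

/* Returns true only if a flush was actually performed. */
static bool flush_object(struct object *obj)
{
	if (obj->cache_coherent)
		return false;	/* nothing to do */
	/* ... expensive cache flush would go here ... */
	obj->cache_dirty = false;
	return true;
}

int main(void)
{
	struct object obj = { .cache_dirty = true, .async = true };

	/* The caller only drops the async fast path if work was done. */
	if (obj.cache_dirty && !obj.cache_coherent) {
		if (flush_object(&obj))
			obj.async = false;
	}
	printf("async after flush: %d\n", obj.async);	/* prints 0 */
	return 0;
}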
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 			     unsigned int flags);
 #define I915_CLFLUSH_FORCE BIT(0)
 #define I915_CLFLUSH_SYNC BIT(1)
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		eb->args->flags |= __EXEC_HAS_RELOC;
 	}
 
-	entry->flags |= __EXEC_OBJECT_HAS_PIN;
-	GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
 	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		err = i915_vma_get_fence(vma);
 		if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+	GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
 	return 0;
 }
 
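Together, the two eb_reserve_vma() hunks move the __EXEC_OBJECT_HAS_PIN marking past the fence acquisition, so the flag is only set once nothing else in the function can fail and the error-unwind path never unpins a vma that was never pinned. A userspace sketch of that ordering rule (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool has_pin;
	bool has_fence;
	bool needs_fence;
};

static int get_fence(struct entry *e)
{
	return e->needs_fence ? -1 : 0;	/* simulate fence failure */
}

static int reserve(struct entry *e)
{
	if (e->needs_fence) {
		if (get_fence(e))
			return -1;	/* e->has_pin is still false here */
		e->has_fence = true;
	}
	/* Ownership flags are set last, once nothing below can fail. */
	e->has_pin = true;
	return 0;
}

int main(void)
{
	struct entry e = { .needs_fence = true };

	if (reserve(&e))
		printf("failed; has_pin=%d (unwind won't unpin)\n", e.has_pin);
	return 0;
}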
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 	 * to read. However, if the array is not writable the user loses
 	 * the updated relocation values.
 	 */
-	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
 		return -EFAULT;
 
 	do {
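The one-character fix above is the classic sizeof(pointer) vs sizeof(*pointer) bug: the old check validated remain pointer-sized bytes instead of remain array elements, under-checking the user-supplied range. A standalone demonstration (hypothetical struct, not the real UAPI layout):

#include <stdio.h>

struct reloc_entry {
	unsigned long long target_offset;
	unsigned long long presumed_offset;
	unsigned int delta;
};

int main(void)
{
	struct reloc_entry *urelocs = 0;
	unsigned int remain = 100;

	/* Size of the pointer itself: 8 bytes on a 64-bit build. */
	printf("remain * sizeof(urelocs)  = %zu\n", remain * sizeof(urelocs));
	/* Size of one array element: what the range check must use. */
	printf("remain * sizeof(*urelocs) = %zu\n", remain * sizeof(*urelocs));
	return 0;
}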
@@ -1775,7 +1775,7 @@ out:
 		}
 	}
 
-	return err ?: have_copy;
+	return err;
 }
 
 static int eb_relocate(struct i915_execbuffer *eb)
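The removed return used the GNU "elvis" operator: err ?: have_copy evaluates to err when err is nonzero and to have_copy otherwise, so the relocation slow path could return a positive value. After this change the helper returns only 0 or a negative errno, which is why the two i915_gem_do_execbuffer() hunks below can collapse the separate if (err < 0) test into a single if (err) block. A quick illustration of the operator (hypothetical values; GNU C extension, so GCC/clang only):

#include <stdio.h>

int main(void)
{
	int err = 0, have_copy = 1;

	/* GNU C extension, equivalent to: err ? err : have_copy */
	printf("%d\n", err ?: have_copy);	/* prints 1 */
	err = -22;				/* -EINVAL */
	printf("%d\n", err ?: have_copy);	/* prints -22 */
	return 0;
}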
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	int err;
 
 	for (i = 0; i < count; i++) {
-		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 		struct i915_vma *vma = exec_to_vma(entry);
 		struct drm_i915_gem_object *obj = vma->obj;
 
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 			eb->request->capture_list = capture;
 		}
 
+		if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
+			if (i915_gem_clflush_object(obj, 0))
+				entry->flags &= ~EXEC_OBJECT_ASYNC;
+		}
+
 		if (entry->flags & EXEC_OBJECT_ASYNC)
 			goto skip_flushes;
 
-		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
-			i915_gem_clflush_object(obj, 0);
-
 		err = i915_gem_request_await_object
 			(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
 		if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_unlock;
 
 	err = eb_relocate(&eb);
-	if (err)
+	if (err) {
 		/*
 		 * If the user expects the execobject.offset and
 		 * reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		 * relocation.
 		 */
 		args->flags &= ~__EXEC_HAS_RELOC;
-	if (err < 0)
 		goto err_vma;
+	}
 
 	if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
 {
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	__i915_vma_unpin(vma);
 }
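The i915_vma.h hunk demotes the pin-count assertion from the raw helper to the checked wrapper: internal callers that transiently juggle the count can use the raw version, while the public API keeps the sanity check. A sketch of that layering (hypothetical names, plain assert() standing in for GEM_BUG_ON):

#include <assert.h>

struct vma {
	unsigned int flags;	/* low bits: pin count */
};

/* Raw helper: no checks, for callers that know what they're doing. */
static inline void __vma_unpin(struct vma *vma)
{
	vma->flags--;
}

/* Checked public API: assert the count is sane before touching it. */
static inline void vma_unpin(struct vma *vma)
{
	assert(vma->flags > 0);
	__vma_unpin(vma);
}

int main(void)
{
	struct vma v = { .flags = 1 };

	vma_unpin(&v);	/* assertion passes, count drops to 0 */
	return 0;
}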
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
 		val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
 		val &= ~LOADGEN_SELECT;
 
-		if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
-		    ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+		if ((rate <= 600000 && width == 4 && ln >= 1) ||
+		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
 		I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
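Besides dropping the redundant parentheses, the loadgen hunk changes the rate bound from < to <=, so a rate of exactly 600000 now selects loadgen as well. A standalone check of the boundary (values are illustrative only; no claim about units is made here):

#include <stdbool.h>
#include <stdio.h>

static bool loadgen_select(int rate, int width, int ln)
{
	return (rate <= 600000 && width == 4 && ln >= 1) ||
	       (rate <= 600000 && width < 4 && (ln == 1 || ln == 2));
}

int main(void)
{
	/* The old `<` excluded rate == 600000; the fix includes it. */
	printf("rate=600000 width=4 ln=1 -> %d\n", loadgen_select(600000, 4, 1));
	printf("rate=600001 width=4 ln=1 -> %d\n", loadgen_select(600001, 4, 1));
	return 0;
}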
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
-static void intel_update_primary_planes(struct drm_device *dev)
-{
-	struct drm_crtc *crtc;
-
-	for_each_crtc(dev, crtc) {
-		struct intel_plane *plane = to_intel_plane(crtc->primary);
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(plane->base.state);
-
-		if (plane_state->base.visible) {
-			trace_intel_update_plane(&plane->base,
-						 to_intel_crtc(crtc));
-
-			plane->update_plane(plane,
-					    to_intel_crtc_state(crtc->state),
-					    plane_state);
-		}
-	}
-}
-
 static int
 __intel_display_resume(struct drm_device *dev,
 		       struct drm_atomic_state *state,
@@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	struct drm_atomic_state *state;
 	int ret;
 
+	/* reset doesn't touch the display */
+	if (!i915.force_reset_modeset_test &&
+	    !gpu_reset_clobbers_display(dev_priv))
+		return;
+
 	/*
 	 * Need mode_config.mutex so that we don't
 	 * trample ongoing ->detect() and whatnot.
@@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 
 		drm_modeset_backoff(ctx);
 	}
 
-	/* reset doesn't touch the display, but flips might get nuked anyway, */
-	if (!i915.force_reset_modeset_test &&
-	    !gpu_reset_clobbers_display(dev_priv))
-		return;
-
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
 	int ret;
 
+	/* reset doesn't touch the display */
+	if (!i915.force_reset_modeset_test &&
+	    !gpu_reset_clobbers_display(dev_priv))
+		return;
+
+	if (!state)
+		goto unlock;
+
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
@@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 
 	/* reset doesn't touch the display */
 	if (!gpu_reset_clobbers_display(dev_priv)) {
-		if (!state) {
-			/*
-			 * Flips in the rings have been nuked by the reset,
-			 * so update the base address of all primary
-			 * planes to the the last fb to make sure we're
-			 * showing the correct fb after a reset.
-			 *
-			 * FIXME: Atomic will make this obsolete since we won't schedule
-			 * CS-based flips (which might get lost in gpu resets) any more.
-			 */
-			intel_update_primary_planes(dev);
-		} else {
-			ret = __intel_display_resume(dev, state, ctx);
-			if (ret)
-				DRM_ERROR("Restoring old state failed with %i\n", ret);
-		}
+		/* for testing only restore the display */
+		ret = __intel_display_resume(dev, state, ctx);
+		if (ret)
+			DRM_ERROR("Restoring old state failed with %i\n", ret);
 	} else {
 		/*
 		 * The display has been reset as well,
@@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		intel_hpd_init(dev_priv);
 	}
 
-	if (state)
-		drm_atomic_state_put(state);
+	drm_atomic_state_put(state);
+unlock:
 	drm_modeset_drop_locks(ctx);
 	drm_modeset_acquire_fini(ctx);
 	mutex_unlock(&dev->mode_config.mutex);
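The reset hunks above rework intel_prepare_reset()/intel_finish_reset() so both bail out before any modeset locking when the reset cannot clobber the display, and finish_reset() funnels its early exits through an unlock label, keeping lock acquisition and release balanced between the two functions. A condensed userspace sketch of that control flow (hypothetical names):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool locked;

static bool reset_clobbers_display(void) { return true; }

static void prepare_reset(void)
{
	/* reset doesn't touch the display: take no locks at all */
	if (!reset_clobbers_display())
		return;
	locked = true;
}

static void finish_reset(void *state)
{
	/* must mirror prepare_reset(): if it took no locks, just leave */
	if (!reset_clobbers_display())
		return;

	if (!state)
		goto unlock;

	/* ... restore the saved display state here ... */

unlock:
	locked = false;
}

int main(void)
{
	prepare_reset();
	finish_reset(NULL);
	printf("locked: %d\n", locked);	/* 0: lock state is balanced */
	return 0;
}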
@@ -9117,6 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	u64 power_domain_mask;
 	bool active;
 
+	if (INTEL_GEN(dev_priv) >= 9) {
+		intel_crtc_init_scalers(crtc, pipe_config);
+
+		pipe_config->scaler_state.scaler_id = -1;
+		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+	}
+
 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 		return false;
@@ -9145,13 +9128,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->gamma_mode =
 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_crtc_init_scalers(crtc, pipe_config);
-
-		pipe_config->scaler_state.scaler_id = -1;
-		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
-	}
-
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
 		power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9516,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	 * On some platforms writing CURCNTR first will also
 	 * cause CURPOS to be armed by the CURBASE write.
 	 * Without the CURCNTR write the CURPOS write would
-	 * arm itself.
+	 * arm itself. Thus we always start the full update
+	 * with a CURCNTR write.
+	 *
+	 * On other platforms CURPOS always requires the
+	 * CURBASE write to arm the update. Additionally
+	 * a write to any of the cursor registers will cancel
+	 * an already armed cursor update. Thus leaving out
+	 * the CURBASE write after CURPOS could lead to a
+	 * cursor that doesn't appear to move, or even change
+	 * shape. Thus we always write CURBASE.
 	 *
 	 * CURCNTR and CUR_FBC_CTL are always
 	 * armed by the CURBASE write only.
@@ -9559,6 +9544,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 		plane->cursor.cntl = cntl;
 	} else {
 		I915_WRITE_FW(CURPOS(pipe), pos);
+		I915_WRITE_FW(CURBASE(pipe), base);
 	}
 
 	POSTING_READ_FW(CURBASE(pipe));
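The expanded comment describes write-arming: on the affected platforms a CURPOS write is only buffered and gets latched by the CURBASE write, which is why the hunk above adds an unconditional CURBASE write after CURPOS. A toy model of an armed register update (illustration only, not real hardware programming):

#include <stdio.h>

struct cursor_regs {
	int pos_pending, pos_live;
	int base_pending, base_live;
};

static void write_curpos(struct cursor_regs *r, int v)
{
	r->pos_pending = v;	/* buffered, not yet visible */
}

/* CURBASE is the arming register: it latches every pending value. */
static void write_curbase(struct cursor_regs *r, int v)
{
	r->base_pending = v;
	r->pos_live = r->pos_pending;
	r->base_live = r->base_pending;
}

int main(void)
{
	struct cursor_regs r = { 0 };

	write_curpos(&r, 100);
	printf("pos_live before CURBASE: %d\n", r.pos_live);	/* 0: not armed */
	write_curbase(&r, 0x1000);
	printf("pos_live after CURBASE:  %d\n", r.pos_live);	/* 100 */
	return 0;
}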
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 		return true;
 	if (IS_SKYLAKE(dev_priv))
 		return true;
-	if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+	if (IS_KABYLAKE(dev_priv))
 		return true;
 	return false;
 }
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
 	    (plane_bytes_per_line / 512 < 1))
 		selected_result = method2;
-	else if ((ddb_allocation && ddb_allocation /
-		fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+	else if (ddb_allocation >=
+		 fixed_16_16_to_u32_round_up(plane_blocks_per_line))
 		selected_result = min_fixed_16_16(method1, method2);
 	else if (latency >= linetime_us)
 		selected_result = min_fixed_16_16(method1, method2);
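This is the "bad comparison" fix: && binds more loosely than /, so the old expression reduced `ddb_allocation && (ddb_allocation / X)` to a 0-or-1 value and then compared that boolean against 1. For a positive divisor that happens to compute the same thing as a >= b, but it hides the intent and trips compiler warnings about comparing a boolean with a constant; the fix states the comparison directly. A quick standalone check (hypothetical values):

#include <stdio.h>

int main(void)
{
	unsigned int a = 700, b = 300;	/* ddb blocks vs blocks per line */

	/* Old form: (a && a / b) is 0 or 1, then compared against 1. */
	printf("old form: %d\n", (a && a / b) >= 1);
	/* New form: says what it means, and never divides. */
	printf("new form: %d\n", a >= b);
	return 0;
}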
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mkwrite_device_info(i915)->ring_mask = BIT(0);
 	i915->engine[RCS] = mock_engine(i915, "mock");
 	if (!i915->engine[RCS])
-		goto err_dependencies;
+		goto err_priorities;
 
 	i915->kernel_context = mock_context(i915, NULL);
 	if (!i915->kernel_context)
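The selftest fix retargets the goto so the unwind ladder frees everything allocated before the failing step; jumping one label too far down skipped the priorities cleanup and leaked it. A compressed sketch of the convention (hypothetical allocations):

#include <stdio.h>
#include <stdlib.h>

static int make_device(void)
{
	void *dependencies, *priorities, *engine;

	dependencies = malloc(16);
	if (!dependencies)
		goto err;
	priorities = malloc(16);
	if (!priorities)
		goto err_dependencies;
	engine = NULL;			/* simulate mock_engine() failing */
	if (!engine)
		goto err_priorities;	/* was err_dependencies: leaked priorities */

	free(engine);
	free(priorities);
	free(dependencies);
	return 0;

err_priorities:
	free(priorities);
err_dependencies:
	free(dependencies);
err:
	return -1;
}

int main(void)
{
	printf("make_device: %d\n", make_device());
	return 0;
}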