Merge tag 'drm-intel-fixes-2013-12-18' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes
Besides the two fixes for tricky corner cases in gem from Chris that I've already promised, this contains two patches from Paulo to fix pc8 warnings (both ported from -next, bug report from Dave Jones) and one patch to fix vga enable/disable on snb+. That last one is a really old bug, but apparently it can cause machine hangs if you try hard enough with vgacon/efifb handover.

* tag 'drm-intel-fixes-2013-12-18' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: Use the correct GMCH_CTRL register for Sandybridge+
  drm/i915: get a PC8 reference when enabling the power well
  drm/i915: change CRTC assertion on LCPLL disable
  drm/i915: Fix erroneous dereference of batch_obj inside reset_status
  drm/i915: Prevent double unref following alloc failure during execbuffer
Commit 418cb50bd6
drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-				      struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
 	struct drm_i915_gem_request *request;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
+
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
+
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_status(dev_priv, ring);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
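The new comment in i915_gem_reset() spells out the ordering constraint: i915_set_reset_status() looks at state the requests only borrow (such as the batch object), so guilt must be assigned before the cleanup pass frees anything. A minimal userspace sketch of that two-pass pattern, with made-up structures and names rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; const char *batch; struct request *next; };

/* Pass 1: inspect every pending request while its borrowed batch pointer
 * is still valid. */
static void mark_guilty(struct request *head, int completed)
{
	for (struct request *r = head; r; r = r->next)
		if (r->seqno > completed)
			printf("request %d is guilty (batch: %s)\n", r->seqno, r->batch);
}

/* Pass 2: only now tear the requests down. */
static void cleanup(struct request *head)
{
	while (head) {
		struct request *r = head;
		head = r->next;
		free(r);
	}
}

int main(void)
{
	struct request *hung = malloc(sizeof(*hung));
	struct request *done = malloc(sizeof(*done));
	*hung = (struct request){ .seqno = 2, .batch = "hung batch", .next = NULL };
	*done = (struct request){ .seqno = 1, .batch = "completed batch", .next = hung };

	mark_guilty(done, 1);	/* inspect first, while pointers are valid */
	cleanup(done);		/* then free */
	return 0;
}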
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
-out:
+	return 0;
+
+
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transfered to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
 
 	return ret;
 }
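The reworked error path transfers each object to the vma list as soon as its vma exists, so on failure the err: label drops only the lookup references still held on the local objects list and leaves the rest to eb_destroy, which is what removes the double unref. A small standalone C model of that ownership hand-off, with illustrative names rather than the driver's API:

#include <stdio.h>

struct obj { const char *name; int refs; };

static void unref(struct obj *o)
{
	o->refs--;
	printf("%s: refs now %d\n", o->name, o->refs);
}

int main(void)
{
	struct obj a = { "a", 1 }, b = { "b", 1 }, c = { "c", 1 };
	struct obj *objects[] = { &a, &b, &c };	/* one lookup reference each */
	struct obj *vmas[3];
	int remaining = 3, nvmas = 0, failed = 0;

	while (remaining > 0) {
		struct obj *o = objects[3 - remaining];

		if (o == &c) {		/* pretend vma creation fails here */
			failed = 1;
			break;
		}
		/* Ownership moves to the vma list and off the local list. */
		vmas[nvmas++] = o;
		remaining--;
	}

	if (failed)			/* err: unref only what is still local */
		for (int i = 3 - remaining; i < 3; i++)
			unref(objects[i]);

	for (int i = 0; i < nvmas; i++)	/* eb_destroy-style teardown */
		unref(vmas[i]);
	return 0;			/* every object ends at refs == 0, exactly once */
}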
drivers/gpu/drm/i915/intel_display.c
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	uint32_t val;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
-		WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+		WARN(crtc->active, "CRTC for pipe %c enabled\n",
 		     pipe_name(crtc->pipe));
 
 	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
 	u16 gmch_ctrl;
 
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 	if (state)
 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
 	else
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 	return 0;
 }
 
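The VGA fix boils down to picking the right bridge config register before the read-modify-write: on gen6+ the VGA-disable bit no longer sits at the old INTEL_GMCH_CTRL location, so intel_modeset_vga_set_state() now selects SNB_GMCH_CTRL by generation. A tiny standalone sketch of just that selection; the offsets below are placeholders, not the real register values:

#include <stdio.h>

/* Placeholder offsets only; NOT the real INTEL_GMCH_CTRL/SNB_GMCH_CTRL values. */
#define FAKE_INTEL_GMCH_CTRL	0x10	/* pre-Sandybridge location */
#define FAKE_SNB_GMCH_CTRL	0x20	/* Sandybridge+ location */

static unsigned int gmch_ctrl_reg(int gen)
{
	return gen >= 6 ? FAKE_SNB_GMCH_CTRL : FAKE_INTEL_GMCH_CTRL;
}

int main(void)
{
	for (int gen = 4; gen <= 8; gen++)
		printf("gen%d -> poke GMCH_CTRL at config offset 0x%02x\n",
		       gen, gmch_ctrl_reg(gen));
	return 0;
}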
drivers/gpu/drm/i915/intel_pm.c
@@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 	unsigned long irqflags;
 	uint32_t tmp;
 
+	WARN_ON(dev_priv->pc8.enabled);
+
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
 	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
 	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 static void __intel_power_well_get(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
-	if (!power_well->count++)
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!power_well->count++) {
+		hsw_disable_package_c8(dev_priv);
 		__intel_set_power_well(dev, true);
+	}
 }
 
 static void __intel_power_well_put(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	WARN_ON(!power_well->count);
-	if (!--power_well->count && i915_disable_power_well)
+	if (!--power_well->count && i915_disable_power_well) {
 		__intel_set_power_well(dev, false);
+		hsw_enable_package_c8(dev_priv);
+	}
 }
 
 void intel_display_power_get(struct drm_device *dev,
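The power-well changes pair the first get with hsw_disable_package_c8() and the last put with hsw_enable_package_c8(), so a held power well always holds a PC8 reference and the new WARN_ON in __intel_set_power_well() should no longer trigger. A minimal userspace model of that reference pairing, with plain counters standing in for the real PC8 machinery (the real put only re-enables PC8 when it actually powers the well down, which this sketch glosses over):

#include <assert.h>
#include <stdio.h>

static int well_count;		/* power well references */
static int pc8_disable_count;	/* package C8 "disable" references */

static void power_well_get(void)
{
	if (!well_count++)
		pc8_disable_count++;	/* hsw_disable_package_c8() in the patch */
}

static void power_well_put(void)
{
	assert(well_count > 0);
	if (!--well_count)
		pc8_disable_count--;	/* hsw_enable_package_c8() in the patch */
}

int main(void)
{
	power_well_get();
	power_well_get();
	power_well_put();
	printf("well=%d pc8_disable=%d\n", well_count, pc8_disable_count);	/* 1 1 */
	power_well_put();
	printf("well=%d pc8_disable=%d\n", well_count, pc8_disable_count);	/* 0 0 */
	return 0;
}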