Merge tag 'drm-intel-fixes-2015-07-15' of git://anongit.freedesktop.org/drm-intel into drm-fixes
Next batch of i915 fixes. Note that the compat32 patch here needs the drm core one to be actually useful, I'll send you that one with a separate drm-fixes pull request. One revert because a fix in -rc2 did break existing userspace.

* tag 'drm-intel-fixes-2015-07-15' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Do not call intel_crtc_disable if the crtc is already disabled.
  Revert "drm/i915: Declare the swizzling unknown for L-shaped configurations"
  drm/i915: Forward all core DRM ioctls to core compat handling
  drm/i915: fix oops in primary_check_plane
  drm/i915: remove unused has_dma_mapping flag
  drm/i915: Fix missing return warning for !CONFIG_DEBUGFS
  drm/i915: avoid leaking DMA mappings
  drm/i915: Snapshot seqno of most recently submitted request.
  drm/i915: Store device pointer in contexts for late tracepoint usafe
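One of the fixes above, "drm/i915: Forward all core DRM ioctls to core compat handling", tightens the range check in i915_compat_ioctl() (see the i915_ioc32.c hunk below) so that any ioctl number outside the driver's private command range is handed back to the DRM core's compat handler. The stand-alone C sketch below only illustrates that dispatch idea; the constant values, function signatures and printf calls are made up for the example and are not the kernel's code.

#include <stdio.h>

#define DRM_COMMAND_BASE 0x40	/* illustrative values only, not the real defines */
#define DRM_COMMAND_END  0xA0

static long core_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	printf("cmd 0x%x forwarded to core compat handling\n", cmd);
	return 0;
}

static long driver_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	unsigned int nr = cmd & 0xff;	/* ioctl number lives in the low byte */

	/* The old check forwarded only nr < DRM_COMMAND_BASE, so core ioctls
	 * numbered above the driver-private range fell through and were
	 * mishandled; the fix forwards both sides of the range. */
	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
		return core_compat_ioctl(cmd, arg);

	printf("cmd 0x%x handled by the driver's own compat table\n", cmd);
	return 0;
}

int main(void)
{
	driver_compat_ioctl(0x10, 0);	/* core ioctl below the range */
	driver_compat_ioctl(0x50, 0);	/* driver-private ioctl */
	driver_compat_ioctl(0xB0, 0);	/* core ioctl above the range - now forwarded */
	return 0;
}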
commit 61f2669fce
@@ -826,6 +826,7 @@ struct intel_context {
 	struct kref ref;
 	int user_handle;
 	uint8_t remap_slice;
+	struct drm_i915_private *i915;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int has_dma_mapping:1;
-
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
 	unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
 #endif
 
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 
 	obj->pages = st;
-	obj->has_dma_mapping = true;
 	return 0;
 }
 
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
-
-	obj->has_dma_mapping = false;
 }
 
 static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	i915_gem_gtt_finish_object(obj);
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sg_page_iter sg_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	int ret;
 	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 */
 			i915_gem_shrink_all(dev_priv);
 			page = shmem_read_mapping_page(mapping, i);
-			if (IS_ERR(page))
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
 				goto err_pages;
+			}
 		}
 #ifdef CONFIG_SWIOTLB
 		if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	sg_mark_end(sg);
 	obj->pages = st;
 
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret)
+		goto err_pages;
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
@@ -2300,10 +2306,10 @@ err_pages:
 	 * space and so want to translate the error from shmemfs back to our
 	 * usual understanding of ENOMEM.
 	 */
-	if (PTR_ERR(page) == -ENOSPC)
-		return -ENOMEM;
-	else
-		return PTR_ERR(page);
+	if (ret == -ENOSPC)
+		ret = -ENOMEM;
+
+	return ret;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	}
 
 	request->emitted_jiffies = jiffies;
+	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 	request->file_priv = NULL;
 
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list)) {
-		i915_gem_gtt_finish_object(obj);
+	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-	}
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ search_free:
 		goto err_remove_node;
 	}
 
-	ret = i915_gem_gtt_prepare_object(obj);
-	if (ret)
-		goto err_remove_node;
-
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
 	if (ret)
-		goto err_finish_gtt;
+		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);
 
 	return vma;
 
-err_finish_gtt:
-	i915_gem_gtt_finish_object(obj);
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
 err_free_vma:
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref,
-						 typeof(*ctx), ref);
+	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
 	trace_i915_context_free(ctx);
 
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
+	ctx->i915 = dev_priv;
 
 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 		return PTR_ERR(sg);
 
 	obj->pages = sg;
-	obj->has_dma_mapping = true;
 	return 0;
 }
 
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	dma_buf_unmap_attachment(obj->base.import_attach,
 				 obj->pages, DMA_BIDIRECTIONAL);
-	obj->has_dma_mapping = false;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	if (obj->has_dma_mapping)
-		return 0;
-
 	if (!dma_map_sg(&obj->base.dev->pdev->dev,
 			obj->pages->sgl, obj->pages->nents,
 			PCI_DMA_BIDIRECTIONAL))
@@ -1972,10 +1969,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 	interruptible = do_idling(dev_priv);
 
-	if (!obj->has_dma_mapping)
-		dma_unmap_sg(&dev->pdev->dev,
-			     obj->pages->sgl, obj->pages->nents,
-			     PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+		     PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	if (obj->pages == NULL)
 		goto cleanup;
 
-	obj->has_dma_mapping = true;
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
 
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		if (IS_GEN4(dev)) {
 			uint32_t ddc2 = I915_READ(DCC2);
 
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
-				/* Since the swizzling may vary within an
-				 * object, we have no idea what the swizzling
-				 * is for any page in particular. Thus we
-				 * cannot migrate tiled pages using the GPU,
-				 * nor can we tell userspace what the exact
-				 * swizzling is for any object.
-				 */
+			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
 				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-			}
 		}
 
 		if (dcc == 0xffffffff) {
@@ -545,6 +545,26 @@ err:
 	return ret;
 }
 
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+			     struct page **pvec, int num_pages)
+{
+	int ret;
+
+	ret = st_set_pages(&obj->pages, pvec, num_pages);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret) {
+		sg_free_table(obj->pages);
+		kfree(obj->pages);
+		obj->pages = NULL;
+	}
+
+	return ret;
+}
+
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (obj->userptr.work != &work->work) {
 		ret = 0;
 	} else if (pinned == num_pages) {
-		ret = st_set_pages(&obj->pages, pvec, num_pages);
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 		if (ret == 0) {
 			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+			obj->get_page.sg = obj->pages->sgl;
+			obj->get_page.last = 0;
+
 			pinned = 0;
 		}
 	}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 			}
 		}
 	} else {
-		ret = st_set_pages(&obj->pages, pvec, num_pages);
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 		if (ret == 0) {
 			obj->userptr.work = NULL;
 			pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
+	i915_gem_gtt_finish_object(obj);
+
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
-	if (nr < DRM_COMMAND_BASE)
+	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
 		return drm_compat_ioctl(filp, cmd, arg);
 
 	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
-	return list_entry(ring->request_list.prev,
-			  struct drm_i915_gem_request, list);
-}
-
 static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_gem_request_completed(ring_last_request(ring), false));
+		i915_seqno_passed(seqno, ring->last_submitted_seqno));
 }
 
 static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		acthd = intel_ring_get_active_head(ring);
 
 		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring)) {
+			if (ring_idle(ring, seqno)) {
 				ring->hangcheck.action = HANGCHECK_IDLE;
 
 				if (waitqueue_active(&ring->irq_queue)) {
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
 	TP_fast_assign(
 			__entry->ctx = ctx;
 			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-			__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+			__entry->dev = ctx->i915->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->state->enable);
-
 	intel_crtc_disable_planes(crtc);
 	dev_priv->display.crtc_disable(crtc);
 	dev_priv->display.off(crtc);
@@ -12591,7 +12588,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 			continue;
 
 		if (!crtc_state->enable) {
-			intel_crtc_disable(crtc);
+			if (crtc->state->enable)
+				intel_crtc_disable(crtc);
 		} else if (crtc->state->enable) {
 			intel_crtc_disable_planes(crtc);
 			dev_priv->display.crtc_disable(crtc);
@@ -13276,7 +13274,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	if (intel_crtc->active) {
+	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
 		struct intel_plane_state *old_state =
 			to_intel_plane_state(plane->state);
 
@@ -275,6 +275,13 @@ struct intel_engine_cs {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	struct drm_i915_gem_request *outstanding_lazy_request;
+	/**
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
+	 */
+	u32 last_submitted_seqno;
+
 	bool gpu_caches_dirty;
 
 	wait_queue_head_t irq_queue;