drm/i915: rework legacy GFX HWS handling
To get the fun stuff out of the way: the legacy hws is allocated by
userspace when the gpu needs a gfx hws, and there's no reference
counting going on, so userspace can simply screw everyone over. At
least it's not as horrible as i810, where the ringbuffer is allocated
by userspace ...

We can't fix this disaster, but we can at least tidy up the code a bit
to make things clearer:

- Drop the drm ioremap indirection.

- Add a new read_legacy_status_page to paper over the differences
  between the legacy gfx hws and the physical hws shared with the new
  ringbuffer code.

- Add a pointer in dev_priv->dri1 for the cpu address - that one is an
  iomem remapping, as opposed to all other hw status pages. This is
  just prep work to make sparse happy.

Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent: 627965ad3e
Commit: 316d388450
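Before the diff, a condensed sketch of the lifecycle this ends up with. The two helper names below are invented for illustration only; the real code sits in i915_set_status_page() and i915_free_hws() further down, and the fields and calls (dri1.gfx_hws_cpu_addr, ioremap_wc(), memset_io(), iounmap()) are the ones the patch introduces.

/*
 * Condensed sketch of the resulting DRI1 gfx hws lifecycle.  The helper names
 * dri1_gfx_hws_setup()/dri1_gfx_hws_teardown() are hypothetical; the real code
 * is in i915_set_status_page() and i915_free_hws() in the diff below (assumes
 * the usual i915_drv.h context for LP_RING, I915_WRITE, HWS_PGA and dri1).
 */
static int dri1_gfx_hws_setup(struct drm_device *dev, u32 hws_addr)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	LP_RING(dev_priv)->status_page.gfx_addr = hws_addr & (0x1ffff << 12);

	/* Map the userspace-provided page directly instead of going through
	 * the drm_local_map_t / drm_core_ioremap_wc() indirection. */
	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev->agp->base + hws_addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL)
		return -ENOMEM;			/* full cleanup elided */

	/* The mapping is __iomem, so use memset_io()/ioread32() rather than
	 * plain pointer accesses -- that's the sparse prep work. */
	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, LP_RING(dev_priv)->status_page.gfx_addr);

	return 0;
}

static void dri1_gfx_hws_teardown(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Teardown is now a plain iounmap() of that __iomem mapping. */
	iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	LP_RING(dev_priv)->status_page.gfx_addr = 0;
}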
@@ -67,7 +67,16 @@
 		LOCK_TEST_WITH_RETURN(dev, file);		\
 } while (0)
 
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+	if (I915_NEED_GFX_HWS(dev_priv->dev))
+		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+	else
+		return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_BREADCRUMB_INDEX		0x21
 
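What this means at the call sites (my reading of the hunk above, not code added by the patch):

/* Existing callers are unchanged; a breadcrumb read such as
 *
 *	u32 seqno = READ_BREADCRUMB(dev_priv);
 *
 * now expands to intel_read_legacy_status_page(dev_priv, I915_BREADCRUMB_INDEX)
 * and picks ioread32() on the userspace-provided gfx hws (an __iomem mapping)
 * or intel_read_status_page() on the kernel-owned physical hws.  Since
 * gfx_hws_cpu_addr is a u32 __iomem *, reg is a dword index into the page.
 */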
@@ -137,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
 
 	if (ring->status_page.gfx_addr) {
 		ring->status_page.gfx_addr = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
 	}
 
 	/* Need to rewrite hardware status page */
@@ -1073,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
-	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
-	dev_priv->hws_map.size = 4*1024;
-	dev_priv->hws_map.type = 0;
-	dev_priv->hws_map.flags = 0;
-	dev_priv->hws_map.mtrr = 0;
-
-	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
-	if (dev_priv->hws_map.handle == NULL) {
+	dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
+						     4096);
+	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
 		i915_dma_cleanup(dev);
 		ring->status_page.gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	ring->status_page.page_addr =
-		(void __force __iomem *)dev_priv->hws_map.handle;
-	memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -369,7 +369,6 @@ typedef struct drm_i915_private {
 
 	drm_dma_handle_t *status_page_dmah;
 	uint32_t counter;
-	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
 	struct drm_i915_gem_object *renderctx;
 
@@ -743,6 +742,7 @@ typedef struct drm_i915_private {
 	 * here! */
 	struct {
 		unsigned allow_batchbuffer : 1;
+		u32 __iomem *gfx_hws_cpu_addr;
 	} dri1;
 
 	/* Kernel Modesetting */