drm/nv50: support for compression
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

Parent: 26c0c9e33a
Commit: 8f7286f8e4

@@ -72,6 +72,7 @@ struct nouveau_mem {
 	struct nouveau_vma tmp_vma;
 	u8  page_shift;
 
+	struct drm_mm_node *tag;
 	struct list_head regions;
 	dma_addr_t *pages;
 	u32 memtype;

@@ -740,7 +740,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 
 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
-			(nvbo->tile_flags >> 8) & 0xff, &node);
+			(nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret)
 		return ret;
 

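The widened mask is what lets compression requests reach the nv50 VRAM
allocator at all: bits 17:16 of tile_flags (the new NOUVEAU_GEM_TILE_COMP
field) land in bits 9:8 of the memtype argument, and the old 0xff mask
silently dropped them. A minimal standalone sketch of the decode; the
masks match the patch, the tile_flags value is only an example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tile_flags = 0x00037000;         /* example: layout 0x70 + comp bits */
	uint32_t old = (tile_flags >> 8) & 0xff;  /* comp bits truncated */
	uint32_t new = (tile_flags >> 8) & 0x3ff; /* comp bits preserved */

	printf("old mask: 0x%03x, new mask: 0x%03x, comp: %u\n",
	       old, new, (new & 0x300) >> 8);
	return 0;
}
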
@@ -40,6 +40,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 	u32 max = 1 << (vm->pgt_bits - bits);
 	u32 end, len;
 
+	delta = 0;
 	list_for_each_entry(r, &node->regions, rl_entry) {
 		u64 phys = (u64)r->offset << 12;
 		u32 num = r->length >> bits;

@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 				end = max;
 			len = end - pte;
 
-			vm->map(vma, pgt, node, pte, len, phys);
+			vm->map(vma, pgt, node, pte, len, phys, delta);
 
 			num -= len;
 			pte += len;

@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 				pde++;
 				pte = 0;
 			}
+
+			delta += (u64)len << vma->node->type;
 		}
 	}
 

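The delta accumulator hands the backend the buffer-relative byte offset of
each mapped chunk, which it needs to pick the matching compression tags:
the physical VRAM may be split across several regions, but delta advances
linearly over all of them. A self-contained sketch of the accumulation,
with simplified types and example sizes (vma->node->type is the page
shift, 16 for 64KiB pages):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t region_pages[] = { 16, 8, 32 }; /* region lengths in pages */
	unsigned page_shift = 16;                /* vma->node->type */
	uint64_t delta = 0;
	unsigned i;

	for (i = 0; i < 3; i++) {
		/* each chunk is handed to vm->map() with its start offset */
		printf("region %u: delta = 0x%llx\n", i,
		       (unsigned long long)delta);
		delta += (uint64_t)region_pages[i] << page_shift;
	}
	return 0;
}
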
@@ -67,7 +67,8 @@ struct nouveau_vm {
 	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
 			struct nouveau_gpuobj *pgt[2]);
 	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
-		    struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+		    struct nouveau_mem *, u32 pte, u32 cnt,
+		    u64 phys, u64 delta);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);

@@ -93,7 +94,7 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		     struct nouveau_gpuobj *pgt[2]);
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);

@@ -104,7 +105,7 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
 void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		     struct nouveau_gpuobj *pgt[2]);
 void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+		 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
 void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
 		    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);

@@ -8,31 +8,61 @@ struct nv50_fb_priv {
 	dma_addr_t r100c08;
 };
 
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nv50_fb_priv *priv = pfb->priv;
+
+	if (drm_mm_initialized(&pfb->tag_heap))
+		drm_mm_takedown(&pfb->tag_heap);
+
+	if (priv->r100c08_page) {
+		pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c08_page);
+	}
+
+	kfree(priv);
+	pfb->priv = NULL;
+}
+
 static int
 nv50_fb_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nv50_fb_priv *priv;
+	u32 tagmem;
+	int ret;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
+	pfb->priv = priv;
 
 	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!priv->r100c08_page) {
-		kfree(priv);
+		nv50_fb_destroy(dev);
 		return -ENOMEM;
 	}
 
 	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
-		__free_page(priv->r100c08_page);
-		kfree(priv);
+		nv50_fb_destroy(dev);
 		return -EFAULT;
 	}
 
-	dev_priv->engine.fb.priv = priv;
+	tagmem = nv_rd32(dev, 0x100320);
+	NV_DEBUG(dev, "%d tags available\n", tagmem);
+	ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+	if (ret) {
+		nv50_fb_destroy(dev);
+		return ret;
+	}
+
 	return 0;
 }

@@ -81,18 +111,7 @@ nv50_fb_init(struct drm_device *dev)
 void
 nv50_fb_takedown(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_fb_priv *priv;
-
-	priv = dev_priv->engine.fb.priv;
-	if (!priv)
-		return;
-	dev_priv->engine.fb.priv = NULL;
-
-	pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->r100c08_page);
-	kfree(priv);
+	nv50_fb_destroy(dev);
 }
 
 void

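The create-path rework is the usual single-teardown idiom: publish
pfb->priv early, make nv50_fb_destroy() safe on a partially constructed
object, and let every failure branch (and nv50_fb_takedown()) funnel
through it instead of duplicating unwind code. A minimal userspace sketch
of the same shape, all names hypothetical:

#include <stdlib.h>

struct priv {
	void *page;
	void *map;
};

static struct priv *g_priv; /* stands in for pfb->priv */

/* Safe on partially constructed state, and idempotent. */
static void destroy(void)
{
	struct priv *p = g_priv;

	if (!p)
		return;
	free(p->map);
	free(p->page);
	free(p);
	g_priv = NULL;
}

static int create(void)
{
	struct priv *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;
	g_priv = p; /* publish early so destroy() can unwind */

	p->page = malloc(4096);
	if (!p->page) {
		destroy();
		return -1;
	}

	p->map = malloc(4096);
	if (!p->map) {
		destroy();
		return -1;
	}
	return 0;
}

int main(void)
{
	int ret = create();

	destroy();
	return ret ? 1 : 0;
}
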
@@ -83,8 +83,9 @@ nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 
 void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
+	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block;
 	int i;
 

@@ -105,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 		phys += block << (vma->node->type - 3);
 		cnt  -= block;
+		if (comp) {
+			u32 tag = mem->tag->start + ((delta >> 16) * comp);
+			offset_h |= (tag << 17);
+			delta    += block << (vma->node->type - 3);
+		}
 
 		while (block) {
 			nv_wo32(pgt, pte + 0, offset_l);

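The PTE encoding here pairs one group of tags with each 64KiB of the
buffer: delta >> 16 indexes the 64KiB block, comp (recovered from memtype
bits 8:7) is the number of tags each block consumes, and the resulting tag
address is shifted into the high PTE word. A standalone sketch of the
arithmetic; the tag base and comp values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tag_base = 0x40; /* mem->tag->start, example value */
	uint32_t comp = 2;        /* (mem->memtype & 0x180) >> 7 */
	uint64_t delta;

	/* walk three 64KiB blocks of a buffer */
	for (delta = 0; delta < 3 << 16; delta += 1 << 16) {
		uint32_t tag = tag_base + (uint32_t)(delta >> 16) * comp;
		uint32_t offset_h = tag << 17; /* ORed into the high PTE word */

		printf("delta=0x%06llx tag=0x%03x pte_hi|=0x%08x\n",
		       (unsigned long long)delta, tag, offset_h);
	}
	return 0;
}
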
@@ -69,6 +69,11 @@ nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 		list_del(&this->rl_entry);
 		nouveau_mm_put(mm, this);
 	}
+
+	if (mem->tag) {
+		drm_mm_put_block(mem->tag);
+		mem->tag = NULL;
+	}
 	mutex_unlock(&mm->mutex);
 
 	kfree(mem);

@@ -76,7 +81,7 @@ nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 
 int
 nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
-	      u32 type, struct nouveau_mem **pmem)
+	      u32 memtype, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;

@@ -84,6 +89,8 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
 	struct nouveau_mm *mm = man->priv;
 	struct nouveau_mm_node *r;
 	struct nouveau_mem *mem;
+	int comp = (memtype & 0x300) >> 8;
+	int type = (memtype & 0x07f);
 	int ret;
 
 	if (!types[type])

@@ -96,12 +103,26 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
 	if (!mem)
 		return -ENOMEM;
 
+	mutex_lock(&mm->mutex);
+	if (comp) {
+		if (align == 16) {
+			struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+			int n = (size >> 4) * comp;
+
+			mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+			if (mem->tag)
+				mem->tag = drm_mm_get_block(mem->tag, n, 0);
+		}
+
+		if (unlikely(!mem->tag))
+			comp = 0;
+	}
+
 	INIT_LIST_HEAD(&mem->regions);
 	mem->dev = dev_priv->dev;
-	mem->memtype = type;
+	mem->memtype = (comp << 7) | type;
 	mem->size = size;
 
-	mutex_lock(&mm->mutex);
 	do {
 		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
 		if (ret) {

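Tag allocation only happens for 64KiB-aligned buffers, and if the tag heap
is exhausted the buffer silently falls back to uncompressed (comp = 0).
Assuming size and align are in 4KiB-page units at this point in the
function (they are scaled down earlier), align == 16 selects 64KiB large
pages, size >> 4 counts 64KiB blocks, and each block needs comp tags. A
quick check of the sizing with example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bytes = 4 << 20;    /* 4MiB buffer, example */
	uint64_t size = bytes >> 12; /* in 4KiB pages, as in the function */
	uint32_t align = 16;         /* 64KiB alignment, in pages */
	int comp = 2;                /* example compression mode */

	if (align == 16) {
		int n = (int)(size >> 4) * comp;

		printf("%llu pages -> %d tags\n",
		       (unsigned long long)size, n);
	}
	return 0;
}
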
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 
 void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
 	u32 next = 1 << (vma->node->type - 8);
 

@@ -78,7 +78,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
 
 	INIT_LIST_HEAD(&mem->regions);
 	mem->dev = dev_priv->dev;
-	mem->memtype = type;
+	mem->memtype = (type & 0xff);
 	mem->size = size;
 
 	mutex_lock(&mm->mutex);

@@ -94,6 +94,7 @@ struct drm_nouveau_setparam {
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
 #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
 
+#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
 #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
 #define NOUVEAU_GEM_TILE_16BPP       0x00000001
 #define NOUVEAU_GEM_TILE_32BPP       0x00000002

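Userspace opts in through these two new tile_flags bits; together with the
widened mask in nouveau_vram_manager_new they deliver the compression mode
to nv50_vram_new. A sketch of the round trip, where the layout value 0x70
is only an example:

#include <stdint.h>
#include <stdio.h>

#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */

int main(void)
{
	/* userspace: request a compressible, tiled buffer */
	uint32_t tile_flags = (0x70 << 8) | NOUVEAU_GEM_TILE_COMP;

	/* kernel: nouveau_vram_manager_new() -> nv50_vram_new() */
	uint32_t memtype = (tile_flags >> 8) & 0x3ff;
	int comp = (memtype & 0x300) >> 8;
	int type = (memtype & 0x07f);

	printf("memtype=0x%03x comp=%d type=0x%02x\n", memtype, comp, type);
	return 0;
}
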