drm/nv50-nvc0: delay GART binding until move_notify time

The immediate benefit of doing this is that on NV50 and up, the GPU
virtual address of any buffer is now constant, regardless of what
memtype they're placed in.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2011-02-10 12:59:51 +10:00
Parent d5f423947a
Commit 26c0c9e33a
9 changed files with 183 additions and 86 deletions
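A rough illustration of the invariant described above, as a compilable sketch with simplified stand-in types (these are not the driver's real structures): bo.offset is now always derived from the buffer's per-BO VMA, which is allocated once and survives migration between VRAM and GART.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for nouveau_vma/nouveau_bo; only the fields
 * relevant to the invariant are modelled. */
struct vma { uint64_t offset; };                  /* per-BO GPU mapping */
struct bo  { struct vma vma; int mem_type; uint64_t offset; };

enum { PL_VRAM, PL_TT };                          /* placement domains */

/* Mirrors the simplified nouveau_bo_new()/nouveau_bo_validate() logic
 * below: the offset comes from the VMA for every placement, not just
 * VRAM as before. */
static void bo_update_offset(struct bo *bo)
{
        bo->offset = bo->vma.offset;
}

int main(void)
{
        struct bo bo = { .vma = { .offset = 0x20000000ull } };
        uint64_t vram_addr;

        bo.mem_type = PL_VRAM;
        bo_update_offset(&bo);
        vram_addr = bo.offset;

        bo.mem_type = PL_TT;                      /* evicted to GART */
        bo_update_offset(&bo);
        assert(bo.offset == vram_addr);           /* virtual address unchanged */
        return 0;
}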

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -138,11 +138,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
         }
         nvbo->channel = NULL;
 
-        if (nvbo->vma.node) {
-                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-                        nvbo->bo.offset = nvbo->vma.offset;
-        }
-
+        if (nvbo->vma.node)
+                nvbo->bo.offset = nvbo->vma.offset;
         *pnvbo = nvbo;
         return 0;
 }
@@ -312,11 +309,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                               no_wait_reserve, no_wait_gpu);
         if (ret)
                 return ret;
 
-        if (nvbo->vma.node) {
-                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-                        nvbo->bo.offset = nvbo->vma.offset;
-        }
-
+        if (nvbo->vma.node)
+                nvbo->bo.offset = nvbo->vma.offset;
         return 0;
 }
@@ -426,7 +420,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->default_caching = TTM_PL_FLAG_WC;
                 break;
         case TTM_PL_TT:
-                man->func = &ttm_bo_manager_func;
+                if (dev_priv->card_type >= NV_50)
+                        man->func = &nouveau_gart_manager;
+                else
+                        man->func = &ttm_bo_manager_func;
                 switch (dev_priv->gart_info.type) {
                 case NOUVEAU_GART_AGP:
                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -501,25 +498,18 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_mem *old_node = old_mem->mm_node;
+        struct nouveau_mem *new_node = new_mem->mm_node;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         u32 page_count = new_mem->num_pages;
         u64 src_offset, dst_offset;
         int ret;
 
-        src_offset = old_mem->start << PAGE_SHIFT;
-        if (old_mem->mem_type == TTM_PL_VRAM) {
-                struct nouveau_mem *node = old_mem->mm_node;
-                src_offset = node->tmp_vma.offset;
-        } else {
-                src_offset += dev_priv->gart_info.aper_base;
-        }
-
-        dst_offset = new_mem->start << PAGE_SHIFT;
-        if (new_mem->mem_type == TTM_PL_VRAM)
-                dst_offset = nvbo->vma.offset;
+        src_offset = old_node->tmp_vma.offset;
+        if (new_node->tmp_vma.node)
+                dst_offset = new_node->tmp_vma.offset;
         else
-                dst_offset += dev_priv->gart_info.aper_base;
+                dst_offset = nvbo->vma.offset;
 
         page_count = new_mem->num_pages;
         while (page_count) {
@@ -554,25 +544,18 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_mem *old_node = old_mem->mm_node;
+        struct nouveau_mem *new_node = new_mem->mm_node;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         u64 length = (new_mem->num_pages << PAGE_SHIFT);
         u64 src_offset, dst_offset;
         int ret;
 
-        src_offset = old_mem->start << PAGE_SHIFT;
-        if (old_mem->mem_type == TTM_PL_VRAM) {
-                struct nouveau_mem *node = old_mem->mm_node;
-                src_offset = node->tmp_vma.offset;
-        } else {
-                src_offset += dev_priv->gart_info.aper_base;
-        }
-
-        dst_offset = new_mem->start << PAGE_SHIFT;
-        if (new_mem->mem_type == TTM_PL_VRAM)
-                dst_offset = nvbo->vma.offset;
+        src_offset = old_node->tmp_vma.offset;
+        if (new_node->tmp_vma.node)
+                dst_offset = new_node->tmp_vma.offset;
         else
-                dst_offset += dev_priv->gart_info.aper_base;
+                dst_offset = nvbo->vma.offset;
 
         while (length) {
                 u32 amount, stride, height;
@@ -728,16 +711,28 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         /* create temporary vma for old memory, this will get cleaned
          * up after ttm destroys the ttm_mem_reg
          */
-        if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
+        if (dev_priv->card_type >= NV_50) {
                 struct nouveau_mem *node = old_mem->mm_node;
                 if (!node->tmp_vma.node) {
-                        ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
-                                             nvbo->vma.node->type, NV_MEM_ACCESS_RO,
-                                             &node->tmp_vma);
+                        u32 page_shift = nvbo->vma.node->type;
+                        if (old_mem->mem_type == TTM_PL_TT)
+                                page_shift = nvbo->vma.vm->spg_shift;
+
+                        ret = nouveau_vm_get(chan->vm,
+                                             old_mem->num_pages << PAGE_SHIFT,
+                                             page_shift, NV_MEM_ACCESS_RO,
+                                             &node->tmp_vma);
                         if (ret)
                                 goto out;
                 }
 
-                nouveau_vm_map(&node->tmp_vma, node);
+                if (old_mem->mem_type == TTM_PL_VRAM)
+                        nouveau_vm_map(&node->tmp_vma, node);
+                else {
+                        nouveau_vm_map_sg(&node->tmp_vma, 0,
+                                          old_mem->num_pages << PAGE_SHIFT,
+                                          node, node->pages);
+                }
         }
 
         if (dev_priv->card_type < NV_50)
@@ -764,6 +759,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                       bool no_wait_reserve, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
         u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
         struct ttm_placement placement;
         struct ttm_mem_reg tmp_mem;
@@ -783,7 +779,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
         if (ret)
                 goto out;
 
+        if (dev_priv->card_type >= NV_50) {
+                struct nouveau_bo *nvbo = nouveau_bo(bo);
+                struct nouveau_mem *node = tmp_mem.mm_node;
+                struct nouveau_vma *vma = &nvbo->vma;
+
+                if (vma->node->type != vma->vm->spg_shift)
+                        vma = &node->tmp_vma;
+                nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+                                  node, node->pages);
+        }
+
         ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+        if (dev_priv->card_type >= NV_50) {
+                struct nouveau_bo *nvbo = nouveau_bo(bo);
+                nouveau_vm_unmap(&nvbo->vma);
+        }
+
         if (ret)
                 goto out;
@@ -830,16 +842,26 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_mem *node = new_mem->mm_node;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_vma *vma = &nvbo->vma;
+        struct nouveau_vm *vm = vma->vm;
 
         if (dev_priv->card_type < NV_50)
                 return;
 
         switch (new_mem->mem_type) {
         case TTM_PL_VRAM:
-                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
+                nouveau_vm_map(vma, node);
                 break;
         case TTM_PL_TT:
+                if (vma->node->type != vm->spg_shift) {
+                        nouveau_vm_unmap(vma);
+                        vma = &node->tmp_vma;
+                }
+                nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+                                  node, node->pages);
+                break;
         default:
                 nouveau_vm_unmap(&nvbo->vma);
                 break;

drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -73,6 +73,7 @@ struct nouveau_mem {
         u8  page_shift;
 
         struct list_head regions;
         dma_addr_t *pages;
+        u32 memtype;
         u64 offset;
         u64 size;
@@ -706,7 +707,6 @@ struct drm_nouveau_private {
                 } dummy;
 
                 struct nouveau_gpuobj *sg_ctxdma;
-                struct nouveau_vma vma;
         } gart_info;
 
         /* nv10-nv40 tiling regions */
@@ -846,6 +846,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
                                      struct nouveau_tile_reg *tile,
                                      struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
 
 /* nouveau_notifier.c */
 extern int nouveau_notifier_init_channel(struct nouveau_channel *);

drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -785,3 +785,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
         nouveau_vram_manager_del,
         nouveau_vram_manager_debug
 };
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+        return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+        return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem)
+{
+        struct nouveau_mem *node = mem->mm_node;
+
+        if (node->tmp_vma.node) {
+                nouveau_vm_unmap(&node->tmp_vma);
+                nouveau_vm_put(&node->tmp_vma);
+        }
+        mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem)
+{
+        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_vma *vma = &nvbo->vma;
+        struct nouveau_vm *vm = vma->vm;
+        struct nouveau_mem *node;
+        int ret;
+
+        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+                     dev_priv->gart_info.aper_size))
+                return -ENOMEM;
+
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+
+        /* This node must be for evicting large-paged VRAM
+         * to system memory.  Due to a nv50 limitation of
+         * not being able to mix large/small pages within
+         * the same PDE, we need to create a temporary
+         * small-paged VMA for the eviction.
+         */
+        if (vma->node->type != vm->spg_shift) {
+                ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+                                     vm->spg_shift, NV_MEM_ACCESS_RW,
+                                     &node->tmp_vma);
+                if (ret) {
+                        kfree(node);
+                        return ret;
+                }
+        }
+
+        node->page_shift = nvbo->vma.node->type;
+        mem->mm_node = node;
+        mem->start   = 0;
+        return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+        nouveau_gart_manager_init,
+        nouveau_gart_manager_fini,
+        nouveau_gart_manager_new,
+        nouveau_gart_manager_del,
+        nouveau_gart_manager_debug
+};
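The comment inside nouveau_gart_manager_new() above is the core constraint behind most of this patch. A standalone sketch of that page-size check follows, with simplified stand-in types; the shift values are illustrative and not taken from this diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: a VMA records the page shift it was created
 * with ("type" in the diff) and its VM records the small-page shift. */
struct vm  { uint32_t spg_shift; };
struct vma { const struct vm *vm; uint32_t type; };

/* System-memory (GART) pages are always small pages, and an nv50 PDE
 * cannot mix page sizes, so a large-page VMA cannot address the
 * eviction target directly; a temporary small-paged VMA is needed. */
static bool needs_tmp_vma(const struct vma *vma)
{
        return vma->type != vma->vm->spg_shift;
}

int main(void)
{
        struct vm  vm    = { .spg_shift = 12 };        /* 4 KiB small pages */
        struct vma small = { .vm = &vm, .type = 12 };
        struct vma large = { .vm = &vm, .type = 16 };  /* illustrative large-page shift */

        printf("small-page vma needs tmp vma: %d\n", needs_tmp_vma(&small)); /* 0 */
        printf("large-page vma needs tmp vma: %d\n", needs_tmp_vma(&large)); /* 1 */
        return 0;
}

The same vma->node->type != vm->spg_shift test shows up in nouveau_bo_move_ntfy() and nouveau_bo_move_flipd() above before they fall back to node->tmp_vma.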

drivers/gpu/drm/nouveau/nouveau_notifier.c

@@ -98,6 +98,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                        int size, uint32_t *b_offset)
 {
         struct drm_device *dev = chan->dev;
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_gpuobj *nobj = NULL;
         struct drm_mm_node *mem;
         uint32_t offset;
@@ -111,11 +112,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                 return -ENOMEM;
         }
 
-        if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
-                target = NV_MEM_TARGET_VRAM;
-        else
-                target = NV_MEM_TARGET_GART;
-        offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+        if (dev_priv->card_type < NV_50) {
+                if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+                        target = NV_MEM_TARGET_VRAM;
+                else
+                        target = NV_MEM_TARGET_GART;
+                offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+        } else {
+                target = NV_MEM_TARGET_VM;
+                offset = chan->notifier_bo->vma.offset;
+        }
         offset += mem->start;
 
         ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,

drivers/gpu/drm/nouveau/nouveau_sgdma.c

@@ -375,12 +375,10 @@ static int
 nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-        nvbe->offset = mem->start << PAGE_SHIFT;
-
-        nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
-                          nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+        struct nouveau_mem *node = mem->mm_node;
+        /* noop: bound in move_notify() */
+        node->pages = nvbe->pages;
+        nvbe->pages = (dma_addr_t *)node;
         nvbe->bound = true;
         return 0;
 }
@@ -389,13 +387,10 @@ static int
 nv50_sgdma_unbind(struct ttm_backend *be)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-        if (!nvbe->bound)
-                return 0;
-
-        nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
-                            nvbe->nr_pages << PAGE_SHIFT);
+        struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+        /* noop: unbound in move_notify() */
+        nvbe->pages = node->pages;
+        node->pages = NULL;
         nvbe->bound = false;
         return 0;
 }
@@ -457,13 +452,7 @@ nouveau_sgdma_init(struct drm_device *dev)
         }
 
         if (dev_priv->card_type >= NV_50) {
-                ret = nouveau_vm_get(dev_priv->chan_vm, aper_size,
-                                     12, NV_MEM_ACCESS_RW,
-                                     &dev_priv->gart_info.vma);
-                if (ret)
-                        return ret;
-
-                dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+                dev_priv->gart_info.aper_base = 0;
                 dev_priv->gart_info.aper_size = aper_size;
                 dev_priv->gart_info.type = NOUVEAU_GART_HW;
                 dev_priv->gart_info.func = &nv50_sgdma_backend;
@@ -522,7 +511,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
         struct drm_nouveau_private *dev_priv = dev->dev_private;
 
         nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-        nouveau_vm_put(&dev_priv->gart_info.vma);
 
         if (dev_priv->gart_info.dummy.page) {
                 pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,

drivers/gpu/drm/nouveau/nouveau_vm.c

@@ -74,7 +74,7 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 
 void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
-                  dma_addr_t *list)
+                  struct nouveau_mem *mem, dma_addr_t *list)
 {
         struct nouveau_vm *vm = vma->vm;
         int big = vma->node->type != vm->spg_shift;
@@ -94,7 +94,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                         end = max;
                 len = end - pte;
 
-                vm->map_sg(vma, pgt, pte, list, len);
+                vm->map_sg(vma, pgt, mem, pte, len, list);
 
                 num -= len;
                 pte += len;

drivers/gpu/drm/nouveau/nouveau_vm.h

@@ -69,7 +69,7 @@ struct nouveau_vm {
         void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
                     struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
         void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
-                       u32 pte, dma_addr_t *, u32 cnt);
+                       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
         void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
         void (*flush)(struct nouveau_vm *);
 };
@@ -87,7 +87,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-                       dma_addr_t *);
+                       struct nouveau_mem *, dma_addr_t *);
 
 /* nv50_vm.c */
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
@@ -95,7 +95,7 @@ void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
                  struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
-                    u32 pte, dma_addr_t *, u32 cnt);
+                    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
 void nv50_vm_flush(struct nouveau_vm *);
 void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -106,7 +106,7 @@ void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
                  struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
-                    u32 pte, dma_addr_t *, u32 cnt);
+                    struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
 void nvc0_vm_flush(struct nouveau_vm *);

drivers/gpu/drm/nouveau/nv50_vm.c

@@ -57,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 }
 
 static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-             u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 {
-        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+        struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
 
         phys |= 1; /* present */
         phys |= (u64)memtype << 40;
@@ -89,7 +88,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
         u32 block;
         int i;
 
-        phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+        phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
 
         pte <<= 3;
         cnt <<= 3;
@@ -118,11 +117,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-               u32 pte, dma_addr_t *list, u32 cnt)
+               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte <<= 3;
         while (cnt--) {
-                u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+                u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
                 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                 pte += 8;
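These map_sg changes (and the matching nvc0 hunk below) are why struct nouveau_mem gained a memtype field: PTEs for system-memory pages previously hard-coded storage type 0 and now inherit the buffer's memtype. A hedged sketch limited to the PTE bits actually visible in nv50_vm_addr() above; how the target bits are packed is not shown in this diff, so they are omitted.

#include <stdint.h>
#include <stdio.h>

/* Only the fields visible in the hunk above: bit 0 is the present bit
 * and the memtype (storage type) field sits at bit 40. Everything
 * else is omitted because this diff does not show it. */
static uint64_t nv50_pte_sketch(uint64_t phys, uint32_t memtype)
{
        uint64_t pte = phys;

        pte |= 1;                        /* present */
        pte |= (uint64_t)memtype << 40;  /* storage type */
        return pte;
}

int main(void)
{
        /* Illustrative values. A PTE is 8 bytes; the driver writes it
         * as two 32-bit words, like the nv_wo32() pair in the hunk. */
        uint64_t pte = nv50_pte_sketch(0x00100000ull, 0x7a);

        printf("lo=0x%08x hi=0x%08x\n",
               (uint32_t)(pte & 0xffffffffu), (uint32_t)(pte >> 32));
        return 0;
}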

drivers/gpu/drm/nouveau/nvc0_vm.c

@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 
 void
 nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-               u32 pte, dma_addr_t *list, u32 cnt)
+               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte <<= 3;
         while (cnt--) {
-                u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+                u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
                 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                 pte += 8;