drm/nouveau/core/mm: have users explicitly define heap identifiers
Different sections of VRAM may have different properties (e.g. they can't be used for compression/display, or can't be mapped). We already support this, but it's a bit magic: heap identifiers are assigned implicitly, in whatever order the heaps happen to be registered. This change makes it explicit which heap we're allocating from.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent: 24e8375b1b
Commit: 4d058fab63
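For orientation before the hunks: nvkm_mm_init() now takes a u8 heap identifier chosen by the caller, matching the identifier that nvkm_mm_head()/nvkm_mm_tail() already accept when allocating. Below is a minimal sketch of the resulting flow, assembled from the hunks that follow; `ram`, `size` and `tag_size` stand in for whatever the caller has, and error handling is abbreviated, so this is illustrative rather than a verbatim kernel excerpt:

        struct nvkm_mm_node *node;
        int ret;

        /* Seed the normally-addressed VRAM heap; offsets/lengths are in
         * units of 1 << NVKM_RAM_MM_SHIFT (4KiB pages), block size one unit.
         */
        ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
                           size >> NVKM_RAM_MM_SHIFT, 1);
        if (ret)
                return ret;

        /* Allocate from the tail of that specific heap, as the LTC tag-RAM
         * hunk below does, instead of from whichever heap happened to be
         * registered first.
         */
        ret = nvkm_mm_tail(&ram->vram, NVKM_RAM_MM_NORMAL, 1, tag_size,
                           tag_size, 1, &node);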
@@ -30,7 +30,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
 	return mm->heap_nodes;
 }
 
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
 int nvkm_mm_fini(struct nvkm_mm *);
 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		 u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -123,6 +123,10 @@ struct nvkm_ram {
 	u64 size;
 
 #define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY    (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP  (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
 	struct nvkm_mm tags;
 	u64 stolen;
@@ -340,7 +340,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
 done:
 	if (ret)
 		nouveau_abi16_chan_fini(abi16, chan);
@@ -185,7 +185,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
 		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
-	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
 }
 
 void
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 }
 
 int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
 {
 	struct nvkm_mm_node *node, *prev;
 	u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
 
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	node->heap = ++mm->heap_nodes;
+	node->heap = heap;
+	mm->heap_nodes++;
 	return 0;
 }
 
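The hunk above is the heart of the change: previously each nvkm_mm_init() call minted a fresh identifier (node->heap = ++mm->heap_nodes), so a heap's number depended on registration order, while now the caller names the heap and heap_nodes is only a count. A hypothetical illustration of what this enables (the offsets and lengths here are made up, not from this commit) is the gf100 pattern further down, where the same nvkm_mm is seeded twice with well-known identifiers:

        /* Registration order no longer dictates identifiers; each range is
         * tagged with the caller-chosen heap (values below are made up).
         */
        nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0x0000, 0x8000, 1);
        nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,  0x8000, 0x8000, 1);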
@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 		if (ret)
 			goto free_domain;
 
-		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
 				   (1ULL << tdev->func->iommu_bit) >>
 				   tdev->iommu.pgshift, 1);
 		if (ret)
@@ -73,13 +73,14 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	ram->size = size;
 
 	if (!nvkm_mm_initialised(&ram->vram)) {
-		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
+				   size >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	}
 
 	if (!nvkm_mm_initialised(&ram->tags)) {
-		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+		ret = nvkm_mm_init(&ram->tags, 0, 0, tags ? ++tags : 0, 1);
 		if (ret)
 			return ret;
 	}
@@ -617,7 +617,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	 */
 	if (lower != total) {
 		/* The common memory amount is addressed normally. */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
@@ -625,13 +626,15 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 		/* And the rest is much higher in the physical address
 		 * space, and may not be usable for certain operations.
 		 */
-		ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
+				   ubase >> NVKM_RAM_MM_SHIFT,
 				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	} else {
 		/* GPUs without mixed-memory are a lot nicer... */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (total - rsvd_head - rsvd_tail) >>
 				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
@@ -81,7 +81,8 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	ram->base.stolen = base;
 	nvkm_mm_fini(&ram->base.vram);
 
-	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >>
 			    NVKM_RAM_MM_SHIFT, 1);
 }
@@ -669,7 +669,8 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
 	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
 	nvkm_mm_fini(&ram->vram);
 
-	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
 			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
@@ -165,7 +165,7 @@ nv04_instmem_oneinit(struct nvkm_instmem *base)
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
 	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
@@ -177,7 +177,7 @@ nv40_instmem_oneinit(struct nvkm_instmem *base)
 	imem->base.reserved += 512 * 1024; /* object storage */
 	imem->base.reserved = round_up(imem->base.reserved, 4096);
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
@@ -183,8 +183,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	tag_size += tag_align;
 	tag_size = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
-			   &ltc->tag_ram);
+	ret = nvkm_mm_tail(&ram->vram, NVKM_RAM_MM_NORMAL, 1, tag_size,
+			   tag_size, 1, &ltc->tag_ram);
 	if (ret) {
 		ltc->num_tags = 0;
 	} else {
@@ -197,7 +197,7 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	}
 
 mm_init:
-	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
+	return nvkm_mm_init(&ltc->tags, 0, 0, ltc->num_tags, 1);
 }
 
 int
@@ -45,7 +45,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
 	ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
 	ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
 	/*XXX: tagram allocation - TBD */
-	return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+	return nvkm_mm_init(&ltc->tags, 0, 0, 0, 1);
 }
 
 static void
@@ -388,7 +388,7 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 		return -ENOMEM;
 	}
 
-	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+	ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
 			   block >> 12);
 	if (ret) {
 		vfree(vm->pgt);