drm/ttm: flip the switch for driver allocated resources v2
Instead of both driver and TTM allocating memory, finalize embedding the
ttm_resource object as base into the driver backends.

v2: fix typo in vmwgfx grid mgr and double init in amdgpu_vram_mgr.c

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-10-christian.koenig@amd.com
This commit is contained in:

Parent d3bcb4b02f
Commit cb1c81467a
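The driver-side pattern this patch finalizes, sketched below for a hypothetical "foo" backend: the backend embeds struct ttm_resource as the first member of its own node type, initializes the embedded base in its alloc callback, hands the base pointer back through **res, and recovers the full object in free with container_of(). This is a minimal sketch, not code from the patch; every foo_* name is illustrative, while the TTM calls and the alloc/free signatures match the interface introduced here.

	/* Minimal sketch, assuming the post-patch TTM interface.
	 * The foo_* names are hypothetical; only ttm_resource_init(),
	 * container_of() and the callback signatures are from the patch.
	 */
	#include <linux/slab.h>
	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_placement.h>
	#include <drm/ttm/ttm_resource.h>

	struct foo_resource {
		struct ttm_resource base;	/* embedded, no separate TTM allocation */
		unsigned long foo_private;	/* driver-private backing state */
	};

	static inline struct foo_resource *to_foo_resource(struct ttm_resource *res)
	{
		return container_of(res, struct foo_resource, base);
	}

	static int foo_man_alloc(struct ttm_resource_manager *man,
				 struct ttm_buffer_object *bo,
				 const struct ttm_place *place,
				 struct ttm_resource **res)
	{
		struct foo_resource *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;

		/* The driver now initializes the embedded base itself ... */
		ttm_resource_init(bo, place, &node->base);
		node->base.start = 0;		/* ... and fills in the placement. */

		*res = &node->base;		/* Hand only the base back to TTM. */
		return 0;
	}

	static void foo_man_free(struct ttm_resource_manager *man,
				 struct ttm_resource *res)
	{
		/* The driver frees the whole object; TTM no longer kfree()s it. */
		kfree(to_foo_resource(res));
	}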
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -40,8 +40,7 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node,
-			    base.mm_nodes[0]);
+	return container_of(res, struct amdgpu_gtt_node, base.base);
 }
 
 /**
@@ -102,13 +101,13 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
 /**
  * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
- * @mem: the mem object to check
+ * @res: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 
 	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
@@ -126,19 +125,20 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
@@ -154,29 +154,28 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		spin_lock(&mgr->lock);
 		r = drm_mm_insert_node_in_range(&mgr->mm,
 						&node->base.mm_nodes[0],
-						mem->num_pages,
-						tbo->page_alignment, 0,
-						place->fpfn, place->lpfn,
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
 		spin_unlock(&mgr->lock);
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->base.mm_nodes[0].start;
+		node->base.base.start = node->base.mm_nodes[0].start;
 	} else {
 		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = mem->num_pages;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->base.mm_nodes[0];
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -190,21 +189,16 @@ err_out:
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_amdgpu_gtt_node(mem);
 
 	spin_lock(&mgr->lock);
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
 
 #include <drm/drm_mm.h>
 #include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -219,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = bo->tbo.resource;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
@@ -367,7 +368,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
@@ -388,7 +389,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		r = -ENOSPC;
 		goto error_sub;
@@ -406,7 +407,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
@@ -422,8 +423,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -451,7 +451,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		}
 
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
 
@@ -461,10 +461,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = &node->mm_nodes[0];
+	*res = &node->base;
 	return 0;
 
 error_free:
@@ -487,28 +487,22 @@ error_sub:
  * Free the allocated VRAM again.
 */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-				struct ttm_resource *mem)
+				struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct ttm_range_mgr_node *node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-	struct drm_mm_node *nodes;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
-	nodes = &node->mm_nodes[0];
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
@@ -533,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 * Allocate and fill a sg table from a VRAM allocation.
 */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
@@ -549,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
@@ -569,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -250,7 +250,8 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
 	return gbo->bo.resource->start;
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 
 }
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	if (!mem)
 		return;
 
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
 
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
 
-	reg->mm_node = mem;
+	*res = &mem->base;
 	return 0;
 }
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,12 +6,6 @@ struct ttm_tt;
 
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
 	struct ttm_resource base;
 	struct nouveau_cli *cli;
@@ -21,8 +15,14 @@ struct nouveau_mem {
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -45,7 +45,7 @@ static int
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -54,15 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	ttm_resource_init(bo, place, *res);
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
@@ -78,18 +78,18 @@ static int
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
 
@@ -102,27 +102,27 @@ static int
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -58,7 +58,7 @@ to_range_manager(struct ttm_resource_manager *man)
 static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *bo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
 	struct ttm_range_mgr_node *node;
@@ -83,37 +83,30 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
-	if (unlikely(ret)) {
+	if (unlikely(ret))
 		kfree(node);
-	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
-	}
+	else
+		node->base.start = node->mm_nodes[0].start;
 
 	return ret;
 }
 
 static void ttm_range_man_free(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct ttm_range_manager *rman = to_range_manager(man);
-	struct ttm_range_mgr_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
 	kfree(node);
-	mem->mm_node = NULL;
 }
 
 static void ttm_range_man_debug(struct ttm_resource_manager *man,
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -29,7 +29,6 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 			const struct ttm_place *place,
 			struct ttm_resource *res)
 {
-	res->mm_node = NULL;
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
@@ -47,22 +46,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 {
 	struct ttm_resource_manager *man =
 		ttm_manager_type(bo->bdev, place->mem_type);
-	struct ttm_resource *res;
-	int r;
-
-	res = kmalloc(sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	ttm_resource_init(bo, place, res);
-	r = man->func->alloc(man, bo, place, res);
-	if (r) {
-		kfree(res);
-		return r;
-	}
 
-	*res_ptr = res;
-	return 0;
+	return man->func->alloc(man, bo, place, res_ptr);
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -74,7 +59,6 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
 
 	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
 	man->func->free(man, *res);
-	kfree(*res);
 	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -10,20 +10,20 @@
 static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 			     struct ttm_buffer_object *bo,
 			     const struct ttm_place *place,
-			     struct ttm_resource *mem)
+			     struct ttm_resource **res)
 {
-	mem->mm_node = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 	return 0;
 }
 
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func ttm_sys_manager_func = {
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,16 +52,16 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
-	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kmalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
@@ -70,34 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += mem->num_pages;
+		gman->used_gmr_pages += (*res)->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
-	mem->mm_node = gman;
-	mem->start = id;
+	(*res)->start = id;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	kfree(*res);
 	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	ida_free(&gman->gmr_ida, mem->start);
+	ida_free(&gman->gmr_ida, res->start);
 	spin_lock(&gman->lock);
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= res->num_pages;
 	spin_unlock(&gman->lock);
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -51,7 +51,7 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
 			    struct ttm_buffer_object *bo,
 			    const struct ttm_place *place,
-			    struct ttm_resource *mem)
+			    struct ttm_resource **res)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
@@ -78,26 +78,27 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	spin_lock(&rman->lock);
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (mem->num_pages >= align_pages) {
+		if (node->base.num_pages >= align_pages) {
 			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-						     align_pages, place, mem,
-						     lpfn, mode);
+						     align_pages, place,
+						     &node->base, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (mem->num_pages >= align_pages) {
+	if (node->base.num_pages >= align_pages) {
 		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-					     align_pages, place, mem, lpfn,
-					     mode);
+					     align_pages, place, &node->base,
+					     lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ -105,20 +106,18 @@ found_unlock:
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
+		*res = &node->base;
 	}
 
 	return ret;
 }
 
-
-
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct ttm_range_mgr_node * node = mem->mm_node;
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -30,8 +30,7 @@ struct ttm_range_mgr_node {
 static inline struct ttm_range_mgr_node *
 to_ttm_range_mgr_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct ttm_range_mgr_node,
-			    mm_nodes[0]);
+	return container_of(res, struct ttm_range_mgr_node, base);
 }
 
 int ttm_range_man_init(struct ttm_device *bdev,
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -45,46 +45,38 @@ struct ttm_resource_manager_func {
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
-	 * @placement: Placement details.
-	 * @flags: Additional placement flags.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @place: Placement details.
+	 * @res: Resulting pointer to the ttm_resource.
	 *
	 * This function should allocate space in the memory type managed
-	 * by @man. Placement details if
-	 * applicable are given by @placement. If successful,
-	 * @mem::mm_node should be set to a non-null value, and
-	 * @mem::start should be set to a value identifying the beginning
+	 * by @man. Placement details if applicable are given by @place. If
+	 * successful, a filled in ttm_resource object should be returned in
+	 * @res. @res::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
-	 * If the memory region accommodate the buffer object, @mem::mm_node
-	 * should be set to NULL, and the function should return 0.
	 * If the manager can't fulfill the request -ENOSPC should be returned.
	 * If a system error occurred, preventing the request to be fulfilled,
	 * the function should return a negative error code.
	 *
-	 * Note that @mem::mm_node will only be dereferenced by
-	 * struct ttm_resource_manager functions and optionally by the driver,
-	 * which has knowledge of the underlying type.
-	 *
-	 * This function may not be called from within atomic context, so
-	 * an implementation can and must use either a mutex or a spinlock to
-	 * protect any data structures managing the space.
+	 * This function may not be called from within atomic context and needs
+	 * to take care of its own locking to protect any data structures
+	 * managing the space.
	 */
	int (*alloc)(struct ttm_resource_manager *man,
		     struct ttm_buffer_object *bo,
		     const struct ttm_place *place,
-		     struct ttm_resource *mem);
+		     struct ttm_resource **res);
 
	/**
	 * struct ttm_resource_manager_func member free
	 *
	 * @man: Pointer to a memory type manager.
-	 * @mem: Pointer to a struct ttm_resource to be filled in.
+	 * @res: Pointer to a struct ttm_resource to be freed.
	 *
-	 * This function frees memory type resources previously allocated
-	 * and that are identified by @mem::mm_node and @mem::start. May not
-	 * be called from within atomic context.
+	 * This function frees memory type resources previously allocated.
+	 * May not be called from within atomic context.
	 */
	void (*free)(struct ttm_resource_manager *man,
-		     struct ttm_resource *mem);
+		     struct ttm_resource *res);
 
	/**
	 * struct ttm_resource_manager_func member debug
@@ -158,9 +150,9 @@ struct ttm_bus_placement {
 /**
 * struct ttm_resource
 *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
 * @mem_type: Resource type of the allocation.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
@@ -168,7 +160,6 @@ struct ttm_bus_placement {
 * buffer object.
 */
 struct ttm_resource {
-	void *mm_node;
	unsigned long start;
	unsigned long num_pages;
	uint32_t mem_type;