drm/amdgpu: untie user ring ids from kernel ring ids v6
Add amdgpu_queue_mgr, a mechanism that decouples usermode ring ids from the kernel's ring ids.

The queue manager maintains a per-context map of user ring ids to amdgpu_ring pointers. Once a map entry is created it is permanent (this is required to maintain FIFO execution guarantees for a context's ring).

Different queue mapping policies can be configured for each HW IP. Currently all HW IPs use the identity mapper, i.e. the kernel ring id is equal to the user ring id. The purpose of this mechanism is to distribute the load across multiple queues more effectively for HW IPs that support multiple rings. Userspace clients cannot check whether a specific resource is in use by a different client, so it is up to the kernel driver to make the optimal choice.

v2: remove amdgpu_queue_mapper_funcs
v3: made amdgpu_queue_mgr per context instead of per-fd
v4: add context_put on error paths
v5: rebase and include new IPs UVD_ENC & VCN_*
v6: drop unused amdgpu_ring_is_valid_index (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent: ecd910eb1f
Commit: effd924d2f
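As a hedged illustration (not part of the patch itself), the sketch below shows how a submission path is expected to use the new interface: the userspace (ip_type, ip_instance, ring) triple is resolved through the context's queue manager instead of amdgpu_cs_get_ring(). The variable names ctx, ip_type, ip_instance and user_ring are assumed for the example, and error handling is trimmed.

        struct amdgpu_ring *ring;
        int r;

        /* ctx is the submitting amdgpu_ctx; ip_type, ip_instance and
         * user_ring come straight from the userspace chunk (assumed names).
         * The first call for a given user ring id creates the mapping;
         * later calls return the cached kernel ring, preserving FIFO
         * ordering for that user ring. */
        r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
                                 ip_type, ip_instance, user_ring, &ring);
        if (r)
                return r;

        /* from here on the mapped kernel ring is used exactly as before */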
drivers/gpu/drm/amd/amdgpu/Makefile

@@ -24,7 +24,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
         atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
         amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
         amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o
+        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
+        amdgpu_queue_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -776,6 +776,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct dma_fence **f);
 
+/*
+ * Queue manager
+ */
+struct amdgpu_queue_mapper {
+        int                hw_ip;
+        struct mutex       lock;
+        /* protected by lock */
+        struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_queue_mgr {
+        struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+};
+
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+                          struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+                          struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+                         struct amdgpu_queue_mgr *mgr,
+                         int hw_ip, int instance, int ring,
+                         struct amdgpu_ring **out_ring);
+
 /*
  * context related structures
  */
@@ -789,6 +812,7 @@ struct amdgpu_ctx_ring {
 struct amdgpu_ctx {
         struct kref             refcount;
         struct amdgpu_device    *adev;
+        struct amdgpu_queue_mgr queue_mgr;
         unsigned                reset_counter;
         spinlock_t              ring_lock;
         struct dma_fence        **fences;
@@ -1909,9 +1933,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-                       u32 ip_instance, u32 ring,
-                       struct amdgpu_ring **out_ring);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -30,90 +30,6 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-                       u32 ip_instance, u32 ring,
-                       struct amdgpu_ring **out_ring)
-{
-        /* Right now all IPs have only one instance - multiple rings. */
-        if (ip_instance != 0) {
-                DRM_ERROR("invalid ip instance: %d\n", ip_instance);
-                return -EINVAL;
-        }
-
-        switch (ip_type) {
-        default:
-                DRM_ERROR("unknown ip type: %d\n", ip_type);
-                return -EINVAL;
-        case AMDGPU_HW_IP_GFX:
-                if (ring < adev->gfx.num_gfx_rings) {
-                        *out_ring = &adev->gfx.gfx_ring[ring];
-                } else {
-                        DRM_ERROR("only %d gfx rings are supported now\n",
-                                  adev->gfx.num_gfx_rings);
-                        return -EINVAL;
-                }
-                break;
-        case AMDGPU_HW_IP_COMPUTE:
-                if (ring < adev->gfx.num_compute_rings) {
-                        *out_ring = &adev->gfx.compute_ring[ring];
-                } else {
-                        DRM_ERROR("only %d compute rings are supported now\n",
-                                  adev->gfx.num_compute_rings);
-                        return -EINVAL;
-                }
-                break;
-        case AMDGPU_HW_IP_DMA:
-                if (ring < adev->sdma.num_instances) {
-                        *out_ring = &adev->sdma.instance[ring].ring;
-                } else {
-                        DRM_ERROR("only %d SDMA rings are supported\n",
-                                  adev->sdma.num_instances);
-                        return -EINVAL;
-                }
-                break;
-        case AMDGPU_HW_IP_UVD:
-                *out_ring = &adev->uvd.ring;
-                break;
-        case AMDGPU_HW_IP_VCE:
-                if (ring < adev->vce.num_rings){
-                        *out_ring = &adev->vce.ring[ring];
-                } else {
-                        DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
-                        return -EINVAL;
-                }
-                break;
-        case AMDGPU_HW_IP_UVD_ENC:
-                if (ring < adev->uvd.num_enc_rings){
-                        *out_ring = &adev->uvd.ring_enc[ring];
-                } else {
-                        DRM_ERROR("only %d UVD ENC rings are supported\n",
-                                  adev->uvd.num_enc_rings);
-                        return -EINVAL;
-                }
-                break;
-        case AMDGPU_HW_IP_VCN_DEC:
-                *out_ring = &adev->vcn.ring_dec;
-                break;
-        case AMDGPU_HW_IP_VCN_ENC:
-                if (ring < adev->vcn.num_enc_rings){
-                        *out_ring = &adev->vcn.ring_enc[ring];
-                } else {
-                        DRM_ERROR("only %d VCN ENC rings are supported\n",
-                                  adev->vcn.num_enc_rings);
-                        return -EINVAL;
-                }
-                break;
-        }
-
-        if (!(*out_ring && (*out_ring)->adev)) {
-                DRM_ERROR("Ring %d is not initialized on IP %d\n",
-                          ring, ip_type);
-                return -EINVAL;
-        }
-
-        return 0;
-}
-
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                       struct drm_amdgpu_cs_chunk_fence *data,
                                       uint32_t *offset)
@@ -928,9 +844,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                         return -EINVAL;
                 }
 
-                r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
-                                       chunk_ib->ip_instance, chunk_ib->ring,
-                                       &ring);
+                r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
+                                         chunk_ib->ip_instance, chunk_ib->ring, &ring);
                 if (r)
                         return r;
 
@@ -1032,16 +947,19 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                 struct amdgpu_ctx *ctx;
                 struct dma_fence *fence;
 
-                r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
-                                       deps[j].ip_instance,
-                                       deps[j].ring, &ring);
-                if (r)
-                        return r;
-
                 ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
                 if (ctx == NULL)
                         return -EINVAL;
 
+                r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+                                         deps[j].ip_type,
+                                         deps[j].ip_instance,
+                                         deps[j].ring, &ring);
+                if (r) {
+                        amdgpu_ctx_put(ctx);
+                        return r;
+                }
+
                 fence = amdgpu_ctx_get_fence(ctx, ring,
                                              deps[j].handle);
                 if (IS_ERR(fence)) {
@@ -1177,15 +1095,19 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 
         if (amdgpu_kms_vram_lost(adev, fpriv))
                 return -ENODEV;
-        r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-                               wait->in.ring, &ring);
-        if (r)
-                return r;
 
         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
         if (ctx == NULL)
                 return -EINVAL;
 
+        r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+                                 wait->in.ip_type, wait->in.ip_instance,
+                                 wait->in.ring, &ring);
+        if (r) {
+                amdgpu_ctx_put(ctx);
+                return r;
+        }
+
         fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
         if (IS_ERR(fence))
                 r = PTR_ERR(fence);
@@ -1221,15 +1143,17 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
         struct dma_fence *fence;
         int r;
 
-        r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
-                               user->ring, &ring);
-        if (r)
-                return ERR_PTR(r);
-
         ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
         if (ctx == NULL)
                 return ERR_PTR(-EINVAL);
 
+        r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
+                                 user->ip_instance, user->ring, &ring);
+        if (r) {
+                amdgpu_ctx_put(ctx);
+                return ERR_PTR(r);
+        }
+
         fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
         amdgpu_ctx_put(ctx);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -62,6 +62,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
                 goto failed;
         }
 
+        r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
+        if (r)
+                goto failed;
+
         return 0;
 
 failed:
@@ -90,6 +94,8 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
         for (i = 0; i < adev->num_rings; i++)
                 amd_sched_entity_fini(&adev->rings[i]->sched,
                                       &ctx->rings[i].entity);
+
+        amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c (new file)

@@ -0,0 +1,251 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ring.h"
+
+static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
+                                    int hw_ip)
+{
+        if (!mapper)
+                return -EINVAL;
+
+        if (hw_ip > AMDGPU_MAX_IP_NUM)
+                return -EINVAL;
+
+        mapper->hw_ip = hw_ip;
+        mutex_init(&mapper->lock);
+
+        memset(mapper->queue_map, 0, sizeof(mapper->queue_map));
+
+        return 0;
+}
+
+static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
+                                                 int ring)
+{
+        return mapper->queue_map[ring];
+}
+
+static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
+                                    int ring, struct amdgpu_ring *pring)
+{
+        if (WARN_ON(mapper->queue_map[ring])) {
+                DRM_ERROR("Un-expected ring re-map\n");
+                return -EINVAL;
+        }
+
+        mapper->queue_map[ring] = pring;
+
+        return 0;
+}
+
+static int amdgpu_identity_map(struct amdgpu_device *adev,
+                               struct amdgpu_queue_mapper *mapper,
+                               int ring,
+                               struct amdgpu_ring **out_ring)
+{
+        switch (mapper->hw_ip) {
+        case AMDGPU_HW_IP_GFX:
+                *out_ring = &adev->gfx.gfx_ring[ring];
+                break;
+        case AMDGPU_HW_IP_COMPUTE:
+                *out_ring = &adev->gfx.compute_ring[ring];
+                break;
+        case AMDGPU_HW_IP_DMA:
+                *out_ring = &adev->sdma.instance[ring].ring;
+                break;
+        case AMDGPU_HW_IP_UVD:
+                *out_ring = &adev->uvd.ring;
+                break;
+        case AMDGPU_HW_IP_VCE:
+                *out_ring = &adev->vce.ring[ring];
+                break;
+        case AMDGPU_HW_IP_UVD_ENC:
+                *out_ring = &adev->uvd.ring_enc[ring];
+                break;
+        case AMDGPU_HW_IP_VCN_DEC:
+                *out_ring = &adev->vcn.ring_dec;
+                break;
+        case AMDGPU_HW_IP_VCN_ENC:
+                *out_ring = &adev->vcn.ring_enc[ring];
+                break;
+        default:
+                *out_ring = NULL;
+                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+                return -EINVAL;
+        }
+
+        return amdgpu_update_cached_map(mapper, ring, *out_ring);
+}
+
+/**
+ * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ *
+ * Initialize the selected @mgr (all asics).
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+                          struct amdgpu_queue_mgr *mgr)
+{
+        int i, r;
+
+        if (!adev || !mgr)
+                return -EINVAL;
+
+        memset(mgr, 0, sizeof(*mgr));
+
+        for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
+                r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
+                if (r)
+                        return r;
+        }
+
+        return 0;
+}
+
+/**
+ * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ *
+ * De-initialize the selected @mgr (all asics).
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+                          struct amdgpu_queue_mgr *mgr)
+{
+        return 0;
+}
+
+/**
+ * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
+ *
+ * @adev: amdgpu_device pointer
+ * @mgr: amdgpu_queue_mgr structure holding queue information
+ * @hw_ip: HW IP enum
+ * @instance: HW instance
+ * @ring: user ring id
+ * @out_ring: pointer to mapped amdgpu_ring
+ *
+ * Map a userspace ring id to an appropriate kernel ring. Different
+ * policies are configurable at a HW IP level.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+                         struct amdgpu_queue_mgr *mgr,
+                         int hw_ip, int instance, int ring,
+                         struct amdgpu_ring **out_ring)
+{
+        int r, ip_num_rings;
+        struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
+
+        if (!adev || !mgr || !out_ring)
+                return -EINVAL;
+
+        if (hw_ip >= AMDGPU_MAX_IP_NUM)
+                return -EINVAL;
+
+        if (ring >= AMDGPU_MAX_RINGS)
+                return -EINVAL;
+
+        /* Right now all IPs have only one instance - multiple rings. */
+        if (instance != 0) {
+                DRM_ERROR("invalid ip instance: %d\n", instance);
+                return -EINVAL;
+        }
+
+        switch (hw_ip) {
+        case AMDGPU_HW_IP_GFX:
+                ip_num_rings = adev->gfx.num_gfx_rings;
+                break;
+        case AMDGPU_HW_IP_COMPUTE:
+                ip_num_rings = adev->gfx.num_compute_rings;
+                break;
+        case AMDGPU_HW_IP_DMA:
+                ip_num_rings = adev->sdma.num_instances;
+                break;
+        case AMDGPU_HW_IP_UVD:
+                ip_num_rings = 1;
+                break;
+        case AMDGPU_HW_IP_VCE:
+                ip_num_rings = adev->vce.num_rings;
+                break;
+        case AMDGPU_HW_IP_UVD_ENC:
+                ip_num_rings = adev->uvd.num_enc_rings;
+                break;
+        case AMDGPU_HW_IP_VCN_DEC:
+                ip_num_rings = 1;
+                break;
+        case AMDGPU_HW_IP_VCN_ENC:
+                ip_num_rings = adev->vcn.num_enc_rings;
+                break;
+        default:
+                DRM_ERROR("unknown ip type: %d\n", hw_ip);
+                return -EINVAL;
+        }
+
+        if (ring >= ip_num_rings) {
+                DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
+                          ring, ip_num_rings, hw_ip);
+                return -EINVAL;
+        }
+
+        mutex_lock(&mapper->lock);
+
+        *out_ring = amdgpu_get_cached_map(mapper, ring);
+        if (*out_ring) {
+                /* cache hit */
+                r = 0;
+                goto out_unlock;
+        }
+
+        switch (mapper->hw_ip) {
+        case AMDGPU_HW_IP_GFX:
+        case AMDGPU_HW_IP_COMPUTE:
+        case AMDGPU_HW_IP_DMA:
+        case AMDGPU_HW_IP_UVD:
+        case AMDGPU_HW_IP_VCE:
+        case AMDGPU_HW_IP_UVD_ENC:
+        case AMDGPU_HW_IP_VCN_DEC:
+        case AMDGPU_HW_IP_VCN_ENC:
+                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
+                break;
+        default:
+                *out_ring = NULL;
+                r = -EINVAL;
+                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+        }
+
+out_unlock:
+        mutex_unlock(&mapper->lock);
+        return r;
+}