Merge tag 'drm-misc-next-2019-10-31' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.5:

UAPI Changes:
- dma-buf: Introduce and revert dma-buf heap (Andrew/John/Sean)

Cross-subsystem Changes:
- None

Core Changes:
- dma-buf: add dynamic mapping to allow exporters to choose the dma_resv
  lock state on map/unmap (Christian); a usage sketch follows this list
- vram: add prepare/cleanup fb helpers to vram helpers (Thomas)
- ttm: always keep BOs on the LRU + TTM cleanups (Christian)
- sched: allow a free_job routine to sleep (Steven)
- fb_helper: remove unused drm_fb_helper_defio_init() (Thomas)
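
For orientation, a minimal importer-side sketch of the new dynamic-mapping
convention (illustrative only, not code from this pull; it assumes a
suitable struct device and an already-exported dma_buf, and trims error
handling):

  #include <linux/dma-buf.h>
  #include <linux/dma-resv.h>

  /* Sketch: a dynamic importer attaches to and maps an exported buffer. */
  static struct sg_table *example_import(struct dma_buf *dmabuf,
                                         struct device *dev,
                                         struct dma_buf_attachment **out)
  {
          struct dma_buf_attachment *attach;
          struct sg_table *sgt;

          /* 'true' marks this importer as dynamic-mapping aware */
          attach = dma_buf_dynamic_attach(dmabuf, dev, true);
          if (IS_ERR(attach))
                  return ERR_CAST(attach);

          /* dynamic importers map/unmap with the exporter's dma_resv held */
          dma_resv_lock(dmabuf->resv, NULL);
          sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
          dma_resv_unlock(dmabuf->resv);

          if (IS_ERR(sgt)) {
                  dma_buf_detach(dmabuf, attach);
                  return sgt;
          }

          *out = attach;
          return sgt;
  }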

Driver Changes:
- bochs/hibmc/vboxvideo: Use new vram helpers for prepare/cleanup fb
  (Thomas); a wiring sketch follows this list
- amdgpu: Implement dma-buf import/export without drm helpers (Christian)
- panfrost: Simplify devfreq integration in driver (Steven)
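
A minimal sketch (not part of this pull) of how a VRAM-backed driver wires
the new helpers into a simple display pipe, as bochs now does in the diff
below; my_pipe_enable()/my_pipe_update() are hypothetical placeholders for
the driver's existing hooks:

  #include <drm/drm_gem_vram_helper.h>
  #include <drm/drm_simple_kms_helper.h>

  static void my_pipe_enable(struct drm_simple_display_pipe *pipe,
                             struct drm_crtc_state *crtc_state,
                             struct drm_plane_state *plane_state)
  {
          /* driver-specific: program the hardware for the new mode */
  }

  static void my_pipe_update(struct drm_simple_display_pipe *pipe,
                             struct drm_plane_state *old_state)
  {
          /* driver-specific: latch the new framebuffer address */
  }

  static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
          .enable     = my_pipe_enable,
          .update     = my_pipe_update,
          /* pin the new fb's BOs to VRAM / unpin the old fb's BOs */
          .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
          .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
  };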

Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Steven Price <steven.price@arm.com>
Cc: Andrew F. Davis <afd@ti.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031193015.GA243509@art_vandelay
Commit 633aa7e53a by Dave Airlie, 2019-11-04 09:27:41 +10:00
Parents: 57c2af791b fae7d7d5f3
62 files changed, 711 insertions, 735 deletions


@ -118,13 +118,13 @@ Kernel Functions and Structures Reference
Reservation Objects
-------------------
.. kernel-doc:: drivers/dma-buf/reservation.c
.. kernel-doc:: drivers/dma-buf/dma-resv.c
:doc: Reservation Object Overview
.. kernel-doc:: drivers/dma-buf/reservation.c
.. kernel-doc:: drivers/dma-buf/dma-resv.c
:export:
.. kernel-doc:: include/linux/reservation.h
.. kernel-doc:: include/linux/dma-resv.h
:internal:
DMA Fences


@ -206,10 +206,10 @@ Generic fbdev defio support
---------------------------
The defio support code in the fbdev core has some very specific requirements,
which means drivers need to have a special framebuffer for fbdev. Which prevents
us from using the generic fbdev emulation code everywhere. The main issue is
that it uses some fields in struct page itself, which breaks shmem gem objects
(and other things).
which means drivers need to have a special framebuffer for fbdev. The main
issue is that it uses some fields in struct page itself, which breaks shmem
gem objects (and other things). To support defio, affected drivers require
the use of a shadow buffer, which may add CPU and memory overhead.
Possible solution would be to write our own defio mmap code in the drm fbdev
emulation. It would need to fully wrap the existing mmap ops, forwarding


@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
mutex_lock(&dmabuf->lock);
dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
mutex_unlock(&dmabuf->lock);
dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
mutex_lock(&dmabuf->lock);
dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
mutex_unlock(&dmabuf->lock);
dma_resv_unlock(dmabuf->resv);
return ret;
}
@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
mutex_lock(&dmabuf->lock);
dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
mutex_unlock(&dmabuf->lock);
dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@ -525,6 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
exp_info->ops->dynamic_mapping))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@ -645,10 +649,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
* dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
* dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
* @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@ -662,8 +667,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@ -677,24 +683,68 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
mutex_lock(&dmabuf->lock);
attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
mutex_unlock(&dmabuf->lock);
/* When either the importer or the exporter can't handle dynamic
* mappings we cache the mapping here to avoid issues with the
* reservation object lock.
*/
if (dma_buf_attachment_is_dynamic(attach) !=
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_unlock;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
attach->sgt = sgt;
attach->dir = DMA_BIDIRECTIONAL;
}
return attach;
err_attach:
kfree(attach);
mutex_unlock(&dmabuf->lock);
return ERR_PTR(ret);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
* Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
* mapping.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
{
return dma_buf_dynamic_attach(dmabuf, dev, false);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
@ -711,15 +761,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
if (attach->sgt)
if (attach->sgt) {
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
mutex_lock(&dmabuf->lock);
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
}
dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@ -749,6 +806,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
if (dma_buf_attachment_is_dynamic(attach))
dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@ -761,6 +821,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@ -793,9 +856,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
if (dma_buf_attachment_is_dynamic(attach))
dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt == sg_table)
return;
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@ -1171,13 +1240,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
seq_puts(s,
"\tERROR locking buffer object: skipping\n");
continue;
}
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@ -1223,19 +1289,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
error_unlock:
mutex_unlock(&db_list.lock);
return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);


@ -613,7 +613,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates, true);
false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@ -686,7 +686,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates, true);
false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@ -1805,8 +1805,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
true);
ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@ -2004,7 +2003,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
false, &duplicate_save, true);
false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;


@ -581,7 +581,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates, false);
&duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");


@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;


@ -34,26 +34,11 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
* @obj: GEM buffer object (BO)
*
* Returns:
* A scatter/gather table for the pinned pages of the BO's memory.
*/
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int npages = bo->tbo.num_pages;
return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
@ -179,92 +164,126 @@ err_fences_put:
}
/**
* amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
* @dmabuf: DMA-buf where we attach to
* @attach: attachment to add
*
* Add the attachment as user to the exported DMA-buf.
*/
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
if (attach->dev->driver == adev->dev->driver)
return 0;
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
return r;
/*
* We only create shared fences for internal use, but importers
* of the dmabuf rely on exclusive fences for implicitly
* tracking write hazards. As any of the current fences may
* correspond to a write, we need to convert all existing
* fences on the reservation object into a single exclusive
* fence.
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
return r;
bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
return 0;
}
/**
* amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
*
* @dmabuf: DMA-buf where we remove the attachment from
* @attach: the attachment to remove
*
* Called when an attachment is removed from the DMA-buf.
*/
static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
}
/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
*
* Makes sure that the shared DMA buffer can be accessed by the target device.
* For now, simply pins it to the GTT domain, where it should be accessible by
* all DMA devices.
*
* Returns:
* 0 on success or a negative error code on failure.
* sg_table filled with the DMA addresses to use or ERR_PRT with negative error
* code.
*/
static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct sg_table *sgt;
long r;
r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
goto error_detach;
if (attach->dev->driver != adev->dev->driver) {
/*
* We only create shared fences for internal use, but importers
* of the dmabuf rely on exclusive fences for implicitly
* tracking write hazards. As any of the current fences may
* correspond to a write, we need to convert all existing
* fences on the reservation object into a single exclusive
* fence.
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
goto error_unreserve;
}
/* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error_unreserve;
return ERR_PTR(r);
if (attach->dev->driver != adev->dev->driver)
bo->prime_shared_count++;
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
return sgt;
error_unreserve:
amdgpu_bo_unreserve(bo);
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
DMA_ATTR_SKIP_CPU_SYNC))
goto error_free;
error_detach:
if (r)
drm_gem_map_detach(dma_buf, attach);
return r;
return sgt;
error_free:
sg_free_table(sgt);
kfree(sgt);
return ERR_PTR(-ENOMEM);
}
/**
* amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
* @dma_buf: Shared DMA buffer
* amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
* @attach: DMA-buf attachment
* @sgt: sg_table to unmap
* @dir: DMA direction
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int ret = 0;
ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0))
goto error;
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
amdgpu_bo_unpin(bo);
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
error:
drm_gem_map_detach(dma_buf, attach);
}
/**
@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_map_attach,
.detach = amdgpu_dma_buf_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@ -349,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
}
/**
* amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
* implementation
* @dev: DRM device
* @attach: DMA-buf attachment
* @sg: Scatter/gather table
* amdgpu_dma_buf_create_obj - create BO for DMA-buf import
*
* Imports shared DMA buffer memory exported by another device.
* @dev: DRM device
* @dma_buf: DMA-buf
*
* Creates an empty SG BO for DMA-buf import.
*
* Returns:
* A new GEM BO of the given DRM device, representing the memory
* described by the given DMA-buf attachment and scatter/gather table.
*/
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_resv *resv = attach->dmabuf->resv;
struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
bp.size = attach->dmabuf->size;
bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
@ -384,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto error;
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
@ -404,15 +419,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
* The main work is done by the &drm_gem_prime_import helper, which in turn
* uses &amdgpu_gem_prime_import_sg_table.
* Import a dma_buf into a the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@ -427,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
return drm_gem_prime_import(dev, dma_buf);
obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
if (IS_ERR(obj))
return obj;
attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
}
get_dma_buf(dma_buf);
obj->import_attach = attach;
return obj;
}


@ -25,11 +25,6 @@
#include <drm/drm_gem.h>
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,


@ -1381,8 +1381,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,


@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;


@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;


@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@ -764,6 +765,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@ -1228,6 +1230,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@ -1248,7 +1251,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@ -1261,7 +1263,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
if (slave && ttm->sg) {
if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
if (!ttm->sg) {
struct dma_buf_attachment *attach;
struct sg_table *sgt;
attach = gtt->gobj->import_attach;
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
ttm->sg = sgt;
}
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
@ -1288,9 +1302,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@ -1299,7 +1312,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
if (slave)
if (ttm->sg && gtt->gobj->import_attach) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
ttm->sg = NULL;
return;
}
if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);


@ -610,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@ -634,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}


@ -4596,7 +4596,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;


@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_state)
{
struct drm_gem_vram_object *gbo;
if (!new_state->fb)
return 0;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
}
static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_gem_vram_object *gbo;
if (!old_state->fb)
return;
gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
.prepare_fb = bochs_pipe_prepare_fb,
.cleanup_fb = bochs_pipe_cleanup_fb,
.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)


@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)


@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
mb();
mb(); /*Full memory barrier used before so that CLFLUSH is ordered*/
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
mb();
mb(); /*Also used after CLFLUSH so that all cache is flushed*/
}
#endif
@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
mb();
mb(); /*CLFLUSH is ordered only by using memory barriers*/
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb();
mb(); /*Make sure that all cache line entry is flushed*/
return;
}
@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
addr = (void *)(((unsigned long)addr) & -size);
mb();
mb(); /*CLFLUSH is only ordered with a full memory barrier*/
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
mb();
mb(); /*Ensure that evry data cache line entry is flushed*/
return;
}


@ -92,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
* It will automatically set up deferred I/O if the driver requires a shadow
* buffer.
*
* Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
* down by calling drm_fb_helper_fbdev_teardown().
* For other drivers, setup fbdev emulation by calling
* drm_fb_helper_fbdev_setup() and tear it down by calling
* drm_fb_helper_fbdev_teardown().
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@ -127,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
* mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
* deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
* mmap page writes.
*
* Deferred I/O is not compatible with SHMEM. Such drivers should request an
* fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@ -679,49 +684,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
* drm_fb_helper_defio_init - fbdev deferred I/O initialization
* @fb_helper: driver-allocated fbdev helper
*
* This function allocates &fb_deferred_io, sets callback to
* drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
* It should be called from the &drm_fb_helper_funcs->fb_probe callback.
* drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
*
* NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
* because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
* affect other instances of that &fb_ops.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
{
struct fb_info *info = fb_helper->fbdev;
struct fb_deferred_io *fbdefio;
struct fb_ops *fbops;
fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
if (!fbdefio || !fbops) {
kfree(fbdefio);
kfree(fbops);
return -ENOMEM;
}
info->fbdefio = fbdefio;
fbdefio->delay = msecs_to_jiffies(50);
fbdefio->deferred_io = drm_fb_helper_deferred_io;
*fbops = *info->fbops;
info->fbops = fbops;
fb_deferred_io_init(info);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_defio_init);
/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
@ -2356,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
* make deferred I/O work with all kinds of buffers.
* make deferred I/O work with all kinds of buffers. A shadow buffer can be
* requested explicitly by setting struct drm_mode_config.prefer_shadow or
* struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
* required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.


@ -1106,6 +1106,9 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
return -EINVAL;
if (obj->funcs && obj->funcs->mmap) {
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
ret = obj->funcs->mmap(obj, vma);
if (ret)
return ret;


@ -541,9 +541,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);


@ -3,10 +3,13 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@ -646,6 +649,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
* Helpers for struct drm_plane_helper_funcs
*/
/**
* drm_gem_vram_plane_helper_prepare_fb() - \
* Implements &struct drm_plane_helper_funcs.prepare_fb
* @plane: a DRM plane
* @new_state: the plane's new state
*
* During plane updates, this function pins the GEM VRAM
* objects of the plane's new framebuffer to VRAM. Call
* drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
size_t i;
struct drm_gem_vram_object *gbo;
int ret;
if (!new_state->fb)
return 0;
for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
if (!new_state->fb->obj[i])
continue;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto err_drm_gem_vram_unpin;
}
return 0;
err_drm_gem_vram_unpin:
while (i) {
--i;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
drm_gem_vram_unpin(gbo);
}
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
/**
* drm_gem_vram_plane_helper_cleanup_fb() - \
* Implements &struct drm_plane_helper_funcs.cleanup_fb
* @plane: a DRM plane
* @old_state: the plane's old state
*
* During plane updates, this function unpins the GEM VRAM
* objects of the plane's old framebuffer from VRAM. Complements
* drm_gem_vram_plane_helper_prepare_fb().
*/
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
size_t i;
struct drm_gem_vram_object *gbo;
if (!old_state->fb)
return;
for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
if (!old_state->fb->obj[i])
continue;
gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
drm_gem_vram_unpin(gbo);
}
}
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
/**
* drm_gem_vram_simple_display_pipe_prepare_fb() - \
* Implements &struct drm_simple_display_pipe_funcs.prepare_fb
* @pipe: a simple display pipe
* @new_state: the plane's new state
*
* During plane updates, this function pins the GEM VRAM
* objects of the plane's new framebuffer to VRAM. Call
* drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
int drm_gem_vram_simple_display_pipe_prepare_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_state)
{
return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
/**
* drm_gem_vram_simple_display_pipe_cleanup_fb() - \
* Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
* @pipe: a simple display pipe
* @old_state: the plane's old state
*
* During plane updates, this function unpins the GEM VRAM
* objects of the plane's old framebuffer from VRAM. Complements
* drm_gem_vram_simple_display_pipe_prepare_fb().
*/
void drm_gem_vram_simple_display_pipe_cleanup_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
}
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
/*
* PRIME helpers
*/
@ -887,12 +1013,11 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
struct ttm_bo_global *glob = vmm->bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}


@ -43,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
return pipe->funcs->mode_valid(crtc, mode);
return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,


@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
/*create a new connetor*/
/*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");


@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
hibmc_fb = to_hibmc_framebuffer(state->fb);
gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret) {
DRM_ERROR("failed to pin bo: %d", ret);
return;
}
gpu_addr = drm_gem_vram_offset(gbo);
if (gpu_addr < 0) {
drm_gem_vram_unpin(gbo);
return;
}
if (WARN_ON_ONCE(gpu_addr < 0))
return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};


@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
kfree((void *)sgt);
kfree(sgt);
return mtk_gem->kvaddr;
}
@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
kfree((void *)mtk_gem->pages);
kfree(mtk_gem->pages);
}


@ -13,97 +13,42 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
struct dev_pm_opp *opp;
unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
unsigned long target_volt, target_rate;
struct panfrost_device *pfdev = dev_get_drvdata(dev);
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
target_rate = dev_pm_opp_get_freq(opp);
target_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
if (old_clk_rate == target_rate)
return 0;
/*
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
*/
if (old_clk_rate < target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err) {
dev_err(dev, "Cannot set voltage %lu uV\n",
target_volt);
return err;
}
}
err = clk_set_rate(pfdev->clock, target_rate);
if (err) {
dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
err);
regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
pfdev->devfreq.cur_volt);
err = dev_pm_opp_set_rate(dev, *freq);
if (err)
return err;
}
if (old_clk_rate > target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err)
dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
}
pfdev->devfreq.cur_freq = target_rate;
pfdev->devfreq.cur_volt = target_volt;
*freq = clk_get_rate(pfdev->clock);
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
ktime_t now = ktime_get();
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
pfdev->devfreq.slot[i].busy_time = 0;
pfdev->devfreq.slot[i].idle_time = 0;
pfdev->devfreq.slot[i].time_last_update = now;
}
pfdev->devfreq.busy_time = 0;
pfdev->devfreq.idle_time = 0;
pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
int i;
struct panfrost_device *pfdev = dev_get_drvdata(dev);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
panfrost_devfreq_update_utilization(pfdev, i);
}
panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
pfdev->devfreq.slot[0].idle_time));
status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
pfdev->devfreq.idle_time));
status->busy_time = 0;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
}
/* We're scheduling only to one core atm, so don't divide for now */
/* status->busy_time /= NUM_JOB_SLOTS; */
status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@ -119,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
*freq = pfdev->devfreq.cur_freq;
*freq = clk_get_rate(pfdev->clock);
return 0;
}
@ -135,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@ -144,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
cur_freq = clk_get_rate(pfdev->clock);
opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@ -174,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
int i;
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@ -194,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@ -204,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
last = pfdev->devfreq.slot[slot].time_last_update;
last = pfdev->devfreq.time_last_update;
/* If we last recorded a transition to busy, we have been idle since */
if (devfreq_slot->busy)
pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
if (atomic_read(&pfdev->devfreq.busy_count) > 0)
pfdev->devfreq.busy_time += ktime_sub(now, last);
else
pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
pfdev->devfreq.idle_time += ktime_sub(now, last);
pfdev->devfreq.slot[slot].time_last_update = now;
pfdev->devfreq.time_last_update = now;
}
/* The job scheduler is expected to call this at every transition busy <-> idle */
void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
{
struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
panfrost_devfreq_update_utilization(pfdev, slot);
devfreq_slot->busy = !devfreq_slot->busy;
panfrost_devfreq_update_utilization(pfdev);
atomic_inc(&pfdev->devfreq.busy_count);
}
void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
int count;
panfrost_devfreq_update_utilization(pfdev);
count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
WARN_ON(count < 0);
}


@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */


@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
struct panfrost_devfreq_slot {
ktime_t busy_time;
ktime_t idle_time;
ktime_t time_last_update;
bool busy;
};
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
unsigned long cur_freq;
unsigned long cur_volt;
struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
ktime_t busy_time;
ktime_t idle_time;
ktime_t time_last_update;
atomic_t busy_count;
} devfreq;
};


@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
panfrost_devfreq_record_transition(pfdev, js);
panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@ -404,7 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
panfrost_devfreq_record_transition(pfdev, js);
panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@ -467,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
panfrost_devfreq_record_transition(pfdev, j);
panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@ -568,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
/* Check whether the hardware is idle */
if (atomic_read(&pfdev->devfreq.busy_count))
return false;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
/* Check whether the hardware is idle */
if (pfdev->devfreq.slot[i].busy)
return false;
}
return true;


@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
pl111_mode_valid(struct drm_crtc *crtc,
pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
struct drm_device *drm = crtc->dev;
struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;


@ -355,6 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
/* qxl image */


@ -167,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@ -179,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@ -212,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);


@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
!no_intr, NULL, true);
!no_intr, NULL);
if (ret)
return ret;
@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
glob = bdev->glob;
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
ttm_bo_add_to_lru(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}


@ -110,8 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@ -319,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct qxl_device *rdev = dev->dev_private;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif


@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;


@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}


@ -622,43 +622,41 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
* drm_sched_cleanup_jobs - destroy finished jobs
* drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
* Remove all finished jobs from the mirror list and destroy them.
* Returns the next finished job from the mirror list (if there is one)
* ready for it to be destroyed.
*/
static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
unsigned long flags;
/* Don't destroy jobs while the timeout worker is running */
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr))
return;
return NULL;
spin_lock_irqsave(&sched->job_list_lock, flags);
while (!list_empty(&sched->ring_mirror_list)) {
struct drm_sched_job *job;
job = list_first_entry(&sched->ring_mirror_list,
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
if (!dma_fence_is_signaled(&job->s_fence->finished))
break;
spin_lock_irqsave(&sched->job_list_lock, flags);
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched->ops->free_job(job);
} else {
job = NULL;
/* queue timeout for next job */
drm_sched_start_timeout(sched);
}
/* queue timeout for next job */
spin_lock_irqsave(&sched->job_list_lock, flags);
drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
return job;
}
/**
@ -698,12 +696,19 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
(drm_sched_cleanup_jobs(sched),
(cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
kthread_should_stop()));
kthread_should_stop());
if (cleanup_job) {
sched->ops->free_job(cleanup_job);
/* queue timeout for next job */
drm_sched_start_timeout(sched);
}
if (!entity)
continue;


@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);


@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->bdev->glob->bo_count);
atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@ -311,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
lru = &pos->first->bdev->glob->swap_lru[i];
lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);
@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
ttm_bo_add_to_lru(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;
@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
if (locked) {
if (locked)
ttm_bo_unreserve(bo);
} else {
spin_lock(&glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&glob->lru_lock);
}
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@ -1072,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, mem);
spin_unlock(&bo->bdev->glob->lru_lock);
}
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, mem);
spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@ -1168,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
@ -1294,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@ -1323,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@ -1357,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
atomic_inc(&bo->bdev->glob->bo_count);
atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@ -1387,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
spin_unlock(&bdev->glob->lru_lock);
}
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@ -1489,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@ -1658,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
glob->mem_glob = &ttm_mem_glob;
glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@ -1683,10 +1650,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@ -1755,7 +1722,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@ -1835,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
int ret = 0;
/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
ret = ttm_bo_reserve(bo, true, no_wait, NULL);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, true, no_wait);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@ -1959,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
;
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);


@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@ -153,7 +151,6 @@ retry:
}
return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
atomic_inc(&bo->bdev->glob->bo_count);
atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(fbo->base.base.resv);
ret = dma_resv_trylock(fbo->base.base.resv);
if (bo->base.resv == &bo->base._resv)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
ttm_bo_unreserve(ghost);
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;


@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
if (bo->moving != moving) {
spin_lock(&bdev->glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bdev->glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@ -480,6 +480,13 @@ EXPORT_SYMBOL(ttm_bo_mmap);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
ttm_bo_get(bo);
/*
* FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
* removed. Add it back here until the rest of TTM works without it.
*/
vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}


@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
ttm_bo_del_from_lru(bo);
}
}
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
if (list_empty(list))
return;
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->bdev->glob;
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups, bool del_lru)
struct list_head *dups)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->bdev->glob;
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
dma_resv_unlock(bo->base.resv);
ret = -EBUSY;
} else if (ret == -EALREADY && dups) {
if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
if (del_lru) {
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
}
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
struct ttm_buffer_object *bo = entry->bo;
if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
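
Since buffer objects now stay on the LRU permanently, ttm_eu_reserve_buffers() has lost its del_lru argument. A rough sketch of a caller's reserve/submit/fence sequence against the new signature (driver-side names and the surrounding submission code are assumptions):

static int foo_submit(struct ww_acquire_ctx *ticket, struct list_head *list,
		      struct dma_fence *fence)
{
	struct list_head duplicates;
	int r;

	INIT_LIST_HEAD(&duplicates);

	/* reserve every BO on the list; no del_lru parameter any more */
	r = ttm_eu_reserve_buffers(ticket, list, true, &duplicates);
	if (r)
		return r;

	/* ... validate buffers and queue the command submission ... */

	/* attach the fence and move the BOs back to the LRU tail */
	ttm_eu_fence_buffer_objects(ticket, list, fence);
	return 0;
}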


@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
ret = ttm_bo_swapout(glob->bo_glob, ctx);
ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;


@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;


@ -886,8 +886,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@ -991,8 +991,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;


@ -334,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
static int vbox_primary_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_gem_vram_object *gbo;
int ret;
if (!new_state->fb)
return 0;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
return ret;
}
static void vbox_primary_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_gem_vram_object *gbo;
if (!old_state->fb)
return;
gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@ -492,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
static int vbox_cursor_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_gem_vram_object *gbo;
if (!new_state->fb)
return 0;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
}
static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_gem_vram_object *gbo;
if (!plane->state->fb)
return;
gbo = drm_gem_vram_of_gem(plane->state->fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@ -524,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
.prepare_fb = vbox_cursor_prepare_fb,
.cleanup_fb = vbox_cursor_cleanup_fb,
.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@ -546,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
.prepare_fb = vbox_primary_prepare_fb,
.cleanup_fb = vbox_primary_cleanup_fb,
.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@ -890,7 +837,7 @@ static int vbox_connector_init(struct drm_device *dev,
}
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = drm_gem_fb_create,
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};


@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
ttm_bo_synccpu_write_release(&user_bo->vbo.base);
atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = dma_resv_wait_timeout_rcu
@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
ret = ttm_bo_synccpu_write_grab
(bo, !!(flags & drm_vmw_synccpu_dontblock));
ret = ttm_bo_reserve(bo, true, nonblock, NULL);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
atomic_inc(&user_bo->vbo.cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->vbo.base);
atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}


@ -102,6 +102,8 @@ struct vmw_fpriv {
* @base: The TTM buffer object
* @res_list: List of resources using this buffer object as a backing MOB
* @pin_count: pin depth
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
@ -110,6 +112,7 @@ struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
s32 pin_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */


@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
true);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;


@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
if (vbo->pin_count > 0)
return 0;


@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
NULL, true);
NULL);
}
/**


@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
display_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
container_of(crtc, struct xen_drm_front_drm_pipeline,
pipe.crtc);
container_of(pipe, struct xen_drm_front_drm_pipeline,
pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;


@ -235,7 +235,6 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);


@ -159,8 +159,9 @@ struct drm_gem_object_funcs {
*
* The callback is used by both drm_gem_mmap_obj() and
* drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
* used, the @mmap callback must set vma->vm_ops instead.
*
* used, the @mmap callback must set vma->vm_ops instead. The @mmap
* callback is always called with a 0 offset. The caller will remove
* the fake offset as necessary.
*/
int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
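
Because the core now strips the fake offset before invoking &drm_gem_object_funcs.mmap, the callback only has to set up the VMA. A minimal sketch (hypothetical foo driver; foo_gem_vm_ops is assumed to be defined elsewhere):

static int foo_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* vma->vm_pgoff arrives with the fake offset already removed */
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_private_data = obj;
	vma->vm_ops = &foo_gem_vm_ops;

	return 0;
}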


@ -13,6 +13,9 @@
#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
struct drm_plane;
struct drm_plane_state;
struct drm_simple_display_pipe;
struct drm_vram_mm_funcs;
struct filp;
struct vm_area_struct;
@ -124,6 +127,28 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, uint64_t *offset);
/*
* Helpers for struct drm_plane_helper_funcs
*/
int
drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
/*
* Helpers for struct drm_simple_display_pipe_funcs
*/
int drm_gem_vram_simple_display_pipe_prepare_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_state);
void drm_gem_vram_simple_display_pipe_cleanup_fb(
struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for \
&struct drm_driver
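
Drivers built on the simple display pipe can plug the new VRAM helpers in directly. A minimal sketch (hypothetical foo driver; the enable/disable/update callbacks are assumed to exist elsewhere):

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable     = foo_pipe_enable,
	.disable    = foo_pipe_disable,
	.update     = foo_pipe_update,
	/* VRAM-backed fb pinning helpers introduced in this series */
	.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
	.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};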


@ -49,7 +49,7 @@ struct drm_simple_display_pipe_funcs {
*
* drm_mode_status Enum
*/
enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode);
/**


@ -147,7 +147,6 @@ struct ttm_tt;
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@ -198,12 +197,6 @@ struct ttm_buffer_object {
struct ttm_tt *ttm;
bool evicted;
/**
* Members protected by the bo::reserved lock only when written to.
*/
atomic_t cpu_writers;
/**
* Members protected by the bdev::lru_lock.
*/
@ -367,30 +360,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
/**
* ttm_bo_add_to_lru
*
* @bo: The buffer object.
*
* Add this bo to the relevant mem type lru and, if it's backed by
* system pages (ttms) to the swap list.
* This function must be called with struct ttm_bo_global::lru_lock held, and
* is typically called immediately prior to unreserving a bo.
*/
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_del_from_lru
*
* @bo: The buffer object.
*
* Remove this bo from all lru lists used to lookup and reserve an object.
* This function must be called with struct ttm_bo_global::lru_lock held,
* and is usually called just immediately after the bo has been reserved to
* avoid recursive reservation from lru lists.
*/
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
/**
* ttm_bo_move_to_lru_tail
*
@ -441,31 +410,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
* command submission that affects the buffer will return -EBUSY
* until ttm_bo_synccpu_write_release is called.
*
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_synccpu_write_release:
*
* @bo : The buffer object.
*
* Releases a synccpu lock.
*/
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
* ttm_bo_acc_size
*

Просмотреть файл

@ -423,7 +423,6 @@ extern struct ttm_bo_global {
*/
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
spinlock_t lru_lock;
@ -467,7 +466,6 @@ struct ttm_bo_device {
* Constant after bo device init / atomic.
*/
struct list_head device_list;
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
@ -631,9 +629,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
/**
* __ttm_bo_reserve:
*
@ -727,15 +722,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
int ret;
WARN_ON(!kref_read(&bo->kref));
ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
return ret;
return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}
/**
@ -762,9 +751,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
else
dma_resv_lock_slow(bo->base.resv, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
else if (ret == -EINTR)
if (ret == -EINTR)
ret = -ERESTARTSYS;
return ret;
@ -779,12 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->glob->lru_lock);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
dma_resv_unlock(bo->base.resv);
}


@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups, bool del_lru);
struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.


@ -65,7 +65,6 @@
struct ttm_mem_zone;
extern struct ttm_mem_global {
struct kobject kobj;
struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;


@ -42,6 +42,18 @@ struct dma_buf_ops {
*/
bool cache_sgt_mapping;
/**
* @dynamic_mapping:
*
* If true the framework makes sure that the map/unmap_dma_buf
* callbacks are always called with the dma_resv object locked.
*
* If false the framework makes sure that the map/unmap_dma_buf
* callbacks are always called without the dma_resv object locked.
* Mutually exclusive with @cache_sgt_mapping.
*/
bool dynamic_mapping;
/**
* @attach:
*
@ -109,6 +121,9 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
@ -267,14 +282,16 @@ struct dma_buf_ops {
* struct dma_buf - shared buffer object
* @size: size of the buffer
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @attachments: list of dma_buf_attachment that denotes all devices attached,
* protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
* vmap/unmap, and accesses to name
* vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @name: userspace-provided name; useful for accounting and debugging.
* @name: userspace-provided name; useful for accounting and debugging,
* protected by @resv.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@ -323,10 +340,12 @@ struct dma_buf {
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
* @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
* dma_resv lock held.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@ -343,6 +362,7 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
bool dynamic_mapping;
void *priv;
};
@ -394,10 +414,40 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
/**
* dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
* @dmabuf: the DMA-buf to check
*
* Returns true if a DMA-buf exporter wants to be called with the dma_resv
* locked for the map/unmap callbacks, false if it doesn't want to be called
* with the lock held.
*/
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
return dmabuf->ops->dynamic_mapping;
}
/**
* dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
* mappings
* @attach: the DMA-buf attachment to check
*
* Returns true if a DMA-buf importer wants to call the map/unmap functions with
* the dma_resv lock held.
*/
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
return attach->dynamic_mapping;
}
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
bool dynamic_mapping);
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@ -409,6 +459,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
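
An importer that can cope with map/unmap being called under the reservation lock opts in through dma_buf_dynamic_attach(). A sketch of the import path under that convention (importer-side names are assumptions, error handling trimmed):

static struct sg_table *foo_import_map(struct dma_buf *dmabuf,
				       struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* true: this importer may be called with dmabuf->resv held */
	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* dynamic attachments map with the reservation lock held */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	return sgt;
}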


@ -68,7 +68,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
* @connection: indicate whether doing connetion or not by user.
* @connection: indicate whether doing connection or not by user.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.