drm/panfrost: Add madvise and shrinker support

Add support for madvise and a shrinker similar to other drivers. This
allows userspace to mark BOs which can be freed when there is memory
pressure.

Unlike other implementations, we don't depend on struct_mutex. The
driver maintains a list of BOs which can be freed when the shrinker
is called. Access to the list is serialized with the shrinker_lock.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190805143358.21245-2-robh@kernel.org
Rob Herring, 2019-08-05 08:33:58 -06:00
Parent: 17acb9f35e
Commit: 013b651013
8 changed files with 180 additions and 2 deletions

drivers/gpu/drm/panfrost/Makefile

@@ -5,6 +5,7 @@ panfrost-y := \
panfrost_device.o \
panfrost_devfreq.o \
panfrost_gem.o \
panfrost_gem_shrinker.o \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \

drivers/gpu/drm/panfrost/TODO

@@ -14,7 +14,5 @@
- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
- Support for madvise and a shrinker.
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.

drivers/gpu/drm/panfrost/panfrost_device.h

@@ -85,6 +85,10 @@ struct panfrost_device {
struct mutex sched_lock;
struct mutex reset_lock;
struct mutex shrinker_lock;
struct list_head shrinker_list;
struct shrinker shrinker;
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;

drivers/gpu/drm/panfrost/panfrost_drv.c

@@ -333,6 +333,38 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
mutex_lock(&pfdev->shrinker_lock);
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
}
drm_gem_object_put_unlocked(gem_obj);
return 0;
}
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -384,6 +416,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
@@ -432,6 +465,8 @@ static int panfrost_probe(struct platform_device *pdev)
pfdev->ddev = ddev;
spin_lock_init(&pfdev->mm_lock);
mutex_init(&pfdev->shrinker_lock);
INIT_LIST_HEAD(&pfdev->shrinker_list);
/* 4G enough for now. can be 48-bit */
drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
@@ -462,6 +497,8 @@ static int panfrost_probe(struct platform_device *pdev)
if (err < 0)
goto err_out1;
panfrost_gem_shrinker_init(ddev);
return 0;
err_out1:
@@ -478,6 +515,7 @@ static int panfrost_remove(struct platform_device *pdev)
struct drm_device *ddev = pfdev->ddev;
drm_dev_unregister(ddev);
panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
pm_runtime_put_sync_autosuspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);

drivers/gpu/drm/panfrost/panfrost_gem.c

@@ -26,6 +26,11 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_mm_remove_node(&bo->node);
spin_unlock(&pfdev->mm_lock);
mutex_lock(&pfdev->shrinker_lock);
if (!list_empty(&bo->base.madv_list))
list_del(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_shmem_free_object(obj);
}

drivers/gpu/drm/panfrost/panfrost_gem.h

@@ -27,4 +27,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
#endif /* __PANFROST_GEM_H__ */

drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c (new file)

@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 Arm Ltd.
*
* Based on msm_gem_freedreno.c:
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/list.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
static unsigned long
panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct panfrost_device *pfdev =
container_of(shrinker, struct panfrost_device, shrinker);
struct drm_gem_shmem_object *shmem;
unsigned long count = 0;
if (!mutex_trylock(&pfdev->shrinker_lock))
return 0;
list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
if (drm_gem_shmem_is_purgeable(shmem))
count += shmem->base.size >> PAGE_SHIFT;
}
mutex_unlock(&pfdev->shrinker_lock);
return count;
}
static void panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
mutex_lock(&shmem->pages_lock);
panfrost_mmu_unmap(to_panfrost_bo(obj));
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
}
static unsigned long
panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct panfrost_device *pfdev =
container_of(shrinker, struct panfrost_device, shrinker);
struct drm_gem_shmem_object *shmem, *tmp;
unsigned long freed = 0;
if (!mutex_trylock(&pfdev->shrinker_lock))
return SHRINK_STOP;
list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
if (freed >= sc->nr_to_scan)
break;
if (drm_gem_shmem_is_purgeable(shmem)) {
panfrost_gem_purge(&shmem->base);
freed += shmem->base.size >> PAGE_SHIFT;
list_del_init(&shmem->madv_list);
}
}
mutex_unlock(&pfdev->shrinker_lock);
if (freed > 0)
pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
return freed;
}
/**
* panfrost_gem_shrinker_init - Initialize panfrost shrinker
* @dev: DRM device
*
* This function registers and sets up the panfrost shrinker.
*/
void panfrost_gem_shrinker_init(struct drm_device *dev)
{
struct panfrost_device *pfdev = dev->dev_private;
pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
pfdev->shrinker.seeks = DEFAULT_SEEKS;
WARN_ON(register_shrinker(&pfdev->shrinker));
}
/**
* panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
* @dev: DRM device
*
* This function unregisters the panfrost shrinker.
*/
void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
{
struct panfrost_device *pfdev = dev->dev_private;
if (pfdev->shrinker.nr_deferred) {
unregister_shrinker(&pfdev->shrinker);
}
}

include/uapi/drm/panfrost_drm.h

@@ -20,6 +20,7 @@ extern "C" {
#define DRM_PANFROST_GET_BO_OFFSET 0x05
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -27,6 +28,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -198,6 +200,26 @@ struct drm_panfrost_perfcnt_dump {
__u64 buf_ptr;
};
/* madvise provides a way to tell the kernel whether a buffer's contents
* can be discarded under memory pressure, which is useful for a userspace
* BO cache where we want to optimistically hold on to the buffer allocation
* and potential mmap, but allow the pages to be discarded under memory
* pressure.
*
* Typical usage would involve madvise(DONTNEED) when a buffer enters the BO
* cache, and madvise(WILLNEED) when trying to recycle a buffer from the BO
* cache. In the WILLNEED case, 'retained' indicates to userspace whether the
* backing pages still exist.
*/
#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
struct drm_panfrost_madvise {
__u32 handle; /* in, GEM handle */
__u32 madv; /* in, PANFROST_MADV_x */
__u32 retained; /* out, whether backing store still exists */
};
#if defined(__cplusplus)
}
#endif
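
For illustration only (not part of the patch): a minimal userspace sketch of the BO-cache flow described in the madvise comment above, using libdrm's drmIoctl() together with the uapi this patch adds. Error handling is omitted and the bo_cache_put()/bo_cache_get() helper names are made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include "panfrost_drm.h"	/* the uapi header extended above */

/* Mark an idle BO as purgeable when it goes back into the userspace cache. */
static void bo_cache_put(int fd, uint32_t handle)
{
	struct drm_panfrost_madvise req = {
		.handle = handle,
		.madv = PANFROST_MADV_DONTNEED,
	};

	drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req);
}

/* Try to recycle a cached BO; returns true only if the backing pages survived. */
static bool bo_cache_get(int fd, uint32_t handle)
{
	struct drm_panfrost_madvise req = {
		.handle = handle,
		.madv = PANFROST_MADV_WILLNEED,
	};

	if (drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req) != 0)
		return false;

	return req.retained != 0;	/* 0: the shrinker purged the pages, reallocate */
}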