omapdrm changes for v4.5
* enable DRIVER_ATOMIC
* improved TILER performance
* cleanups preparing for DMAbuf import
* fbdev emulation is now optional
* minor fixes

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJWhPg8AAoJEPo9qoy8lh71UPIQALJ13oNAIjmII+XwTUG2cgWf
yDx+kb1c+FLq7q6dxayFnEWFSyZO1ldOvT65+i1khRAUyLMjDhhBmMBwASz20BLl
jBdBR0PDsQs6qVMEvcRRdTW+rW+xjAmnUmlJflzicnsuybzzmHmzt00B0cmICFrD
BiWHNnLtRFaitMeuhKsK2WrrhmhcYiOgXaC4RPPBbBG4z7k3A2w7M18I9flQqYmR
cLfrj7bRR4OF5PreXFOYcoNyfnpGLEHP9vi+9tamj43sf1EcGR38exNwujQlKgG1
gtLWdGHsgl54VDwmYGOjzrkWvV2oaX9ZwECQNGelq2qb5e42cD6Qnd15/n0SqAYS
8D8a3DpsnipalFFtw1+lLByM3DrtvXottVSGalQ81qk4SIlnD5BKq/nmIgOWDvV0
/FZ6kCkK9OXF7A5zZHH0yBVzC+/y6P+IiyhxB9vGacEcG813KUZMSWA/z3ck352l
6gPgJPAdlIYEObpCi2+TAh5LeXBVX8xGyRf3zVZaKA/54Hb1fPZHe5X4tQh0NCNa
wr7SfiU7BxDBopDkNjWH68JSe7yTjOm/yql0vdCRY7Os0JmcBBgZjVx6bg9cHoWZ
tk2WNgdAZ8/6hGCvujUN5PJP0vVHKtxLlc69p2r0YMOdTGwVPxedEm7JC0JrWocU
7X/rbZFzJfNU9TjBjBEq
=z3XF
-----END PGP SIGNATURE-----

Merge tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next

omapdrm changes for v4.5

* enable DRIVER_ATOMIC
* improved TILER performance
* cleanups preparing for DMAbuf import
* fbdev emulation is now optional
* minor fixes

* tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux:
  drm/omap: remove obsolete manager assignment
  drm/omap: set DRIVER_ATOMIC for omapdrm
  drm/omap: remove unused plugin defines
  drm/omap: Use bitmaps for TILER placement
  drm: omapdrm: gem: Remove check for impossible condition
  drm: omapdrm: gem: Simplify error handling when creating GEM object
  drm: omapdrm: gem: Don't free mmap offset twice
  drm: omapdrm: gem: Fix GEM object destroy in error path
  drm: omapdrm: gem: Free the correct memory object
  drm: omapdrm: gem: Mask out private flags passed from userspace
  drm: omapdrm: gem: Move global usergart variable to omap_drm_private
  drm: omapdrm: gem: Group functions by purpose
  drm: omapdrm: gem: Remove forward declarations
  drm: omapdrm: gem: Remove unused function prototypes
  drm: omapdrm: Make fbdev emulation optional
  drm: omapdrm: Fix plane state free in plane reset handler
  drm: omapdrm: move omap_plane_reset()
  drm/omap: Use platform_register/unregister_drivers()
  drm: omapdrm: tiler: Remove unneded module alias for tiler
Commit c11b898963
@@ -12,10 +12,11 @@ omapdrm-y := omap_drv.o \
    omap_encoder.o \
    omap_connector.o \
    omap_fb.o \
-   omap_fbdev.o \
    omap_gem.o \
    omap_gem_dmabuf.o \
    omap_dmm_tiler.o \
    tcm-sita.o

+omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
+
 obj-$(CONFIG_DRM_OMAP) += omapdrm.o

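Note: the omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o line above is
the usual kbuild idiom for optional objects. With the option enabled, the
variable expands to omapdrm-y and omap_fbdev.o is linked into the module;
with it disabled, the line expands to the unused variable omapdrm- and the
file is not compiled at all.
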
@@ -51,6 +51,7 @@ static int mm_show(struct seq_file *m, void *arg)
    return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 static int fb_show(struct seq_file *m, void *arg)
 {
    struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -73,12 +74,15 @@ static int fb_show(struct seq_file *m, void *arg)

    return 0;
 }
+#endif

 /* list of debufs files that are applicable to all devices */
 static struct drm_info_list omap_debugfs_list[] = {
    {"gem", gem_show, 0},
    {"mm", mm_show, 0},
+#ifdef CONFIG_DRM_FBDEV_EMULATION
    {"fb", fb_show, 0},
+#endif
 };

 /* list of debugfs files that are specific to devices with dmm/tiler */

@@ -363,6 +363,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
    u32 min_align = 128;
    int ret;
    unsigned long flags;
+   size_t slot_bytes;

    BUG_ON(!validfmt(fmt));

@@ -371,13 +372,15 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
    h = DIV_ROUND_UP(h, geom[fmt].slot_h);

    /* convert alignment to slots */
-   min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
-   align = ALIGN(align, min_align);
-   align /= geom[fmt].slot_w * geom[fmt].cpp;
+   slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
+   min_align = max(min_align, slot_bytes);
+   align = (align > min_align) ? ALIGN(align, min_align) : min_align;
+   align /= slot_bytes;

    block->fmt = fmt;

-   ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+   ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
+               &block->area);
    if (ret) {
        kfree(block);
        return ERR_PTR(-ENOMEM);
@@ -739,8 +742,7 @@ static int omap_dmm_probe(struct platform_device *dev)
       programming during reill operations */
    for (i = 0; i < omap_dmm->num_lut; i++) {
        omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
-                       omap_dmm->container_height,
-                       NULL);
+                       omap_dmm->container_height);

        if (!omap_dmm->tcm[i]) {
            dev_err(&dev->dev, "failed to allocate container\n");
@@ -1030,4 +1032,3 @@ struct platform_driver omap_dmm_driver = {
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
 MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
-MODULE_ALIAS("platform:" DMM_DRIVER_NAME);

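Note on the tiler_reserve_2d() change above: slot_bytes is the byte width of
one TILER slot (slot_w * cpp), now threaded down so the allocator can respect
PAGE_SIZE banding. As a worked example with assumed numbers (not taken from
the driver's geometry tables): for slot_bytes = 64 and a requested align of
256, min_align becomes max(128, 64) = 128, align stays ALIGN(256, 128) = 256
bytes, and align / slot_bytes = 4 slots is what tcm_reserve_2d() receives.
The new -1 offset argument means no fixed byte offset within a 4 KiB band is
being requested, matching the (offset > 0) checks in the rewritten
tcm-sita.c further down.
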
@@ -547,14 +547,19 @@ static int ioctl_set_param(struct drm_device *dev, void *data,
    return 0;
 }

+#define OMAP_BO_USER_MASK  0x00ffffff  /* flags settable by userspace */
+
 static int ioctl_gem_new(struct drm_device *dev, void *data,
        struct drm_file *file_priv)
 {
    struct drm_omap_gem_new *args = data;
+   u32 flags = args->flags & OMAP_BO_USER_MASK;
+
    VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
-           args->size.bytes, args->flags);
-   return omap_gem_new_handle(dev, file_priv, args->size,
-           args->flags, &args->handle);
+        args->size.bytes, flags);
+
+   return omap_gem_new_handle(dev, file_priv, args->size, flags,
+           &args->handle);
 }

 static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
@@ -692,10 +697,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
        drm_crtc_vblank_off(priv->crtcs[i]);

    priv->fbdev = omap_fbdev_init(dev);
-   if (!priv->fbdev) {
-       dev_warn(dev->dev, "omap_fbdev_init failed\n");
-       /* well, limp along without an fbdev.. maybe X11 will work? */
-   }

    /* store off drm_device for use in pm ops */
    dev_set_drvdata(dev->dev, dev);
@@ -831,7 +832,8 @@ static const struct file_operations omapdriver_fops = {
 };

 static struct drm_driver omap_drm_driver = {
-   .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+   .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+       DRIVER_ATOMIC,
    .load = dev_load,
    .unload = dev_unload,
    .open = dev_open,
@@ -928,35 +930,23 @@ static struct platform_driver pdev = {
    .remove = pdev_remove,
 };

+static struct platform_driver * const drivers[] = {
+   &omap_dmm_driver,
+   &pdev,
+};
+
 static int __init omap_drm_init(void)
 {
-   int r;
-
    DBG("init");

-   r = platform_driver_register(&omap_dmm_driver);
-   if (r) {
-       pr_err("DMM driver registration failed\n");
-       return r;
-   }
-
-   r = platform_driver_register(&pdev);
-   if (r) {
-       pr_err("omapdrm driver registration failed\n");
-       platform_driver_unregister(&omap_dmm_driver);
-       return r;
-   }
-
-   return 0;
+   return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 }

 static void __exit omap_drm_fini(void)
 {
    DBG("fini");

-   platform_driver_unregister(&pdev);
-
-   platform_driver_unregister(&omap_dmm_driver);
+   platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 }

 /* need late_initcall() so we load after dss_driver's are loaded */

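The init/exit simplification above works because platform_register_drivers()
performs the same unwinding the removed code did by hand: it registers the
array in order and, if any registration fails, unregisters the drivers
already registered before returning the error. A simplified sketch of those
semantics (not the actual drivers/base/platform.c implementation):

    int register_drivers_sketch(struct platform_driver * const *drv,
                    unsigned int count)
    {
        unsigned int i;
        int ret = 0;

        for (i = 0; i < count; i++) {
            ret = platform_driver_register(drv[i]);
            if (ret)
                break;
        }

        /* on failure, unwind the registrations done so far */
        while (ret && i--)
            platform_driver_unregister(drv[i]);

        return ret;
    }
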
@@ -36,11 +36,7 @@

 #define MODULE_NAME "omapdrm"

-/* max # of mapper-id's that can be assigned.. todo, come up with a better
- * (but still inexpensive) way to store/access per-buffer mapper private
- * data..
- */
-#define MAX_MAPPERS 2
+struct omap_drm_usergart;

 /* parameters which describe (unrotated) coordinates of scanout within a fb: */
 struct omap_drm_window {
@@ -97,6 +93,7 @@ struct omap_drm_private {
    /* list of GEM objects: */
    struct list_head obj_list;

+   struct omap_drm_usergart *usergart;
    bool has_dmm;

    /* properties: */
@@ -138,8 +135,18 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_drm_irq_uninstall(struct drm_device *dev);
 int omap_drm_irq_install(struct drm_device *dev);

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
 void omap_fbdev_free(struct drm_device *dev);
+#else
+static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+   return NULL;
+}
+static inline void omap_fbdev_free(struct drm_device *dev)
+{
+}
+#endif

 struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
 enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);

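The header change above is the standard pattern for compiling a feature out:
with CONFIG_DRM_FBDEV_EMULATION disabled, omap_fbdev_init() and
omap_fbdev_free() become empty static inlines, so callers such as dev_load()
need no #ifdef of their own and the dead calls vanish at compile time.
Reduced to its skeleton (hypothetical feature name, for illustration only):

    #ifdef CONFIG_FOO
    struct foo *foo_init(struct device *dev);
    #else
    static inline struct foo *foo_init(struct device *dev)
    {
        return NULL;    /* feature compiled out */
    }
    #endif
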
@@ -110,8 +110,6 @@ static int omap_encoder_update(struct drm_encoder *encoder,
    struct omap_dss_driver *dssdrv = dssdev->driver;
    int ret;

-   dssdev->src->manager = omap_dss_get_overlay_manager(channel);
-
    if (dssdrv->check_timings) {
        ret = dssdrv->check_timings(dssdev, timings);
    } else {

@@ -295,6 +295,10 @@ fini:
    drm_fb_helper_fini(helper);
 fail:
    kfree(fbdev);
+
+   dev_warn(dev->dev, "omap_fbdev_init failed\n");
+   /* well, limp along without an fbdev.. maybe X11 will work? */
+
    return NULL;
 }

@@ -25,24 +25,15 @@
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"

-/* remove these once drm core helpers are merged */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
-       bool dirty, bool accessed);
-int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
-
 /*
  * GEM buffer object implementation.
  */

-#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
-
 /* note: we use upper 8 bits of flags for driver-internal flags: */
-#define OMAP_BO_DMA        0x01000000  /* actually is physically contiguous */
+#define OMAP_BO_DMA            0x01000000  /* actually is physically contiguous */
 #define OMAP_BO_EXT_SYNC   0x02000000  /* externally allocated sync object */
 #define OMAP_BO_EXT_MEM    0x04000000  /* externally allocated memory */

-
 struct omap_gem_object {
    struct drm_gem_object base;

@@ -119,8 +110,7 @@ struct omap_gem_object {
    } *sync;
 };

-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-static uint64_t mmap_offset(struct drm_gem_object *obj);
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
  * not necessarily pinned in TILER all the time, and (b) when they are

@@ -134,27 +124,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
  * for later..
  */
 #define NUM_USERGART_ENTRIES 2
-struct usergart_entry {
+struct omap_drm_usergart_entry {
    struct tiler_block *block;  /* the reserved tiler block */
    dma_addr_t paddr;
    struct drm_gem_object *obj; /* the current pinned obj */
    pgoff_t obj_pgoff;      /* page offset of obj currently
                       mapped in */
 };
-static struct {
-   struct usergart_entry entry[NUM_USERGART_ENTRIES];
+
+struct omap_drm_usergart {
+   struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
    int height;         /* height in rows */
    int height_shift;       /* ilog2(height in rows) */
    int slot_shift;     /* ilog2(width per slot) */
    int stride_pfn;     /* stride in pages */
    int last;           /* index of last used entry */
-} *usergart;
+};
+
+/* -----------------------------------------------------------------------------
+ * Helpers
+ */
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+   struct drm_device *dev = obj->dev;
+   int ret;
+   size_t size;
+
+   WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+   /* Make it mmapable */
+   size = omap_gem_mmap_size(obj);
+   ret = drm_gem_create_mmap_offset_size(obj, size);
+   if (ret) {
+       dev_err(dev->dev, "could not allocate mmap offset\n");
+       return 0;
+   }
+
+   return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+   return obj->filp != NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Eviction
+ */

 static void evict_entry(struct drm_gem_object *obj,
-       enum tiler_fmt fmt, struct usergart_entry *entry)
+       enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 {
    struct omap_gem_object *omap_obj = to_omap_bo(obj);
-   int n = usergart[fmt].height;
+   struct omap_drm_private *priv = obj->dev->dev_private;
+   int n = priv->usergart[fmt].height;
    size_t size = PAGE_SIZE * n;
    loff_t off = mmap_offset(obj) +
            (entry->obj_pgoff << PAGE_SHIFT);

@@ -180,46 +212,25 @@ static void evict_entry(struct drm_gem_object *obj,
 static void evict(struct drm_gem_object *obj)
 {
    struct omap_gem_object *omap_obj = to_omap_bo(obj);
+   struct omap_drm_private *priv = obj->dev->dev_private;

    if (omap_obj->flags & OMAP_BO_TILED) {
        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
        int i;

-       if (!usergart)
-           return;
-
        for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
-           struct usergart_entry *entry = &usergart[fmt].entry[i];
+           struct omap_drm_usergart_entry *entry =
+               &priv->usergart[fmt].entry[i];

            if (entry->obj == obj)
                evict_entry(obj, fmt, entry);
        }
    }
 }

-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
+/* -----------------------------------------------------------------------------
+ * Page Management
  */
-static inline bool is_shmem(struct drm_gem_object *obj)
-{
-   return obj->filp != NULL;
-}
-
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
- */
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
-{
-   struct omap_gem_object *omap_obj = to_omap_bo(obj);
-   return is_shmem(obj) &&
-       ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
-}
-
-static DEFINE_SPINLOCK(sync_lock);

 /** ensure backing pages are allocated */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)

@@ -272,6 +283,28 @@ free_pages:
    return ret;
 }

+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+   struct omap_gem_object *omap_obj = to_omap_bo(obj);
+   int ret = 0;
+
+   if (is_shmem(obj) && !omap_obj->pages) {
+       ret = omap_gem_attach_pages(obj);
+       if (ret) {
+           dev_err(obj->dev->dev, "could not attach pages\n");
+           return ret;
+       }
+   }
+
+   /* TODO: even phys-contig.. we should have a list of pages? */
+   *pages = omap_obj->pages;
+
+   return 0;
+}
+
 /** release backing pages */
 static void omap_gem_detach_pages(struct drm_gem_object *obj)
 {

@@ -301,26 +334,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
    return to_omap_bo(obj)->flags;
 }

-/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
-{
-   struct drm_device *dev = obj->dev;
-   int ret;
-   size_t size;
-
-   WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-   /* Make it mmapable */
-   size = omap_gem_mmap_size(obj);
-   ret = drm_gem_create_mmap_offset_size(obj, size);
-   if (ret) {
-       dev_err(dev->dev, "could not allocate mmap offset\n");
-       return 0;
-   }
-
-   return drm_vma_node_offset_addr(&obj->vma_node);
-}
-
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
 {
    uint64_t offset;

@@ -362,6 +375,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
    return -EINVAL;
 }

+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
 /* Normal handling for the case of faulting in non-tiled buffers */
 static int fault_1d(struct drm_gem_object *obj,
        struct vm_area_struct *vma, struct vm_fault *vmf)

@@ -393,7 +410,8 @@ static int fault_2d(struct drm_gem_object *obj,
        struct vm_area_struct *vma, struct vm_fault *vmf)
 {
    struct omap_gem_object *omap_obj = to_omap_bo(obj);
-   struct usergart_entry *entry;
+   struct omap_drm_private *priv = obj->dev->dev_private;
+   struct omap_drm_usergart_entry *entry;
    enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
    struct page *pages[64];  /* XXX is this too much to have on stack? */
    unsigned long pfn;

@@ -406,8 +424,8 @@ static int fault_2d(struct drm_gem_object *obj,
     * that need to be mapped in to fill 4kb wide CPU page.  If the slot
     * height is 64, then 64 pages fill a 4kb wide by 64 row region.
     */
-   const int n = usergart[fmt].height;
-   const int n_shift = usergart[fmt].height_shift;
+   const int n = priv->usergart[fmt].height;
+   const int n_shift = priv->usergart[fmt].height_shift;

    /*
     * If buffer width in bytes > PAGE_SIZE then the virtual stride is

@@ -428,11 +446,11 @@ static int fault_2d(struct drm_gem_object *obj,
    base_pgoff = round_down(pgoff, m << n_shift);

    /* figure out buffer width in slots */
-   slots = omap_obj->width >> usergart[fmt].slot_shift;
+   slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

    vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

-   entry = &usergart[fmt].entry[usergart[fmt].last];
+   entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

    /* evict previous buffer using this usergart entry, if any: */
    if (entry->obj)

@@ -479,12 +497,13 @@ static int fault_2d(struct drm_gem_object *obj,

    for (i = n; i > 0; i--) {
        vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
-       pfn += usergart[fmt].stride_pfn;
+       pfn += priv->usergart[fmt].stride_pfn;
        vaddr += PAGE_SIZE * m;
    }

    /* simple round-robin: */
-   usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+   priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
+                % NUM_USERGART_ENTRIES;

    return 0;
 }

@@ -596,6 +615,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
    return 0;
 }

+/* -----------------------------------------------------------------------------
+ * Dumb Buffers
+ */
+
 /**
  * omap_gem_dumb_create - create a dumb buffer

@@ -653,6 +675,7 @@ fail:
    return ret;
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 /* Set scrolling position.  This allows us to implement fast scrolling
  * for console.
  *

@@ -689,6 +712,22 @@ fail:

    return ret;
 }
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Memory Management & DMA Sync
+ */
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+   struct omap_gem_object *omap_obj = to_omap_bo(obj);
+   return is_shmem(obj) &&
+       ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}

 /* Sync the buffer for CPU access.. note pages should already be
  * attached, ie. omap_gem_get_pages()

@@ -865,28 +904,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
    return ret;
 }

-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
-   struct omap_gem_object *omap_obj = to_omap_bo(obj);
-   int ret = 0;
-
-   if (is_shmem(obj) && !omap_obj->pages) {
-       ret = omap_gem_attach_pages(obj);
-       if (ret) {
-           dev_err(obj->dev->dev, "could not attach pages\n");
-           return ret;
-       }
-   }
-
-   /* TODO: even phys-contig.. we should have a list of pages? */
-   *pages = omap_obj->pages;
-
-   return 0;
-}
-
 /* if !remap, and we don't have pages backing, then fail, rather than
  * increasing the pin count (which we don't really do yet anyways,
  * because we don't support swapping pages back out).  And 'remap'

@@ -924,6 +941,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
    return 0;
 }

+#ifdef CONFIG_DRM_FBDEV_EMULATION
 /* Get kernel virtual address for CPU access.. this more or less only
  * exists for omap_fbdev.  This should be called with struct_mutex
  * held.

@@ -942,6 +960,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
    }
    return omap_obj->vaddr;
 }
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */

 #ifdef CONFIG_PM
 /* re-pin objects in DMM in resume path: */

@@ -971,6 +994,10 @@ int omap_gem_resume(struct device *dev)
 }
 #endif

+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
 #ifdef CONFIG_DEBUG_FS
 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {

@@ -1017,9 +1044,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
 }
 #endif

-/* Buffer Synchronization:
+/* -----------------------------------------------------------------------------
+ * Buffer Synchronization
  */

+static DEFINE_SPINLOCK(sync_lock);
+
 struct omap_gem_sync_waiter {
    struct list_head list;
    struct omap_gem_object *omap_obj;

@@ -1265,6 +1295,10 @@ unlock:
    return ret;
 }

+/* -----------------------------------------------------------------------------
+ * Constructor & Destructor
+ */
+
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */

@@ -1282,8 +1316,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
    list_del(&omap_obj->mm_list);
    spin_unlock(&priv->list_lock);

-   drm_gem_free_mmap_offset(obj);
-
    /* this means the object is still pinned.. which really should
     * not happen.  I think..
     */

@@ -1308,31 +1340,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)

    drm_gem_object_release(obj);

-   kfree(obj);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-       union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
-{
-   struct drm_gem_object *obj;
-   int ret;
-
-   obj = omap_gem_new(dev, gsize, flags);
-   if (!obj)
-       return -ENOMEM;
-
-   ret = drm_gem_handle_create(file, obj, handle);
-   if (ret) {
-       drm_gem_object_release(obj);
-       kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
-       return ret;
-   }
-
-   /* drop reference from allocate - handle holds it now */
-   drm_gem_object_unreference_unlocked(obj);
-
-   return 0;
+   kfree(omap_obj);
 }

 /* GEM buffer object constructor */

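Note on the kfree() change in omap_gem_free_object() above: obj is the
address of omap_obj->base, and base is the first member of struct
omap_gem_object, so both calls happen to free the same allocation today.
Freeing the containing object is still the correct form, since it mirrors
the allocation of the full omap_gem_object and keeps working even if the
embedded base ever stops being the first member.
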
@@ -1341,15 +1349,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 {
    struct omap_drm_private *priv = dev->dev_private;
    struct omap_gem_object *omap_obj;
-   struct drm_gem_object *obj = NULL;
+   struct drm_gem_object *obj;
    struct address_space *mapping;
    size_t size;
    int ret;

    if (flags & OMAP_BO_TILED) {
-       if (!usergart) {
+       if (!priv->usergart) {
            dev_err(dev->dev, "Tiled buffers require DMM\n");
-           goto fail;
+           return NULL;
        }

        /* tiled buffers are always shmem paged backed.. when they are

@@ -1420,16 +1428,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
    return obj;

 fail:
-   if (obj)
-       omap_gem_free_object(obj);
-
+   omap_gem_free_object(obj);
    return NULL;
 }

-/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+       union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+   struct drm_gem_object *obj;
+   int ret;
+
+   obj = omap_gem_new(dev, gsize, flags);
+   if (!obj)
+       return -ENOMEM;
+
+   ret = drm_gem_handle_create(file, obj, handle);
+   if (ret) {
+       omap_gem_free_object(obj);
+       return ret;
+   }
+
+   /* drop reference from allocate - handle holds it now */
+   drm_gem_object_unreference_unlocked(obj);
+
+   return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+/* If DMM is used, we need to set some stuff up.. */
 void omap_gem_init(struct drm_device *dev)
 {
    struct omap_drm_private *priv = dev->dev_private;
+   struct omap_drm_usergart *usergart;
    const enum tiler_fmt fmts[] = {
            TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
    };

@@ -1458,10 +1492,11 @@ void omap_gem_init(struct drm_device *dev)
        usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
        usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
        for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
-           struct usergart_entry *entry = &usergart[i].entry[j];
-           struct tiler_block *block =
-                   tiler_reserve_2d(fmts[i], w, h,
-                           PAGE_SIZE);
+           struct omap_drm_usergart_entry *entry;
+           struct tiler_block *block;
+
+           entry = &usergart[i].entry[j];
+           block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
            if (IS_ERR(block)) {
                dev_err(dev->dev,
                    "reserve failed: %d, %d, %ld\n",

@@ -1477,13 +1512,16 @@ void omap_gem_init(struct drm_device *dev)
        }
    }

+   priv->usergart = usergart;
    priv->has_dmm = true;
 }

 void omap_gem_deinit(struct drm_device *dev)
 {
+   struct omap_drm_private *priv = dev->dev_private;
+
    /* I believe we can rely on there being no more outstanding GEM
     * objects which could depend on usergart/dmm at this point.
     */
-   kfree(usergart);
+   kfree(priv->usergart);
 }

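Taken together, the omap_gem.c hunks above replace the file-scope usergart
static with a pointer in struct omap_drm_private, so the usergart tables now
belong to one drm_device instead of being shared by every instance; each
former usergart[fmt] access becomes priv->usergart[fmt], with priv fetched
from obj->dev->dev_private. Per-device state like this is part of the
cleanups preparing for DMAbuf import mentioned in the merge description.
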
@@ -188,33 +188,6 @@ static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
    .atomic_disable = omap_plane_atomic_disable,
 };

-static void omap_plane_reset(struct drm_plane *plane)
-{
-   struct omap_plane *omap_plane = to_omap_plane(plane);
-   struct omap_plane_state *omap_state;
-
-   if (plane->state && plane->state->fb)
-       drm_framebuffer_unreference(plane->state->fb);
-
-   kfree(plane->state);
-   plane->state = NULL;
-
-   omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
-   if (omap_state == NULL)
-       return;
-
-   /*
-    * Set defaults depending on whether we are a primary or overlay
-    * plane.
-    */
-   omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
-              ? 0 : omap_plane->id;
-   omap_state->base.rotation = BIT(DRM_ROTATE_0);
-
-   plane->state = &omap_state->base;
-   plane->state->plane = plane;
-}
-
 static void omap_plane_destroy(struct drm_plane *plane)
 {
    struct omap_plane *omap_plane = to_omap_plane(plane);

|
|||
kfree(to_omap_plane_state(state));
|
||||
}
|
||||
|
||||
static void omap_plane_reset(struct drm_plane *plane)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
struct omap_plane_state *omap_state;
|
||||
|
||||
if (plane->state) {
|
||||
omap_plane_atomic_destroy_state(plane, plane->state);
|
||||
plane->state = NULL;
|
||||
}
|
||||
|
||||
omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
|
||||
if (omap_state == NULL)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Set defaults depending on whether we are a primary or overlay
|
||||
* plane.
|
||||
*/
|
||||
omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
|
||||
? 0 : omap_plane->id;
|
||||
omap_state->base.rotation = BIT(DRM_ROTATE_0);
|
||||
|
||||
plane->state = &omap_state->base;
|
||||
plane->state->plane = plane;
|
||||
}
|
||||
|
||||
static int omap_plane_atomic_set_property(struct drm_plane *plane,
|
||||
struct drm_plane_state *state,
|
||||
struct drm_property *property,
|
||||
|
|
|
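The omap_plane.c change pairs a fix with a move: omap_plane_reset() now
releases any existing state through omap_plane_atomic_destroy_state(), which
frees the whole omap_plane_state wrapper (and drops the framebuffer
reference), instead of open-coding drm_framebuffer_unreference() plus
kfree(plane->state). The function had to move below the destroy handler it
now calls, which is all the second hunk does.
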
@@ -5,8 +5,9 @@
  *
  * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
  *          Lajos Molnar <molnar@ti.com>
+ *          Andy Gross <andy.gross@ti.com>
  *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2012 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

|
|||
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include "tcm.h"
|
||||
|
||||
#include "tcm-sita.h"
|
||||
static unsigned long mask[8];
|
||||
/*
|
||||
* pos position in bitmap
|
||||
* w width in slots
|
||||
* h height in slots
|
||||
* map ptr to bitmap
|
||||
* stride slots in a row
|
||||
*/
|
||||
static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
|
||||
unsigned long *map, uint16_t stride)
|
||||
{
|
||||
int i;
|
||||
|
||||
#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
|
||||
for (i = 0; i < h; i++, pos += stride)
|
||||
bitmap_clear(map, pos, w);
|
||||
}
|
||||
|
||||
/* Individual selection criteria for different scan areas */
|
||||
static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
|
||||
static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
|
||||
/*
|
||||
* w width in slots
|
||||
* pos ptr to position
|
||||
* map ptr to bitmap
|
||||
* num_bits number of bits in bitmap
|
||||
*/
|
||||
static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
|
||||
size_t num_bits)
|
||||
{
|
||||
unsigned long search_count = 0;
|
||||
unsigned long bit;
|
||||
bool area_found = false;
|
||||
|
||||
/*********************************************
|
||||
* TCM API - Sita Implementation
|
||||
*********************************************/
|
||||
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
|
||||
struct tcm_area *area);
|
||||
static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
|
||||
static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
|
||||
static void sita_deinit(struct tcm *tcm);
|
||||
*pos = num_bits - w;
|
||||
|
||||
/*********************************************
|
||||
* Main Scanner functions
|
||||
*********************************************/
|
||||
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *area);
|
||||
while (search_count < num_bits) {
|
||||
bit = find_next_bit(map, num_bits, *pos);
|
||||
|
||||
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
if (bit - *pos >= w) {
|
||||
/* found a long enough free area */
|
||||
bitmap_set(map, *pos, w);
|
||||
area_found = true;
|
||||
break;
|
||||
}
|
||||
|
||||
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
search_count = num_bits - bit + w;
|
||||
*pos = bit - w;
|
||||
}
|
||||
|
||||
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
return (area_found) ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
/*********************************************
|
||||
* Support Infrastructure Methods
|
||||
*********************************************/
|
||||
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
|
||||
/*
|
||||
* w = width in slots
|
||||
* h = height in slots
|
||||
* a = align in slots (mask, 2^n-1, 0 is unaligned)
|
||||
* offset = offset in bytes from 4KiB
|
||||
* pos = position in bitmap for buffer
|
||||
* map = bitmap ptr
|
||||
* num_bits = size of bitmap
|
||||
* stride = bits in one row of container
|
||||
*/
|
||||
static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
|
||||
unsigned long *pos, unsigned long slot_bytes,
|
||||
unsigned long *map, size_t num_bits, size_t slot_stride)
|
||||
{
|
||||
int i;
|
||||
unsigned long index;
|
||||
bool area_free;
|
||||
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
|
||||
unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
|
||||
unsigned long curr_bit = bit_offset;
|
||||
|
||||
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
|
||||
struct tcm_area *field, s32 criteria,
|
||||
struct score *best);
|
||||
/* reset alignment to 1 if we are matching a specific offset */
|
||||
/* adjust alignment - 1 to get to the format expected in bitmaps */
|
||||
a = (offset > 0) ? 0 : a - 1;
|
||||
|
||||
static void get_nearness_factor(struct tcm_area *field,
|
||||
struct tcm_area *candidate,
|
||||
struct nearness_factor *nf);
|
||||
/* FIXME Return error if slots_per_band > stride */
|
||||
|
||||
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
|
||||
struct neighbor_stats *stat);
|
||||
while (curr_bit < num_bits) {
|
||||
*pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w,
|
||||
a);
|
||||
|
||||
static void fill_area(struct tcm *tcm,
|
||||
struct tcm_area *area, struct tcm_area *parent);
|
||||
/* skip forward if we are not at right offset */
|
||||
if (bit_offset > 0 && (*pos % slots_per_band != bit_offset)) {
|
||||
curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* skip forward to next row if we overlap end of row */
|
||||
if ((*pos % slot_stride) + w > slot_stride) {
|
||||
curr_bit = ALIGN(*pos, slot_stride) + bit_offset;
|
||||
continue;
|
||||
}
|
||||
|
||||
/*********************************************/
|
||||
/* TODO: Handle overlapping 4K boundaries */
|
||||
|
||||
/*********************************************
|
||||
* Utility Methods
|
||||
*********************************************/
|
||||
struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
|
||||
/* break out of look if we will go past end of container */
|
||||
if ((*pos + slot_stride * h) > num_bits)
|
||||
break;
|
||||
|
||||
/* generate mask that represents out matching pattern */
|
||||
bitmap_clear(mask, 0, slot_stride);
|
||||
bitmap_set(mask, (*pos % BITS_PER_LONG), w);
|
||||
|
||||
/* assume the area is free until we find an overlap */
|
||||
area_free = true;
|
||||
|
||||
/* check subsequent rows to see if complete area is free */
|
||||
for (i = 1; i < h; i++) {
|
||||
index = *pos / BITS_PER_LONG + i * 8;
|
||||
if (bitmap_intersects(&map[index], mask,
|
||||
(*pos % BITS_PER_LONG) + w)) {
|
||||
area_free = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (area_free)
|
||||
break;
|
||||
|
||||
/* go forward past this match */
|
||||
if (bit_offset > 0)
|
||||
curr_bit = ALIGN(*pos, slots_per_band) + bit_offset;
|
||||
else
|
||||
curr_bit = *pos + a + 1;
|
||||
}
|
||||
|
||||
if (area_free) {
|
||||
/* set area as in-use. iterate over rows */
|
||||
for (i = 0, index = *pos; i < h; i++, index += slot_stride)
|
||||
bitmap_set(map, index, w);
|
||||
}
|
||||
|
||||
return (area_free) ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
unsigned long pos;
|
||||
int ret;
|
||||
|
||||
spin_lock(&(tcm->lock));
|
||||
ret = r2l_b2t_1d(num_slots, &pos, tcm->bitmap, tcm->map_size);
|
||||
if (!ret) {
|
||||
area->p0.x = pos % tcm->width;
|
||||
area->p0.y = pos / tcm->width;
|
||||
area->p1.x = (pos + num_slots - 1) % tcm->width;
|
||||
area->p1.y = (pos + num_slots - 1) / tcm->width;
|
||||
}
|
||||
spin_unlock(&(tcm->lock));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
|
||||
int16_t offset, uint16_t slot_bytes,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
unsigned long pos;
|
||||
int ret;
|
||||
|
||||
spin_lock(&(tcm->lock));
|
||||
ret = l2r_t2b(w, h, align, offset, &pos, slot_bytes, tcm->bitmap,
|
||||
tcm->map_size, tcm->width);
|
||||
|
||||
if (!ret) {
|
||||
area->p0.x = pos % tcm->width;
|
||||
area->p0.y = pos / tcm->width;
|
||||
area->p1.x = area->p0.x + w - 1;
|
||||
area->p1.y = area->p0.y + h - 1;
|
||||
}
|
||||
spin_unlock(&(tcm->lock));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void sita_deinit(struct tcm *tcm)
|
||||
{
|
||||
kfree(tcm);
|
||||
}
|
||||
|
||||
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
|
||||
{
|
||||
unsigned long pos;
|
||||
uint16_t w, h;
|
||||
|
||||
pos = area->p0.x + area->p0.y * tcm->width;
|
||||
if (area->is2d) {
|
||||
w = area->p1.x - area->p0.x + 1;
|
||||
h = area->p1.y - area->p0.y + 1;
|
||||
} else {
|
||||
w = area->p1.x + area->p1.y * tcm->width - pos + 1;
|
||||
h = 1;
|
||||
}
|
||||
|
||||
spin_lock(&(tcm->lock));
|
||||
free_slots(pos, w, h, tcm->bitmap, tcm->width);
|
||||
spin_unlock(&(tcm->lock));
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct tcm *sita_init(u16 width, u16 height)
|
||||
{
|
||||
struct tcm *tcm;
|
||||
struct sita_pvt *pvt;
|
||||
struct tcm_area area = {0};
|
||||
s32 i;
|
||||
size_t map_size = BITS_TO_LONGS(width*height) * sizeof(unsigned long);
|
||||
|
||||
if (width == 0 || height == 0)
|
||||
return NULL;
|
||||
|
||||
tcm = kzalloc(sizeof(*tcm), GFP_KERNEL);
|
||||
pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
|
||||
if (!tcm || !pvt)
|
||||
tcm = kzalloc(sizeof(*tcm) + map_size, GFP_KERNEL);
|
||||
if (!tcm)
|
||||
goto error;
|
||||
|
||||
/* Updating the pointers to SiTA implementation APIs */
|
||||
|
@ -99,602 +246,16 @@ struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
|
|||
tcm->reserve_1d = sita_reserve_1d;
|
||||
tcm->free = sita_free;
|
||||
tcm->deinit = sita_deinit;
|
||||
tcm->pvt = (void *)pvt;
|
||||
|
||||
spin_lock_init(&(pvt->lock));
|
||||
spin_lock_init(&tcm->lock);
|
||||
tcm->bitmap = (unsigned long *)(tcm + 1);
|
||||
bitmap_clear(tcm->bitmap, 0, width*height);
|
||||
|
||||
/* Creating tam map */
|
||||
pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
|
||||
if (!pvt->map)
|
||||
goto error;
|
||||
tcm->map_size = width*height;
|
||||
|
||||
for (i = 0; i < tcm->width; i++) {
|
||||
pvt->map[i] =
|
||||
kmalloc(sizeof(**pvt->map) * tcm->height,
|
||||
GFP_KERNEL);
|
||||
if (pvt->map[i] == NULL) {
|
||||
while (i--)
|
||||
kfree(pvt->map[i]);
|
||||
kfree(pvt->map);
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
|
||||
pvt->div_pt.x = attr->x;
|
||||
pvt->div_pt.y = attr->y;
|
||||
|
||||
} else {
|
||||
/* Defaulting to 3:1 ratio on width for 2D area split */
|
||||
/* Defaulting to 3:1 ratio on height for 2D and 1D split */
|
||||
pvt->div_pt.x = (tcm->width * 3) / 4;
|
||||
pvt->div_pt.y = (tcm->height * 3) / 4;
|
||||
}
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
assign(&area, 0, 0, width - 1, height - 1);
|
||||
fill_area(tcm, &area, NULL);
|
||||
spin_unlock(&(pvt->lock));
|
||||
return tcm;
|
||||
|
||||
error:
|
||||
kfree(tcm);
|
||||
kfree(pvt);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void sita_deinit(struct tcm *tcm)
|
||||
{
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
struct tcm_area area = {0};
|
||||
s32 i;
|
||||
|
||||
area.p1.x = tcm->width - 1;
|
||||
area.p1.y = tcm->height - 1;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
fill_area(tcm, &area, NULL);
|
||||
spin_unlock(&(pvt->lock));
|
||||
|
||||
for (i = 0; i < tcm->height; i++)
|
||||
kfree(pvt->map[i]);
|
||||
kfree(pvt->map);
|
||||
kfree(pvt);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserve a 1D area in the container
|
||||
*
|
||||
* @param num_slots size of 1D area
|
||||
* @param area pointer to the area that will be populated with the
|
||||
* reserved area
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret;
|
||||
struct tcm_area field = {0};
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
|
||||
/* Scanning entire container */
|
||||
assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
|
||||
|
||||
ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
|
||||
if (!ret)
|
||||
/* update map */
|
||||
fill_area(tcm, area, area);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserve a 2D area in the container
|
||||
*
|
||||
* @param w width
|
||||
* @param h height
|
||||
* @param area pointer to the area that will be populated with the reserved
|
||||
* area
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
/* not supporting more than 64 as alignment */
|
||||
if (align > 64)
|
||||
return -EINVAL;
|
||||
|
||||
/* we prefer 1, 32 and 64 as alignment */
|
||||
align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
ret = scan_areas_and_find_fit(tcm, w, h, align, area);
|
||||
if (!ret)
|
||||
/* update map */
|
||||
fill_area(tcm, area, area);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Unreserve a previously allocated 2D or 1D area
|
||||
* @param area area to be freed
|
||||
* @return 0 - success
|
||||
*/
|
||||
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
|
||||
{
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
|
||||
/* check that this is in fact an existing area */
|
||||
WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
|
||||
pvt->map[area->p1.x][area->p1.y] != area);
|
||||
|
||||
/* Clear the contents of the associated tiles in the map */
|
||||
fill_area(tcm, area, NULL);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Note: In general the cordinates in the scan field area relevant to the can
|
||||
* sweep directions. The scan origin (e.g. top-left corner) will always be
|
||||
* the p0 member of the field. Therfore, for a scan from top-left p0.x <= p1.x
|
||||
* and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
|
||||
* <= p0.y
|
||||
*/
|
||||
|
||||
/**
|
||||
* Raster scan horizontally right to left from top to bottom to find a place for
|
||||
* a 2D area of given size inside a scan field.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 x, y;
|
||||
s16 start_x, end_x, start_y, end_y, found_x = -1;
|
||||
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
|
||||
struct score best = {{0}, {0}, {0}, 0};
|
||||
|
||||
start_x = field->p0.x;
|
||||
end_x = field->p1.x;
|
||||
start_y = field->p0.y;
|
||||
end_y = field->p1.y;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p0.x < field->p1.x ||
|
||||
field->p1.y < field->p0.y)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
|
||||
return -ENOSPC;
|
||||
|
||||
/* adjust start_x and end_y, as allocation would not fit beyond */
|
||||
start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
|
||||
end_y = end_y - h + 1;
|
||||
|
||||
/* check if allocation would still fit in scan area */
|
||||
if (start_x < end_x)
|
||||
return -ENOSPC;
|
||||
|
||||
/* scan field top-to-bottom, right-to-left */
|
||||
for (y = start_y; y <= end_y; y++) {
|
||||
for (x = start_x; x >= end_x; x -= align) {
|
||||
if (is_area_free(map, x, y, w, h)) {
|
||||
found_x = x;
|
||||
|
||||
/* update best candidate */
|
||||
if (update_candidate(tcm, x, y, w, h, field,
|
||||
CR_R2L_T2B, &best))
|
||||
goto done;
|
||||
|
||||
/* change upper x bound */
|
||||
end_x = x + 1;
|
||||
break;
|
||||
} else if (map[x][y] && map[x][y]->is2d) {
|
||||
/* step over 2D areas */
|
||||
x = ALIGN(map[x][y]->p0.x - w + 1, align);
|
||||
}
|
||||
}
|
||||
|
||||
/* break if you find a free area shouldering the scan field */
|
||||
if (found_x == start_x)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!best.a.tcm)
|
||||
return -ENOSPC;
|
||||
done:
|
||||
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raster scan horizontally left to right from top to bottom to find a place for
|
||||
* a 2D area of given size inside a scan field.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 x, y;
|
||||
s16 start_x, end_x, start_y, end_y, found_x = -1;
|
||||
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
|
||||
struct score best = {{0}, {0}, {0}, 0};
|
||||
|
||||
start_x = field->p0.x;
|
||||
end_x = field->p1.x;
|
||||
start_y = field->p0.y;
|
||||
end_y = field->p1.y;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p1.x < field->p0.x ||
|
||||
field->p1.y < field->p0.y)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
|
||||
return -ENOSPC;
|
||||
|
||||
start_x = ALIGN(start_x, align);
|
||||
|
||||
/* check if allocation would still fit in scan area */
|
||||
if (w > LEN(end_x, start_x))
|
||||
return -ENOSPC;
|
||||
|
||||
/* adjust end_x and end_y, as allocation would not fit beyond */
|
||||
end_x = end_x - w + 1; /* + 1 to be inclusive */
|
||||
end_y = end_y - h + 1;
|
||||
|
||||
/* scan field top-to-bottom, left-to-right */
|
||||
for (y = start_y; y <= end_y; y++) {
|
||||
for (x = start_x; x <= end_x; x += align) {
|
||||
if (is_area_free(map, x, y, w, h)) {
|
||||
found_x = x;
|
||||
|
||||
/* update best candidate */
|
||||
if (update_candidate(tcm, x, y, w, h, field,
|
||||
CR_L2R_T2B, &best))
|
||||
goto done;
|
||||
/* change upper x bound */
|
||||
end_x = x - 1;
|
||||
|
||||
break;
|
||||
} else if (map[x][y] && map[x][y]->is2d) {
|
||||
/* step over 2D areas */
|
||||
x = ALIGN_DOWN(map[x][y]->p1.x, align);
|
||||
}
|
||||
}
|
||||
|
||||
/* break if you find a free area shouldering the scan field */
|
||||
if (found_x == start_x)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!best.a.tcm)
|
||||
return -ENOSPC;
|
||||
done:
|
||||
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raster scan horizontally right to left from bottom to top to find a place
|
||||
* for a 1D area of given size inside a scan field.
|
||||
*
|
||||
* @param num_slots size of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best
|
||||
* position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 found = 0;
|
||||
s16 x, y;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
struct tcm_area *p;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p0.y < field->p1.y)
|
||||
return -EINVAL;
|
||||
|
||||
/**
|
||||
* Currently we only support full width 1D scan field, which makes sense
|
||||
* since 1D slot-ordering spans the full container width.
|
||||
*/
|
||||
if (tcm->width != field->p0.x - field->p1.x + 1)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
|
||||
return -ENOSPC;
|
||||
|
||||
x = field->p0.x;
|
||||
y = field->p0.y;
|
||||
|
||||
/* find num_slots consecutive free slots to the left */
|
||||
while (found < num_slots) {
|
||||
if (y < 0)
|
||||
return -ENOSPC;
|
||||
|
||||
/* remember bottom-right corner */
|
||||
if (found == 0) {
|
||||
area->p1.x = x;
|
||||
area->p1.y = y;
|
||||
}
|
||||
|
||||
/* skip busy regions */
|
||||
p = pvt->map[x][y];
|
||||
if (p) {
|
||||
/* move to left of 2D areas, top left of 1D */
|
||||
x = p->p0.x;
|
||||
if (!p->is2d)
|
||||
y = p->p0.y;
|
||||
|
||||
/* start over */
|
||||
found = 0;
|
||||
} else {
|
||||
/* count consecutive free slots */
|
||||
found++;
|
||||
if (found == num_slots)
|
||||
break;
|
||||
}
|
||||
|
||||
/* move to the left */
|
||||
if (x == 0)
|
||||
y--;
|
||||
x = (x ? : tcm->width) - 1;
|
||||
|
||||
}
|
||||
|
||||
/* set top-left corner */
|
||||
area->p0.x = x;
|
||||
area->p0.y = y;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a place for a 2D area of given size inside a scan field based on its
|
||||
* alignment needs.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret = 0;
|
||||
struct tcm_area field = {0};
|
||||
u16 boundary_x, boundary_y;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
if (align > 1) {
|
||||
/* prefer top-left corner */
|
||||
boundary_x = pvt->div_pt.x - 1;
|
||||
boundary_y = pvt->div_pt.y - 1;
|
||||
|
||||
/* expand width and height if needed */
|
||||
if (w > pvt->div_pt.x)
|
||||
boundary_x = tcm->width - 1;
|
||||
if (h > pvt->div_pt.y)
|
||||
boundary_y = tcm->height - 1;
|
||||
|
||||
assign(&field, 0, 0, boundary_x, boundary_y);
|
||||
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
|
||||
|
||||
/* scan whole container if failed, but do not scan 2x */
|
||||
if (ret != 0 && (boundary_x != tcm->width - 1 ||
|
||||
boundary_y != tcm->height - 1)) {
|
||||
/* scan the entire container if nothing found */
|
||||
assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
|
||||
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
|
||||
}
|
||||
} else if (align == 1) {
|
||||
/* prefer top-right corner */
|
||||
boundary_x = pvt->div_pt.x;
|
||||
boundary_y = pvt->div_pt.y - 1;
|
||||
|
||||
/* expand width and height if needed */
|
||||
if (w > (tcm->width - pvt->div_pt.x))
|
||||
boundary_x = 0;
|
||||
if (h > pvt->div_pt.y)
|
||||
boundary_y = tcm->height - 1;
|
||||
|
||||
assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
|
||||
ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
|
||||
|
||||
/* scan whole container if failed, but do not scan 2x */
|
||||
if (ret != 0 && (boundary_x != 0 ||
|
||||
boundary_y != tcm->height - 1)) {
|
||||
/* scan the entire container if nothing found */
|
||||
assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
|
||||
ret = scan_r2l_t2b(tcm, w, h, align, &field,
|
||||
area);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
/* check if an entire area is free */
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
{
	u16 x = 0, y = 0;
	for (y = y0; y < y0 + h; y++) {
		for (x = x0; x < x0 + w; x++) {
			if (map[x][y])
				return false;
		}
	}
	return true;
}

/* fill an area with a parent tcm_area */
static void fill_area(struct tcm *tcm, struct tcm_area *area,
		      struct tcm_area *parent)
{
	s32 x, y;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
	struct tcm_area a, a_;

	/* set area's tcm; otherwise, enumerator considers it invalid */
	area->tcm = tcm;

	tcm_for_each_slice(a, *area, a_) {
		for (x = a.p0.x; x <= a.p1.x; ++x)
			for (y = a.p0.y; y <= a.p1.y; ++y)
				pvt->map[x][y] = parent;
	}
}

/**
 * Compares a candidate area to the current best area and, if it is a better
 * fit, updates the best to this one.
 *
 * @param x0, y0, w, h	left, top, width, height of candidate area
 * @param field		scan field
 * @param criteria	scan criteria
 * @param best		best candidate and its scores
 *
 * @return 1 (true) if the candidate area is known to be the final best, so no
 * more searching should be performed
 */
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
			    struct tcm_area *field, s32 criteria,
			    struct score *best)
{
	struct score me;	/* score for area */

	/*
	 * NOTE: For horizontal bias we always give the first found, because
	 * our scan is horizontal-raster-based and the first candidate will
	 * always have the horizontal bias.
	 */
	bool first = criteria & CR_BIAS_HORIZONTAL;

	assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);

	/* calculate score for current candidate */
	if (!first) {
		get_neighbor_stats(tcm, &me.a, &me.n);
		me.neighs = me.n.edge + me.n.busy;
		get_nearness_factor(field, &me.a, &me.f);
	}

	/* the 1st candidate is always the best */
	if (!best->a.tcm)
		goto better;

	BUG_ON(first);

	/* diagonal balance check */
	if ((criteria & CR_DIAGONAL_BALANCE) &&
	    best->neighs <= me.neighs &&
	    (best->neighs < me.neighs ||
	     /* this implies that neighs and occupied match */
	     best->n.busy < me.n.busy ||
	     (best->n.busy == me.n.busy &&
	      /* check the nearness factor */
	      best->f.x + best->f.y > me.f.x + me.f.y)))
		goto better;

	/* not better, keep going */
	return 0;

better:
	/* save current area as best */
	memcpy(best, &me, sizeof(me));
	best->a.tcm = tcm;
	return first;
}
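
/*
 * Illustration only, not driver code: the diagonal-balance comparison above,
 * lifted into a standalone predicate with made-up scores. More total
 * neighbors wins; on a tie, more busy neighbors; on a further tie, the
 * smaller nearness sum (i.e. the candidate closer to the scan origin).
 */
#include <stdbool.h>
#include <stdio.h>

static bool candidate_is_better(int best_neighs, int best_busy, int best_nf,
				int me_neighs, int me_busy, int me_nf)
{
	return best_neighs <= me_neighs &&
	       (best_neighs < me_neighs ||
		best_busy < me_busy ||
		(best_busy == me_busy && best_nf > me_nf));
}

int main(void)
{
	/* equal neighbor totals, but the candidate has more busy neighbors */
	printf("%d\n", candidate_is_better(4, 1, 160, 4, 2, 200));	/* 1 */
	/* full tie on counts: the nearer candidate (smaller sum) wins */
	printf("%d\n", candidate_is_better(4, 2, 160, 4, 2, 120));	/* 1 */
	return 0;
}
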
/**
 * Calculate the nearness factor of an area in a search field. The nearness
 * factor is smaller if the area is closer to the search origin.
 */
static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
				struct nearness_factor *nf)
{
	/*
	 * Use signed math, as field coordinates may be reversed if the
	 * search direction is right-to-left or bottom-to-top.
	 */
	nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
		(field->p1.x - field->p0.x);
	nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
		(field->p1.y - field->p0.y);
}
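
/*
 * A quick worked example of the factor above, using assumed numbers: for a
 * scan field spanning (0,0)-(191,95), a candidate whose top-left corner sits
 * at (16,8) scores as follows (scaled by 1000 to keep integer math).
 */
#include <stdio.h>

int main(void)
{
	int fx0 = 0, fx1 = 191, fy0 = 0, fy1 = 95;	/* assumed scan field */
	int ax = 16, ay = 8;				/* candidate corner */

	int nfx = (ax - fx0) * 1000 / (fx1 - fx0);	/* 16000 / 191 = 83 */
	int nfy = (ay - fy0) * 1000 / (fy1 - fy0);	/* 8000 / 95 = 84 */

	printf("nearness: x=%d y=%d (smaller is nearer)\n", nfx, nfy);
	return 0;
}
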
/* get neighbor statistics */
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
			       struct neighbor_stats *stat)
{
	s16 x = 0, y = 0;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	/* clear any existing values */
	memset(stat, 0, sizeof(*stat));

	/* process top & bottom edges */
	for (x = area->p0.x; x <= area->p1.x; x++) {
		if (area->p0.y == 0)
			stat->edge++;
		else if (pvt->map[x][area->p0.y - 1])
			stat->busy++;

		if (area->p1.y == tcm->height - 1)
			stat->edge++;
		else if (pvt->map[x][area->p1.y + 1])
			stat->busy++;
	}

	/* process left & right edges */
	for (y = area->p0.y; y <= area->p1.y; ++y) {
		if (area->p0.x == 0)
			stat->edge++;
		else if (pvt->map[area->p0.x - 1][y])
			stat->busy++;

		if (area->p1.x == tcm->width - 1)
			stat->edge++;
		else if (pvt->map[area->p1.x + 1][y])
			stat->busy++;
	}
}
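
/*
 * Illustration only, not driver code: edge/busy counting on an invented 4x4
 * map. A 2x2 area in the top-left corner collects one edge count for every
 * perimeter slot on the container border and one busy count for every
 * occupied neighboring slot, exactly as in get_neighbor_stats() above.
 */
#include <stdio.h>
#include <string.h>

#define W 4
#define H 4

int main(void)
{
	int map[W][H];			/* nonzero = occupied, indexed [x][y] */
	int edge = 0, busy = 0;
	int x, y;

	memset(map, 0, sizeof(map));
	map[2][0] = 1;			/* one occupied slot right of the area */

	/* area under test: (0,0)-(1,1) */
	for (x = 0; x <= 1; x++) {	/* top & bottom edges */
		edge++;			/* p0.y == 0: container border above */
		if (map[x][2])
			busy++;		/* occupied slot below */
	}
	for (y = 0; y <= 1; y++) {	/* left & right edges */
		edge++;			/* p0.x == 0: container border left */
		if (map[2][y])
			busy++;		/* occupied slot to the right */
	}

	printf("edge=%d busy=%d\n", edge, busy);	/* edge=4 busy=1 */
	return 0;
}
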
@@ -61,18 +61,17 @@ struct tcm {
 
 	unsigned int y_offset;	/* offset to use for y coordinates */
 
-	/* 'pvt' structure shall contain any tcm details (attr) along with
-	   linked list of allocated areas and mutex for mutually exclusive access
-	   to the list. It may also contain copies of width and height to notice
-	   any changes to the publicly available width and height fields. */
-	void *pvt;
+	spinlock_t lock;
+	unsigned long *bitmap;
+	size_t map_size;
 
 	/* function table */
-	s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+	s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
+			  int16_t offset, uint16_t slot_bytes,
 			  struct tcm_area *area);
 	s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
-	s32 (*free) (struct tcm *tcm, struct tcm_area *area);
-	void (*deinit) (struct tcm *tcm);
+	s32 (*free)(struct tcm *tcm, struct tcm_area *area);
+	void (*deinit)(struct tcm *tcm);
 };
 
 /*=============================================================================
@@ -91,7 +90,7 @@ struct tcm {
  *
  */
 
-struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+struct tcm *sita_init(u16 width, u16 height);
 
 
 /**
@@ -120,6 +119,9 @@ static inline void tcm_deinit(struct tcm *tcm)
  *		  all values may be supported by the container manager,
  *		  but it must support 0 (1), 32 and 64.
  *		  0 value is equivalent to 1.
+ * @param offset	Offset requirement, in bytes. This is the offset
+ *			from a 4KiB aligned virtual address.
+ * @param slot_bytes	Width of slot in bytes
  * @param area	Pointer to where the reserved area should be stored.
  *
  * @return 0 on success. Non-0 error code on failure. Also,
@@ -129,7 +131,8 @@ static inline void tcm_deinit(struct tcm *tcm)
  * allocation.
  */
 static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
-				 u16 align, struct tcm_area *area)
+				 u16 align, int16_t offset, uint16_t slot_bytes,
+				 struct tcm_area *area)
 {
 	/* perform rudimentary error checking */
 	s32 res = tcm == NULL ? -ENODEV :
@@ -140,7 +143,8 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
 
 	if (!res) {
 		area->is2d = true;
-		res = tcm->reserve_2d(tcm, height, width, align, area);
+		res = tcm->reserve_2d(tcm, height, width, align, offset,
+				      slot_bytes, area);
 		area->tcm = res ? NULL : tcm;
 	}
 
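
/*
 * A sketch of how a caller adapts to the widened reserve path; everything
 * here is assumed for illustration (container size, 64-slot alignment, zero
 * byte offset, 4096-byte slots), not taken from the driver.
 */
static int example_reserve(void)
{
	struct tcm_area area;
	struct tcm *tcm = sita_init(256, 128);	/* attr argument is gone */

	if (!tcm)
		return -ENOMEM;

	if (tcm_reserve_2d(tcm, 16, 8, 64, 0, 4096, &area)) {
		tcm_deinit(tcm);
		return -ENOSPC;
	}

	/* ... use the area, then release it ... */
	tcm_free(&area);
	tcm_deinit(tcm);
	return 0;
}
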
@@ -101,9 +101,6 @@ struct drm_omap_gem_info {
 
 #define DRM_OMAP_GET_PARAM		0x00
 #define DRM_OMAP_SET_PARAM		0x01
-/* placeholder for plugin-api
-#define DRM_OMAP_GET_BASE		0x02
-*/
 #define DRM_OMAP_GEM_NEW		0x03
 #define DRM_OMAP_GEM_CPU_PREP		0x04
 #define DRM_OMAP_GEM_CPU_FINI		0x05
@@ -112,9 +109,6 @@ struct drm_omap_gem_info {
 
 #define DRM_IOCTL_OMAP_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
 #define DRM_IOCTL_OMAP_SET_PARAM	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
-/* placeholder for plugin-api
-#define DRM_IOCTL_OMAP_GET_BASE	DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
-*/
 #define DRM_IOCTL_OMAP_GEM_NEW		DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
 #define DRM_IOCTL_OMAP_GEM_CPU_PREP	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
 #define DRM_IOCTL_OMAP_GEM_CPU_FINI	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)