Merge branch 'topic/univ_api' into for-linus

Commit 7c7b680fa6
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
 Channel allocation is slightly different in the slave DMA context,
 client drivers typically need a channel from a particular DMA
 controller only and even in some cases a specific channel is desired.
-To request a channel dma_request_channel() API is used.
+To request a channel dma_request_chan() API is used.
 
 Interface:
-	struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
-			dma_filter_fn filter_fn,
-			void *filter_param);
-where dma_filter_fn is defined as:
-	typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+	struct dma_chan *dma_request_chan(struct device *dev, const char *name);
 
-The 'filter_fn' parameter is optional, but highly recommended for
-slave and cyclic channels as they typically need to obtain a specific
-DMA channel.
-
-When the optional 'filter_fn' parameter is NULL, dma_request_channel()
-simply returns the first channel that satisfies the capability mask.
-
-Otherwise, the 'filter_fn' routine will be called once for each free
-channel which has a capability in 'mask'. 'filter_fn' is expected to
-return 'true' when the desired DMA channel is found.
+Which will find and return the 'name' DMA channel associated with the 'dev'
+device. The association is done via DT, ACPI or board file based
+dma_slave_map matching table.
 
 A channel allocated via this interface is exclusive to the caller,
 until dma_release_channel() is called.

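For illustration, a minimal sketch of how a client driver might use the new
API (the driver, device and the "rx" channel name below are hypothetical;
error handling is reduced to the essentials):

	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct dma_chan *chan;

		/* Look up the "rx" slave channel bound to this device via
		 * DT, ACPI or a board-file dma_slave_map table.
		 */
		chan = dma_request_chan(&pdev->dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

		/* ... set up and use the channel ... */

		dma_release_channel(chan);
		return 0;
	}
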
@@ -43,6 +43,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>

@@ -512,7 +513,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
 	struct dma_chan *chan;
 
-	if (!__dma_device_satisfies_mask(dev, mask)) {
+	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 		pr_debug("%s: wrong capabilities\n", __func__);
 		return NULL;
 	}

@@ -543,6 +544,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	return NULL;
 }
 
+static struct dma_chan *find_candidate(struct dma_device *device,
+				       const dma_cap_mask_t *mask,
+				       dma_filter_fn fn, void *fn_param)
+{
+	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+	int err;
+
+	if (chan) {
+		/* Found a suitable channel, try to grab, prep, and return it.
+		 * We first set DMA_PRIVATE to disable balance_ref_count as this
+		 * channel will not be published in the general-purpose
+		 * allocator
+		 */
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
+		err = dma_chan_get(chan);
+
+		if (err) {
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
+				list_del_rcu(&device->global_node);
+			} else
+				pr_debug("%s: failed to get %s: (%d)\n",
+					 __func__, dma_chan_name(chan), err);
+
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+			chan = ERR_PTR(err);
+		}
+	}
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
  * @chan: target channel

@@ -581,7 +618,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 {
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
-	int err;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);

@@ -589,23 +625,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
-	chan = private_candidate(&mask, device, NULL, NULL);
-	if (chan) {
-		dma_cap_set(DMA_PRIVATE, device->cap_mask);
-		device->privatecnt++;
-		err = dma_chan_get(chan);
-		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
-				 __func__, dma_chan_name(chan), err);
-			chan = NULL;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-		}
-	}
+	chan = find_candidate(device, &mask, NULL, NULL);
 
 	mutex_unlock(&dma_list_mutex);
 
-	return chan;
+	return IS_ERR(chan) ? NULL : chan;
 }
 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 

@@ -622,35 +646,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
-	int err;
 
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-		chan = private_candidate(mask, device, fn, fn_param);
-		if (chan) {
-			/* Found a suitable channel, try to grab, prep, and
-			 * return it. We first set DMA_PRIVATE to disable
-			 * balance_ref_count as this channel will not be
-			 * published in the general-purpose allocator
-			 */
-			dma_cap_set(DMA_PRIVATE, device->cap_mask);
-			device->privatecnt++;
-			err = dma_chan_get(chan);
+		chan = find_candidate(device, mask, fn, fn_param);
+		if (!IS_ERR(chan))
+			break;
 
-			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n",
-					 __func__, dma_chan_name(chan));
-				list_del_rcu(&device->global_node);
-			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					 __func__, dma_chan_name(chan), err);
-			else
-				break;
-			if (--device->privatecnt == 0)
-				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-			chan = NULL;
-		}
+		chan = NULL;
 	}
 	mutex_unlock(&dma_list_mutex);
 

@@ -663,27 +667,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+						    const char *name,
+						    struct device *dev)
+{
+	int i;
+
+	if (!device->filter.mapcnt)
+		return NULL;
+
+	for (i = 0; i < device->filter.mapcnt; i++) {
+		const struct dma_slave_map *map = &device->filter.map[i];
+
+		if (!strcmp(map->devname, dev_name(dev)) &&
+		    !strcmp(map->slave, name))
+			return map;
+	}
+
+	return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev: pointer to client device structure
  * @name: slave channel name
  *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+	struct dma_device *d, *_d;
+	struct dma_chan *chan = NULL;
+
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
-		return of_dma_request_slave_channel(dev->of_node, name);
+		chan = of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev))
-		return acpi_dma_request_slave_chan_by_name(dev, name);
+	if (has_acpi_companion(dev) && !chan)
+		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-	return ERR_PTR(-ENODEV);
+	if (chan) {
+		/* Valid channel found or requester need to be deferred */
+		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+			return chan;
+	}
+
+	/* Try to find the channel via the DMA filter map(s) */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+		dma_cap_mask_t mask;
+		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+		if (!map)
+			continue;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		chan = find_candidate(d, &mask, d->filter.fn, map->param);
+		if (!IS_ERR(chan))
+			break;
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel

@@ -695,17 +745,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
 					   const char *name)
 {
-	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+	struct dma_chan *ch = dma_request_chan(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
 
 	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
 	ch->device->privatecnt++;
 
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+	struct dma_chan *chan;
+
+	if (!mask)
+		return ERR_PTR(-ENODEV);
+
+	chan = __dma_request_channel(mask, NULL, NULL);
+	if (!chan)
+		chan = ERR_PTR(-ENODEV);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);

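As a usage sketch for the new mask-based request (the wrapper function and the
capability chosen here are only an example):

	static struct dma_chan *foo_get_memcpy_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* Any channel advertising DMA_MEMCPY will do; an ERR_PTR
		 * (e.g. -ENODEV) is returned when nothing matches.
		 */
		return dma_request_chan_by_mask(&mask);
	}
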
@@ -2297,6 +2297,10 @@ static int edma_probe(struct platform_device *pdev)
 		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
 	}
 
+	ecc->dma_slave.filter.map = info->slave_map;
+	ecc->dma_slave.filter.mapcnt = info->slavecnt;
+	ecc->dma_slave.filter.fn = edma_filter_fn;
+
 	ret = dma_async_device_register(&ecc->dma_slave);
 	if (ret) {
 		dev_err(dev, "slave ddev registration failed (%d)\n", ret);

@@ -1203,6 +1203,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 		return rc;
 	}
 
+	od->ddev.filter.map = od->plat->slave_map;
+	od->ddev.filter.mapcnt = od->plat->slavecnt;
+	od->ddev.filter.fn = omap_dma_filter_fn;
+
 	rc = dma_async_device_register(&od->ddev);
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",

@@ -606,12 +606,39 @@ enum dmaengine_alignment {
 	DMAENGINE_ALIGN_64_BYTES = 6,
 };
 
+/**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ *                        parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+	const char *devname;
+	const char *slave;
+	void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ *                     mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+	dma_filter_fn fn;
+	int mapcnt;
+	const struct dma_slave_map *map;
+};
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability

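To make the mapping concrete, a board file could provide a table like the
following sketch (device names, channel names and the FOO_DMA_* parameters are
made up for illustration). The core matches 'devname' against dev_name(dev)
and 'slave' against the 'name' passed to dma_request_chan(), then hands
'param' to the controller's dma_filter.fn:

	static const struct dma_slave_map foo_dma_map[] = {
		{ "foo_uart.0", "rx", (void *)FOO_DMA_UART0_RX },
		{ "foo_uart.0", "tx", (void *)FOO_DMA_UART0_TX },
	};

The controller driver then points its dma_device filter at such a table, as
the edma and omap-dma probe hunks above do.
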
@@ -667,6 +694,7 @@ struct dma_device {
 	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
+	struct dma_filter filter;
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;

@@ -1142,9 +1170,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 					dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else

@@ -1168,16 +1198,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
-					struct device *dev, const char *name)
-{
-	return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 							  const char *name)
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+						const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+					const dma_cap_mask_t *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }

@@ -1188,6 +1223,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;

@@ -267,6 +267,9 @@ struct omap_dma_reg {
 	u8 type;
 };
 
+#define SDMA_FILTER_PARAM(hw_req)	((int[]) { (hw_req) })
+struct dma_slave_map;
+
 /* System DMA platform data structure */
 struct omap_system_dma_plat_info {
 	const struct omap_dma_reg *reg_map;

@@ -278,6 +281,9 @@ struct omap_system_dma_plat_info {
 	void (*clear_dma)(int lch);
 	void (*dma_write)(u32 val, int reg, int lch);
 	u32 (*dma_read)(int reg, int lch);
+
+	const struct dma_slave_map *slave_map;
+	int slavecnt;
 };
 
 #ifdef CONFIG_ARCH_OMAP2PLUS

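A sketch of how OMAP platform code might fill these new fields (device names
and SDMA request numbers are illustrative only):

	static const struct dma_slave_map omap_dma_map[] = {
		{ "omap_uart.0", "rx", SDMA_FILTER_PARAM(49) },
		{ "omap_uart.0", "tx", SDMA_FILTER_PARAM(50) },
	};

	static struct omap_system_dma_plat_info dma_plat_info = {
		/* ... register map and dma_read/dma_write callbacks ... */
		.slave_map = omap_dma_map,
		.slavecnt = ARRAY_SIZE(omap_dma_map),
	};
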
@@ -53,12 +53,16 @@ enum dma_event_q {
 #define EDMA_CTLR(i)			((i) >> 16)
 #define EDMA_CHAN_SLOT(i)		((i) & 0xffff)
 
+#define EDMA_FILTER_PARAM(ctlr, chan)	((int[]) { EDMA_CTLR_CHAN(ctlr, chan) })
+
 struct edma_rsv_info {
 
 	const s16	(*rsv_chans)[2];
 	const s16	(*rsv_slots)[2];
 };
 
+struct dma_slave_map;
+
 /* platform_data for EDMA driver */
 struct edma_soc_info {
 	/*

@@ -76,6 +80,9 @@ struct edma_soc_info {
 
 	s8	(*queue_priority_mapping)[2];
 	const s16	(*xbar_chans)[2];
+
+	const struct dma_slave_map *slave_map;
+	int slavecnt;
 };
 
 #endif

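And similarly for an eDMA based platform (the device name and the
controller/channel numbers in this sketch are made up):

	static const struct dma_slave_map foo_edma_map[] = {
		{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 1) },
		{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
	};

	static struct edma_soc_info foo_edma_pdata = {
		/* ... queue priority mapping, reservations ... */
		.slave_map = foo_edma_map,
		.slavecnt = ARRAY_SIZE(foo_edma_map),
	};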