dmaengine: core: Introduce new, universal API to request a channel
The two API functions can cover most, if not all, of the current APIs used
to request a channel. With minimal effort dmaengine drivers, platforms and
dmaengine user drivers can be converted to use the two functions.

struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

To request any channel matching the requested capabilities; can be used to
request a channel for memcpy, memset, xor, etc. where no hardware
synchronization is needed.

struct dma_chan *dma_request_chan(struct device *dev, const char *name);

To request a slave channel. dma_request_chan() will try to find the channel
via DT, ACPI or, in case the kernel booted in non DT/ACPI mode, it will use
a filter lookup table and retrieve the needed information from the
dma_slave_map provided by the DMA drivers.
This legacy mode needs changes in platform code, in dmaengine drivers and
finally the dmaengine user drivers can be converted:

For each dmaengine driver an array of DMA device, slave and the parameter
for the filter function needs to be added:

static const struct dma_slave_map da830_edma_map[] = {
	{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
	{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
	{ "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
	{ "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
	{ "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
	{ "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
	{ "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
	{ "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
	{ "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
	{ "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
	{ "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
	{ "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
};

This information is going to be needed by the dmaengine driver, so
modification to the platform_data is needed, and the driver map should be
added to the pdata of the DMA driver:

da8xx_edma0_pdata.slave_map = da830_edma_map;
da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);

The DMA driver then needs to configure the needed device -> filter_fn
mapping before it registers with dma_async_device_register():

ecc->dma_slave.filter_map.map = info->slave_map;
ecc->dma_slave.filter_map.mapcnt = info->slavecnt;
ecc->dma_slave.filter_map.fn = edma_filter_fn;

When neither DT nor ACPI lookup is available dma_request_chan() will try to
match the requester's device name with the filter_map's list of device
names; when a match is found it will use the information from the
dma_slave_map to get the channel with the dma_get_channel() internal
function.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent: 7bd903c5ca
Commit: a8135d0d79
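As an illustration of the new mask-based request (a sketch, not part of this patch): requesting a capability-matched channel, e.g. for memcpy offload, could look like the snippet below. The foo_get_memcpy_chan() helper name and the DMA_MEMCPY use case are assumptions of typical client code.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Illustrative: grab any channel that can do DMA_MEMCPY; no slave
 * (hardware) synchronization is involved, so no device/name is needed. */
static struct dma_chan *foo_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Returns a valid channel or an ERR_PTR(), never NULL */
	return dma_request_chan_by_mask(&mask);
}

The returned channel is exclusive to the caller and is handed back with dma_release_channel() when no longer needed.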
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
 Channel allocation is slightly different in the slave DMA context,
 client drivers typically need a channel from a particular DMA
 controller only and even in some cases a specific channel is desired.
-To request a channel dma_request_channel() API is used.
+To request a channel dma_request_chan() API is used.
 
 Interface:
-	struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
-			dma_filter_fn filter_fn,
-			void *filter_param);
-where dma_filter_fn is defined as:
-	typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+	struct dma_chan *dma_request_chan(struct device *dev, const char *name);
 
-The 'filter_fn' parameter is optional, but highly recommended for
-slave and cyclic channels as they typically need to obtain a specific
-DMA channel.
+Which will find and return the 'name' DMA channel associated with the 'dev'
+device. The association is done via DT, ACPI or board file based
+dma_slave_map matching table.
 
-When the optional 'filter_fn' parameter is NULL, dma_request_channel()
-simply returns the first channel that satisfies the capability mask.
-
-Otherwise, the 'filter_fn' routine will be called once for each free
-channel which has a capability in 'mask'. 'filter_fn' is expected to
-return 'true' when the desired DMA channel is found.
 
 A channel allocated via this interface is exclusive to the caller,
 until dma_release_channel() is called.
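To complement the documentation change above, here is a minimal sketch (assumed, not taken from this patch) of a slave client probe using the new interface; foo_probe_dma(), the "tx" channel name and the error handling are illustrative:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_probe_dma(struct device *dev, struct dma_chan **txchan)
{
	struct dma_chan *chan;

	/* Works the same whether the association comes from DT, ACPI
	 * or the legacy dma_slave_map table. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		/* May be -EPROBE_DEFER if the DMA controller is not ready yet */
		return PTR_ERR(chan);

	*txchan = chan;
	return 0;
}

The channel remains exclusive to the caller until dma_release_channel() is called, as stated above.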
@@ -43,6 +43,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -665,27 +666,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+						    const char *name,
+						    struct device *dev)
+{
+	int i;
+
+	if (!device->filter.mapcnt)
+		return NULL;
+
+	for (i = 0; i < device->filter.mapcnt; i++) {
+		const struct dma_slave_map *map = &device->filter.map[i];
+
+		if (!strcmp(map->devname, dev_name(dev)) &&
+		    !strcmp(map->slave, name))
+			return map;
+	}
+
+	return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev: pointer to client device structure
  * @name: slave channel name
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+	struct dma_device *d, *_d;
+	struct dma_chan *chan = NULL;
+
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
-		return of_dma_request_slave_channel(dev->of_node, name);
+		chan = of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev))
-		return acpi_dma_request_slave_chan_by_name(dev, name);
+	if (has_acpi_companion(dev) && !chan)
+		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-	return ERR_PTR(-ENODEV);
+	if (chan) {
+		/* Valid channel found or requester need to be deferred */
+		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+			return chan;
+	}
+
+	/* Try to find the channel via the DMA filter map(s) */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+		dma_cap_mask_t mask;
+		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+		if (!map)
+			continue;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		chan = find_candidate(d, &mask, d->filter.fn, map->param);
+		if (!IS_ERR(chan))
+			break;
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -697,17 +744,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
 					   const char *name)
 {
-	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+	struct dma_chan *ch = dma_request_chan(dev, name);
 	if (IS_ERR(ch))
 		return NULL;
 
-	dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
-	ch->device->privatecnt++;
-
 	return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+	struct dma_chan *chan;
+
+	if (!mask)
+		return ERR_PTR(-ENODEV);
+
+	chan = __dma_request_channel(mask, NULL, NULL);
+	if (!chan)
+		chan = ERR_PTR(-ENODEV);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
@@ -606,12 +606,39 @@ enum dmaengine_alignment {
 	DMAENGINE_ALIGN_64_BYTES = 6,
 };
 
+/**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ *			  parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+	const char *devname;
+	const char *slave;
+	void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ *		       mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+	dma_filter_fn fn;
+	int mapcnt;
+	const struct dma_slave_map *map;
+};
+
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
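For illustration only (not part of this patch), a DMA controller driver booting in legacy non-DT/ACPI mode would populate these new fields from its platform data before registering. my_dma_probe_register() and its parameters are placeholder names; the real edma conversion is shown in the commit message above.

#include <linux/dmaengine.h>

/* Illustrative sketch: wire the platform-provided slave map into the
 * controller's struct dma_filter before dma_async_device_register(). */
static int my_dma_probe_register(struct dma_device *dma_dev,
				 const struct dma_slave_map *map,
				 int mapcnt, dma_filter_fn fn)
{
	dma_dev->filter.map = map;
	dma_dev->filter.mapcnt = mapcnt;
	dma_dev->filter.fn = fn;

	return dma_async_device_register(dma_dev);
}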
@@ -666,6 +693,7 @@ struct dma_device {
 	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
+	struct dma_filter filter;
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
@@ -1140,9 +1168,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 					dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
@@ -1166,16 +1196,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
-					struct device *dev, const char *name)
-{
-	return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 							  const char *name)
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+						const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+					const dma_cap_mask_t *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1186,6 +1221,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif
 
+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;