Merge branch 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop into fix

* 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop:
  async_tx: allow architecture specific async_tx_find_channel implementations
  async_tx: replace 'int_en' with operation preparation flags
  async_tx: kill tx_set_src and tx_set_dest methods
  async_tx: kill ASYNC_TX_ASSUME_COHERENT
  iop-adma: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: fix compile breakage, mark do_async_xor __always_inline
Linus Torvalds 2008-02-06 11:16:11 -08:00
Parents: 8f1bfa4c5c 47437b2c9a
Commit: 3d4d4582e5
12 changed files with 259 additions and 261 deletions
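
The functional core of the series is visible throughout the diff below: the device_prep_dma_* hooks now receive pre-mapped bus addresses plus an unsigned long flags word (DMA_PREP_INTERRUPT standing in for the old int_en argument), which is what allows the per-descriptor tx_set_src/tx_set_dest methods to be removed. A caller-side sketch of the new flow, modeled on the dma_async_memcpy_buf_to_buf() hunk below (hypothetical helper name, error handling trimmed):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* sketch_memcpy - illustrative only, not part of this commit */
static dma_cookie_t sketch_memcpy(struct dma_chan *chan, void *dest,
				  void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;

	/* mapping is now the caller's responsibility, done before prep */
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* bus addresses and prep flags travel in a single call */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	return tx->tx_submit(tx);
}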

crypto/async_tx/async_memcpy.c

@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,33 +46,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memcpy(chan, len,
-			int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memcpy asynchronously */
-		dma_addr_t addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest, dma_src;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
+		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+					DMA_FROM_DEVICE);
+		dma_src = dma_map_page(device->dev, src, src_offset, len,
+				       DMA_TO_DEVICE);
+		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+						    len, dma_prep_flags);
+	}
+
+	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
-		tx->tx_set_dest(addr, tx, 0);
-
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		addr = dma_map_page(device->dev, src, src_offset, len, dir);
-		tx->tx_set_src(addr, tx, 0);
-
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
-	} else { /* run the memcpy synchronously */
+	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);

crypto/async_tx/async_memset.c

@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_memset(chan, val, len,
-			int_en) : NULL;
+	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (tx) { /* run the memset asynchronously */
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t dma_dest;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
+		dma_dest = dma_map_page(device->dev, dest, offset, len,
+					DMA_FROM_DEVICE);
+		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+						    dma_prep_flags);
+	}
+
+	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-		tx->tx_set_dest(dma_addr, tx, 0);
-
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 	} else { /* run the memset synchronously */
 		void *dest_buf;

crypto/async_tx/async_tx.c

@@ -57,8 +57,7 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
  */
 static spinlock_t async_tx_lock;
-static struct list_head
-async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
+static LIST_HEAD(async_tx_master_list);
 
 /* async_tx_issue_pending_all - start all transactions on all channels */
 void async_tx_issue_pending_all(void)
@@ -362,13 +361,13 @@ static void __exit async_tx_exit(void)
 }
 
 /**
- * async_tx_find_channel - find a channel to carry out the operation or let
+ * __async_tx_find_channel - find a channel to carry out the operation or let
  *	the transaction execute synchronously
  * @depend_tx: transaction dependency
  * @tx_type: transaction type
  */
 struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type)
 {
 	/* see if we can keep the chain on one channel */
@@ -384,7 +383,7 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	} else
 		return NULL;
 }
-EXPORT_SYMBOL_GPL(async_tx_find_channel);
+EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
 static int __init async_tx_init(void)
 {

crypto/async_tx/async_xor.c

@@ -30,35 +30,51 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-static void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+/* do_async_xor - dma map the pages and perform the xor with an engine.
+ *	This routine is marked __always_inline so it can be compiled away
+ *	when CONFIG_DMA_ENGINE=n
+ */
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
 	struct dma_chan *chan, struct page *dest, struct page **src_list,
 	unsigned int offset, unsigned int src_cnt, size_t len,
 	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_addr;
-	enum dma_data_direction dir;
+	dma_addr_t dma_dest;
+	dma_addr_t *dma_src = (dma_addr_t *) src_list;
+	struct dma_async_tx_descriptor *tx;
 	int i;
+	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_FROM_DEVICE;
-
-	dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
-	tx->tx_set_dest(dma_addr, tx, 0);
-
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_TO_DEVICE;
-
-	for (i = 0; i < src_cnt; i++) {
-		dma_addr = dma_map_page(device->dev, src_list[i],
-			offset, len, dir);
-		tx->tx_set_src(dma_addr, tx, i);
+	dma_dest = dma_map_page(device->dev, dest, offset, len,
+				DMA_FROM_DEVICE);
+	for (i = 0; i < src_cnt; i++)
+		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+					  len, DMA_TO_DEVICE);
+
+	/* Since we have clobbered the src_list we are committed
+	 * to doing this asynchronously.  Drivers force forward progress
+	 * in case they can not provide a descriptor
+	 */
+	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+					 dma_prep_flags);
+	if (!tx) {
+		if (depend_tx)
+			dma_wait_for_async_tx(depend_tx);
+		while (!tx)
+			tx = device->device_prep_dma_xor(chan, dma_dest,
+							 dma_src, src_cnt, len,
+							 dma_prep_flags);
 	}
 
 	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+	return tx;
 }
 
 static void
@@ -102,7 +118,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -113,14 +129,16 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_async_tx_callback _cb_fn;
 	void *_cb_param;
 	unsigned long local_flags;
 	int xor_src_cnt;
-	int i = 0, src_off = 0, int_en;
+	int i = 0, src_off = 0;
 
 	BUG_ON(src_cnt <= 1);
@@ -140,20 +158,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 				_cb_param = cb_param;
 			}
 
-			int_en = _cb_fn ? 1 : 0;
-
-			tx = device->device_prep_dma_xor(
-				chan, xor_src_cnt, len, int_en);
-
-			if (tx) {
-				do_async_xor(tx, device, chan, dest,
-				&src_list[src_off], offset, xor_src_cnt, len,
-					local_flags, depend_tx, _cb_fn,
-					_cb_param);
-			} else /* fall through */
-				goto xor_sync;
+			tx = do_async_xor(device, chan, dest,
+					  &src_list[src_off], offset,
+					  xor_src_cnt, len, local_flags,
+					  depend_tx, _cb_fn, _cb_param);
 		} else { /* run the xor synchronously */
-xor_sync:
 			/* in the sync case the dest is an implied source
 			 * (assumes the dest is at the src_off index)
 			 */
@@ -242,7 +251,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -254,29 +263,36 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+						      &dest, 1, src_list,
+						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
-	int int_en = cb_fn ? 1 : 0;
-	struct dma_async_tx_descriptor *tx = device ?
-		device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
-			int_en) : NULL;
-	int i;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	BUG_ON(src_cnt <= 1);
 
-	if (tx) {
-		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
+	if (device) {
+		dma_addr_t *dma_src = (dma_addr_t *) src_list;
+		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		int i;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		for (i = 0; i < src_cnt; i++) {
-			dma_addr = dma_map_page(device->dev, src_list[i],
-				offset, len, dir);
-			tx->tx_set_src(dma_addr, tx, i);
+		for (i = 0; i < src_cnt; i++)
+			dma_src[i] = dma_map_page(device->dev, src_list[i],
+						  offset, len, DMA_TO_DEVICE);
+
+		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+						      len, result,
+						      dma_prep_flags);
+		if (!tx) {
+			if (depend_tx)
+				dma_wait_for_async_tx(depend_tx);
+			while (!tx)
+				tx = device->device_prep_dma_zero_sum(chan,
+					dma_src, src_cnt, len, result,
+					dma_prep_flags);
 		}
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -311,6 +327,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
 
 static int __init async_xor_init(void)
 {
+	#ifdef CONFIG_DMA_ENGINE
+	/* To conserve stack space the input src_list (array of page pointers)
+	 * is reused to hold the array of dma addresses passed to the driver.
+	 * This conversion is only possible when dma_addr_t is less than the
+	 * the size of a pointer.  HIGHMEM64G is known to violate this
+	 * assumption.
+	 */
+	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+	#endif
+
 	return 0;
 }
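
The src_list reuse guarded by the new BUILD_BUG_ON above is the one subtle point in this file. A hedged illustration of the aliasing do_async_xor() performs (invented helper, not kernel code):

#include <linux/dma-mapping.h>

/* map_srcs_in_place - illustrative only: overwrite the caller's
 * page-pointer array with the mapped bus addresses so no second
 * array has to live on the stack.
 */
static void map_srcs_in_place(struct device *dev, struct page **src_list,
			      unsigned int src_cnt, unsigned int offset,
			      size_t len)
{
	dma_addr_t *dma_src = (dma_addr_t *) src_list; /* same storage */
	unsigned int i;

	/* safe only while sizeof(dma_addr_t) <= sizeof(struct page *);
	 * src_list[i] is read before dma_src[i] overwrites the slot
	 */
	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(dev, src_list[i], offset, len,
					  DMA_TO_DEVICE);
}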

drivers/dma/Kconfig

@@ -5,6 +5,7 @@
 menuconfig DMADEVICES
 	bool "DMA Engine support"
 	depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+	depends on !HIGHMEM64G
 	help
 	  DMA engines can do asynchronous data transfers without
 	  involving the host CPU. Currently, this framework can be

drivers/dma/dmaengine.c

@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-
-	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
-
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-
-	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
-
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
 
-	tx = dev->device_prep_dma_memcpy(chan, len, 0);
-	if (!tx)
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+	if (!tx) {
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
+	}
 
 	tx->ack = 1;
 	tx->callback = NULL;
-
-	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
-
 	cookie = tx->tx_submit(tx);
 
 	cpu = get_cpu();

drivers/dma/ioat_dma.c

@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	return device->common.chancnt;
 }
 
-static void ioat_set_src(dma_addr_t addr,
-			 struct dma_async_tx_descriptor *tx,
-			 int index)
-{
-	tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
-			  struct dma_async_tx_descriptor *tx,
-			  int index)
-{
-	tx_to_ioat_desc(tx)->dst = addr;
-}
-
 /**
  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
  *                                 descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 
 	memset(desc, 0, sizeof(*desc));
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-	desc_sw->async_tx.tx_set_src = ioat_set_src;
-	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 
 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						struct dma_chan *chan,
+						dma_addr_t dma_dest,
+						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 
 	if (new) {
 		new->len = len;
+		new->dst = dma_dest;
+		new->src = dma_src;
 		return &new->async_tx;
 	} else
 		return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	u8 *dest;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t addr;
+	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
 
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto out;
 	}
 
-	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+				  DMA_FROM_DEVICE);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+						   IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 	async_tx_ack(tx);
-	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-			DMA_TO_DEVICE);
-	tx->tx_set_src(addr, tx, 0);
-	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-			DMA_FROM_DEVICE);
-	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
 	cookie = tx->tx_submit(tx);

drivers/dma/iop-adma.c

@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 			int slots_per_op)
 {
 	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
-	struct list_head chain = LIST_HEAD_INIT(chain);
+	LIST_HEAD(chain);
 	int slots_found, retry = 0;
 
 	/* start search from the last allocated descrtiptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 	return cookie;
 }
 
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
-	/* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
-	iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
-		slot->async_tx.tx_set_dest = iop_adma_set_dest;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			 dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memcpy(grp_start, int_en);
+		iop_desc_init_memcpy(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
-	int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+			 int value, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, int_en);
+		iop_desc_init_memset(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
 	}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
-	int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
-	int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+		      unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
 	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u int_en: %d\n",
-		__FUNCTION__, src_cnt, len, int_en);
+		"%s src_cnt: %d len: %u flags: %lx\n",
+		__FUNCTION__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_xor(grp_start, src_cnt, int_en);
+		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
+		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+		while (src_cnt--)
+			iop_desc_set_xor_src_addr(grp_start, src_cnt,
+						  dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
 
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
-				struct dma_async_tx_descriptor *tx,
-				int index)
-{
-	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
-	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
-	iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
-	size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+			   unsigned int src_cnt, size_t len, u32 *result,
+			   unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 			__FUNCTION__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
-		sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+		while (src_cnt--)
+			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+						       dma_src[src_cnt]);
 	}
 	spin_unlock_bh(&iop_chan->lock);
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 		goto out;
 	}
 
-	tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
 	src_dma = dma_map_single(dma_chan->device->dev, src,
 				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
-	iop_adma_memcpy_set_src(src_dma, tx, 0);
+	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+				      IOP_ADMA_TEST_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	struct page *dest;
 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 	dma_addr_t dma_addr, dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test xor */
-	tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
-		PAGE_SIZE, 1);
 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 				PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dest_dma, tx, 0);
-
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
-			PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	zero_sum_result = 1;
 
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-		PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-			0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 	}
 
 	/* test memset */
-	tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
 	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
 			PAGE_SIZE, DMA_FROM_DEVICE);
-	iop_adma_set_dest(dma_addr, tx, 0);
+	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
-		PAGE_SIZE, &zero_sum_result, 1);
-	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
-		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
-			0, PAGE_SIZE, DMA_TO_DEVICE);
-		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
-	}
+	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+					   zero_sum_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+					&zero_sum_result, 1);
 
 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);

include/asm-arm/arch-iop13xx/adma.h

@@ -247,7 +247,7 @@ static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -257,13 +257,13 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -274,14 +274,15 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.block_fill_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 }
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -292,7 +293,7 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.src_select = src_cnt - 1;
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;
 
@@ -301,7 +302,8 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 
 /* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
 	union {
@@ -314,7 +316,7 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 
 	u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
 	u_desc_ctrl.field.zero_result = 1;
 	u_desc_ctrl.field.status_write_back_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->crc_addr = 0;

include/asm-arm/hardware/iop3xx-adma.h

@@ -414,7 +414,7 @@ static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
 }
 
 static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
 	union {
@@ -425,14 +425,14 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.mem_to_mem_en = 1;
 	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 	hw_desc->upper_pci_src_addr = 0;
 	hw_desc->crc_addr = 0;
 }
 
 static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	union {
@@ -443,12 +443,13 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
 
 	u_desc_ctrl.value = 0;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
 	u_desc_ctrl.field.dest_write_en = 1;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 }
 
 static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+		     unsigned long flags)
 {
 	int i, shift;
 	u32 edcr;
@@ -509,21 +510,23 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
 
 	u_desc_ctrl.field.dest_write_en = 1;
 	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 
 	return u_desc_ctrl.value;
 }
 
 static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		  unsigned long flags)
 {
-	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
+	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
 }
 
 /* return the number of operations */
 static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
 	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
@@ -538,10 +541,10 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
 		i += slots_per_op, j++) {
 		iter = iop_hw_desc_slot_idx(hw_desc, i);
-		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
+		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
 		u_desc_ctrl.field.dest_write_en = 0;
 		u_desc_ctrl.field.zero_result_en = 1;
-		u_desc_ctrl.field.int_en = int_en;
+		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 		iter->desc_ctrl = u_desc_ctrl.value;
 
 		/* for the subsequent descriptors preserve the store queue
@@ -559,7 +562,8 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 }
 
 static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+		       unsigned long flags)
 {
 	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
 	union {
@@ -591,7 +595,7 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
 	}
 
 	u_desc_ctrl.field.dest_write_en = 0;
-	u_desc_ctrl.field.int_en = int_en;
+	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
 	hw_desc->desc_ctrl = u_desc_ctrl.value;
 }

include/linux/async_tx.h

@@ -47,7 +47,6 @@ struct dma_chan_ref {
  *	address is an implied source, whereas the asynchronous case it must be listed
  *	as a source.  The destination address must be the first address in the source
  *	array.
- * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  *	dependency chain
  * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
@@ -55,7 +54,6 @@ struct dma_chan_ref {
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ASSUME_COHERENT = (1 << 2),
 	ASYNC_TX_ACK		 = (1 << 3),
 	ASYNC_TX_DEP_ACK	 = (1 << 4),
 };
@@ -64,9 +62,15 @@ enum async_tx_flags {
 void async_tx_issue_pending_all(void);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+#include <asm/async_tx.h>
+#else
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+	__async_tx_find_channel(dep, type)
 struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type);
+#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
 {
@@ -88,7 +92,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
 
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type)
+	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
+	struct page **src, int src_count, size_t len)
 {
 	return NULL;
 }
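
For architectures that select CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL, the macro above is instead supplied by <asm/async_tx.h>. A hypothetical sketch of such a header (all names invented; only the seven-argument shape is fixed by this patch):

#include <linux/dmaengine.h>

/* arch_select_channel - invented arch policy hook */
struct dma_chan *arch_select_channel(enum dma_transaction_type type,
				     size_t len);

static inline struct dma_chan *
arch_async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
			   enum dma_transaction_type type, struct page **dst,
			   int dst_count, struct page **src, int src_count,
			   size_t len)
{
	/* e.g. keep short transfers on the CPU (NULL selects the
	 * synchronous path) and route long ones to an offload engine
	 */
	return len < PAGE_SIZE ? NULL : arch_select_channel(type, len);
}

#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	arch_async_tx_find_channel(dep, type, dst, dst_count, src, \
				   src_count, len)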

include/linux/dmaengine.h

@@ -94,6 +94,15 @@ enum dma_transaction_type {
 /* last transaction type for creation of the capabilities mask */
 #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
 
+/**
+ * enum dma_prep_flags - DMA flags to augment operation preparation
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ *	this transaction
+ */
+enum dma_prep_flags {
+	DMA_PREP_INTERRUPT = (1 << 0),
+};
+
 /**
  * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
  * See linux/cpumask.h
@@ -209,8 +218,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
  *	descriptors
  * @chan: target channel for this operation
  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
- * @tx_set_dest: set a destination address in a hardware descriptor
- * @tx_set_src: set a source address in a hardware descriptor
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
@@ -227,10 +234,6 @@ struct dma_async_tx_descriptor {
 	struct list_head tx_list;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
-	void (*tx_set_dest)(dma_addr_t addr,
-		struct dma_async_tx_descriptor *tx, int index);
-	void (*tx_set_src)(dma_addr_t addr,
-		struct dma_async_tx_descriptor *tx, int index);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct list_head depend_list;
@@ -279,15 +282,17 @@ struct dma_device {
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-		struct dma_chan *chan, size_t len, int int_en);
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-		struct dma_chan *chan, unsigned int src_cnt, size_t len,
-		int int_en);
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		unsigned int src_cnt, size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
-		struct dma_chan *chan, unsigned int src_cnt, size_t len,
-		u32 *result, int int_en);
+		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+		size_t len, u32 *result, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
-		struct dma_chan *chan, int value, size_t len, int int_en);
+		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan);
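
On the driver side the prep flags word is folded into hardware descriptor control bits, as the iop_desc_init_*() helpers above do with u_desc_ctrl.field.int_en. A sketch against an invented descriptor layout:

#include <linux/types.h>
#include <linux/dmaengine.h>

/* demo_hw_desc - invented hardware descriptor, for illustration only */
struct demo_hw_desc {
	u32 ctrl;
#define DEMO_CTRL_INT_EN	(1 << 0)
	u32 byte_count;
};

static inline void demo_desc_init(struct demo_hw_desc *hw, size_t len,
				  unsigned long flags)
{
	u32 ctrl = 0;

	/* DMA_PREP_INTERRUPT is bit 0, so the masked value is 0 or 1 and
	 * can also be assigned directly to a one-bit bitfield, as the IOP
	 * headers do
	 */
	if (flags & DMA_PREP_INTERRUPT)
		ctrl |= DEMO_CTRL_INT_EN;

	hw->ctrl = ctrl;
	hw->byte_count = len;
}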