/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <gxio/dma_queue.h>

/* Wait for a memory read to complete. */
#define wait_for_value(val) \
	__asm__ __volatile__("move %0, %0" :: "r"(val))
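
/*
 * The dummy "move" consumes "val" as a source register, so on this
 * in-order core instruction issue stalls until the load that produced
 * "val" has actually returned its data; the move itself has no
 * architectural effect.
 */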

/* The index is in the low 16 bits. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)

/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;
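
/*
 * Illustrative decode (little-endian layout above, with made-up
 * values): a raw post-region word of 0x100420007ULL splits into
 * ring_idx = 0x0007 (bits 0..15), count = 0x0042 (bits 16..31) and
 * gen = 1 (bit 32).
 */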

void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536 entry rings to 65535 credits because we only have a
	 * 16 bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
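
/*
 * Minimal setup sketch (hedged: "post" is a hypothetical pointer to
 * the ring's memory-mapped post region; how that mapping is obtained
 * is platform-specific):
 *
 *	__gxio_dma_queue_t equeue;
 *	__gxio_dma_queue_init(&equeue, post, 512);
 *
 * A 512-entry ring starts with 512 credits, well under the 65535 cap.
 */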

void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);
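
	/*
	 * (cmpxchg() returns the pre-existing value whether or not the
	 * compare succeeds, so comparing and exchanging against -1 acts
	 * as an atomic 64-bit read; even if the counter ever equalled
	 * -1, swapping in -1 would leave it unchanged.)
	 */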

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the 16-bit count of how many packets it has completed. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;
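
	/*
	 * Worked example: if the low 16 bits of orig_hw_complete_count
	 * are 0xfffe and the hardware count has since wrapped to 0x0003,
	 * then (0x0003 - 0xfffe) & 0xffff == 5, i.e. five completions,
	 * which is exact as long as fewer than 65536 completions occur
	 * between updates (guaranteed by the 65535-credit cap).
	 */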

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
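
/*
 * Note: credits_and_next_index packs the signed credit count in the
 * bits at and above DMA_QUEUE_CREDIT_SHIFT (see <gxio/dma_queue.h>)
 * and the next ring index in the low bits, so a single fetch-and-add
 * can both consume credits and advance the index atomically.
 */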

/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;
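
	/*
	 * Illustrative backoff progression: 16, 32, 64, 128, then 256
	 * dummy SPR reads per retry, capped at 256 thereafter.
	 */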

	do {
		int i;
		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/* Check credits again. */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);

int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);

int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (READ_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
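
/*
 * Hedged polling sketch ("equeue" and "slot" are assumed to come from
 * the caller's earlier reserve/post step):
 *
 *	while (!__gxio_dma_queue_is_complete(&equeue, slot, 1))
 *		cpu_relax();
 *
 * Passing update != 0 refreshes hw_complete_count from the hardware on
 * each call, so the loop observes progress without extra work by the
 * caller.
 */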