2017-11-14 20:38:02 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2008-07-17 19:16:48 +04:00
|
|
|
/*
|
|
|
|
* Linux for s390 qdio support, buffer handling, qdio API and module support.
|
|
|
|
*
|
2012-07-20 13:15:04 +04:00
|
|
|
* Copyright IBM Corp. 2000, 2008
|
2008-07-17 19:16:48 +04:00
|
|
|
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
|
|
|
|
* Jan Glauber <jang@linux.vnet.ibm.com>
|
|
|
|
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
|
|
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/delay.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/gfp.h>
|
2011-08-08 05:33:55 +04:00
|
|
|
#include <linux/io.h>
|
2011-07-27 03:09:06 +04:00
|
|
|
#include <linux/atomic.h>
|
2008-07-17 19:16:48 +04:00
|
|
|
#include <asm/debug.h>
|
|
|
|
#include <asm/qdio.h>
|
2012-03-11 19:59:32 +04:00
|
|
|
#include <asm/ipl.h>
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
#include "cio.h"
|
|
|
|
#include "css.h"
|
|
|
|
#include "device.h"
|
|
|
|
#include "qdio.h"
|
|
|
|
#include "qdio_debug.h"
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
|
|
|
|
"Jan Glauber <jang@linux.vnet.ibm.com>");
|
|
|
|
MODULE_DESCRIPTION("QDIO base support");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
|
2011-01-05 14:47:52 +03:00
|
|
|
/*
 * do_siga_sync - issue a SIGA instruction with function code @fc to sync
 * the queue states between adapter and memory.
 * @schid: subchannel id, or the subchannel token when QEBSM is active
 * @out_mask: bitmask of output queues to sync
 * @in_mask: bitmask of input queues to sync
 * @fc: SIGA function code (loaded into GR0, selects the sync operation)
 *
 * Returns the condition code of the SIGA instruction.
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned long out_mask, unsigned long in_mask,
			       unsigned int fc)
{
	int cc;

	/* SIGA takes its operands in fixed registers GR0-GR3. */
	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[out]\n"
		" lgr 3,%[in]\n"
		" siga 0\n"
		" ipm %[cc]\n"		/* extract the condition code */
		" srl %[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid),
		  [out] "d" (out_mask), [in] "d" (in_mask)
		: "cc", "0", "1", "2", "3");
	return cc;
}
|
|
|
|
|
2021-06-22 16:26:16 +03:00
|
|
|
/*
 * do_siga_input - issue a SIGA instruction to initiate input processing
 * @schid: subchannel id, or the subchannel token when QEBSM is active
 * @mask: bitmask of input queues to process
 * @fc: SIGA function code (loaded into GR0)
 *
 * Returns the condition code of the SIGA instruction.
 */
static inline int do_siga_input(unsigned long schid, unsigned long mask,
				unsigned long fc)
{
	int cc;

	/* SIGA takes its operands in fixed registers GR0-GR2. */
	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[mask]\n"
		" siga 0\n"
		" ipm %[cc]\n"		/* extract the condition code */
		" srl %[cc],28\n"
		: [cc] "=&d" (cc)
		: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
		: "cc", "0", "1", "2");
	return cc;
}
|
|
|
|
|
|
|
|
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned long fc,
				 unsigned long aob)
{
	int cc;

	/* SIGA takes its operands in fixed registers GR0-GR3. */
	asm volatile(
		" lgr 0,%[fc]\n"
		" lgr 1,%[schid]\n"
		" lgr 2,%[mask]\n"
		" lgr 3,%[aob]\n"
		" siga 0\n"
		" lgr %[fc],0\n"	/* SIGA-w returns status in GR0 */
		" ipm %[cc]\n"		/* extract the condition code */
		" srl %[cc],28\n"
		: [cc] "=&d" (cc), [fc] "+&d" (fc)
		: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
		: "cc", "0", "1", "2", "3");
	/* The busy bit lives in bit 31 of the returned status word. */
	*bb = fc >> 31;
	return cc;
}
|
|
|
|
|
|
|
|
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	/* In the QEBSM numbering, output queues follow the input queues. */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_eqbs() updates tmp_start/tmp_count to reflect its progress. */
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Deliberately no retry here: the already-inspected buffers
		 * may have changed state in the meantime, so a second EQBS
		 * could misreport them. Return the partial result instead.
		 */
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		/* unexpected CCQ: log and notify the upper-layer driver */
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
|
|
|
|
|
|
|
|
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	/* In the QEBSM numbering, output queues follow the input queues. */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_sqbs() updates tmp_start/tmp_count to reflect its progress. */
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed; retry from where SQBS stopped */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		/* unexpected CCQ: log and notify the upper-layer driver */
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
|
|
|
|
|
2018-03-07 16:01:01 +03:00
|
|
|
/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	/* With QEBSM the hardware extracts the states for us. */
	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	/* Count how many subsequent buffers share the initial state. */
	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
|
|
|
|
|
2009-06-22 14:08:10 +04:00
|
|
|
/* Fetch the state of a single buffer; thin wrapper around get_buf_states(). */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
|
|
|
|
|
|
|
|
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	/* With QEBSM the hardware sets the states for us. */
	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);	/* wraps at queue end */
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}
|
|
|
|
|
|
|
|
/* Set the state of a single buffer; thin wrapper around set_buf_states(). */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
|
|
|
|
|
|
|
|
/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* All buffers of every queue start out as NOT_INIT (driver-owned). */
	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
|
|
|
|
|
2009-06-22 14:08:10 +04:00
|
|
|
/*
 * qdio_siga_sync - sync queue states between adapter and memory
 * @q: queue whose subchannel is addressed
 * @output: bitmask of output queues to sync
 * @input: bitmask of input queues to sync
 *
 * Returns 0 on success, -EIO if the SIGA instruction reported an error.
 */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	/* QEBSM addresses the subchannel via its token instead of the id. */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
|
|
|
|
|
2009-06-22 14:08:10 +04:00
|
|
|
static inline int qdio_siga_sync_q(struct qdio_q *q)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
|
|
|
if (q->is_input_q)
|
|
|
|
return qdio_siga_sync(q, 0, q->mask);
|
|
|
|
else
|
|
|
|
return qdio_siga_sync(q, q->mask, 0);
|
|
|
|
}
|
|
|
|
|
2019-10-31 15:42:14 +03:00
|
|
|
/*
 * qdio_siga_output - signal the adapter to process output buffers
 * @q: output queue to signal
 * @count: number of buffers being handed over
 * @busy_bit: set if SIGA-w/wt could not access a buffer (HiperSockets busy)
 * @aob: asynchronous operation block, selects SIGA-wq when set
 *
 * Retries on the busy condition until QDIO_BUSY_BIT_PATIENCE has elapsed.
 * Returns the condition code of the last SIGA instruction.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	/* Pick the write variant for IQD unicast: multi-write or queue-write. */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	/* QEBSM addresses the subchannel via its token instead of the id. */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		/* first busy hit: start the patience clock and retry */
		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		/* keep retrying until the patience interval is used up */
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
|
|
|
|
|
|
|
|
/*
 * qdio_siga_input - signal the adapter to process input buffers
 * @q: input queue to signal
 *
 * Returns 0 on success, -EIO if the SIGA instruction reported an error.
 */
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	/* QEBSM addresses the subchannel via its token instead of the id. */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
|
|
|
|
|
2011-01-05 14:47:54 +03:00
|
|
|
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
|
|
|
|
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
|
|
|
|
|
|
|
|
/* Sync either all queues of the device, or just @q, before inspecting SLSBs. */
static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}
|
|
|
|
|
2009-06-22 14:08:10 +04:00
|
|
|
/*
 * debug_get_buf_state - fetch a single buffer's state for debug purposes
 * @q: queue to inspect
 * @bufnr: buffer number to inspect
 * @state: receives the buffer's SLSB state
 *
 * Syncs the queue first when needed, so the state read is current.
 * Returns the result of get_buf_state() (number of buffers examined).
 */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}
|
|
|
|
|
|
|
|
/* Return the currently ACKed batch of input buffers to the adapter. */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	/* nothing to hand back if no batch has been accumulated */
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}
|
|
|
|
|
2014-06-03 23:55:15 +04:00
|
|
|
/* Record @count processed SBALs in the per-queue statistics. */
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	/* histogram bucket is the log2 of the batch size */
	q->q_stats.nr_sbals[ilog2(count)]++;
}
|
|
|
|
|
2019-04-08 12:19:54 +03:00
|
|
|
/*
 * process_buffer_error - log a buffer-error condition found on @q
 * @start: first buffer of the erroneous batch
 * @count: number of buffers in the batch
 *
 * The "no target buffer empty" case on IQD output queues is only counted,
 * not treated as an error; everything else is logged via the error DBF.
 */
static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	/* log queue direction, position and the SBAL flag bytes 14/15 */
	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2020-04-21 11:38:18 +03:00
|
|
|
/*
 * inbound_handle_work - fold newly found input buffers into the ACK batch
 * @start: first buffer of the new work
 * @count: number of new buffers
 * @auto_ack: true if the hardware already acknowledged the buffers (QEBSM)
 */
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	/* an empty batch starts at the new work's first buffer */
	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}
|
|
|
|
|
2021-01-30 14:44:17 +03:00
|
|
|
/*
 * get_inbound_buffer_frontier - scan the input queue for completed buffers
 * @q: input queue to scan
 * @start: buffer number to start scanning at
 * @error: receives QDIO_ERROR_SLSB_STATE when erroneous buffers are found
 *
 * Returns the number of buffers that were handed to the driver (0 when the
 * queue has no new work).
 */
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* new data from the adapter */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		/* QEBSM auto-acknowledged the buffers during get_buf_states() */
		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		/* buffer still owned by the adapter: no new work */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}
|
|
|
|
|
2019-04-08 14:32:12 +03:00
|
|
|
/* Returns 1 when the input queue has no pending work at @start, else 0. */
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	/* no buffers handed to the adapter means nothing can complete */
	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/* re-sync before reading the SLSB so the check is current */
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}
|
|
|
|
|
2016-08-05 13:33:10 +03:00
|
|
|
/*
 * Schedule the output tasklet for @q, but only while the device is active.
 * Returns 0 on success, -EPERM when the device is not in the ACTIVE state.
 */
static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->u.out.tasklet);
		return 0;
	}
	return -EPERM;
}
|
|
|
|
|
2021-01-30 14:44:17 +03:00
|
|
|
/*
 * get_outbound_buffer_frontier - scan the output queue for completed buffers
 * @q: output queue to scan
 * @start: buffer number to start scanning at
 * @error: receives QDIO_ERROR_SLSB_PENDING or QDIO_ERROR_SLSB_STATE
 *
 * Returns the number of buffers the adapter has completed (0 when no
 * progress was made).
 */
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/* sync only the queue types whose SLSBs are not updated otherwise */
	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		/* completion is delayed; report it and fall into EMPTY path */
		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}
|
|
|
|
|
|
|
|
/* all buffers processed? */
|
|
|
|
static inline int qdio_outbound_q_done(struct qdio_q *q)
|
|
|
|
{
|
|
|
|
return atomic_read(&q->nr_buf_used) == 0;
|
|
|
|
}
|
|
|
|
|
2019-10-31 15:42:14 +03:00
|
|
|
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
|
|
|
|
unsigned long aob)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
2011-08-03 18:44:17 +04:00
|
|
|
int retries = 0, cc;
|
2008-12-25 15:38:48 +03:00
|
|
|
unsigned int busy_bit;
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
if (!need_siga_out(q))
|
2009-03-26 17:24:31 +03:00
|
|
|
return 0;
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2008-12-25 15:38:48 +03:00
|
|
|
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
|
2011-08-03 18:44:17 +04:00
|
|
|
retry:
|
2010-01-04 11:05:42 +03:00
|
|
|
qperf_inc(q, siga_write);
|
2008-12-25 15:38:48 +03:00
|
|
|
|
2019-10-31 15:42:14 +03:00
|
|
|
cc = qdio_siga_output(q, count, &busy_bit, aob);
|
2008-12-25 15:38:48 +03:00
|
|
|
switch (cc) {
|
2008-07-17 19:16:48 +04:00
|
|
|
case 0:
|
|
|
|
break;
|
2008-12-25 15:38:48 +03:00
|
|
|
case 2:
|
|
|
|
if (busy_bit) {
|
2011-08-03 18:44:17 +04:00
|
|
|
while (++retries < QDIO_BUSY_BIT_RETRIES) {
|
|
|
|
mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
|
2012-05-09 18:27:34 +04:00
|
|
|
cc = -EBUSY;
|
|
|
|
} else {
|
2009-03-26 17:24:31 +03:00
|
|
|
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
|
2012-05-09 18:27:34 +04:00
|
|
|
cc = -ENOBUFS;
|
|
|
|
}
|
2008-12-25 15:38:48 +03:00
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
case 3:
|
|
|
|
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
|
2012-05-09 18:27:34 +04:00
|
|
|
cc = -EIO;
|
2008-12-25 15:38:48 +03:00
|
|
|
break;
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
2011-08-03 18:44:17 +04:00
|
|
|
if (retries) {
|
|
|
|
DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
|
|
|
|
DBF_ERROR("count:%u", retries);
|
|
|
|
}
|
2009-03-26 17:24:31 +03:00
|
|
|
return cc;
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
|
2020-10-01 10:47:47 +03:00
|
|
|
void qdio_outbound_tasklet(struct tasklet_struct *t)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
2020-10-01 10:47:47 +03:00
|
|
|
struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
|
|
|
|
struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
|
2019-04-08 14:32:12 +03:00
|
|
|
unsigned int start = q->first_to_check;
|
2021-01-30 14:44:17 +03:00
|
|
|
unsigned int error = 0;
|
2019-03-28 12:39:25 +03:00
|
|
|
int count;
|
|
|
|
|
2010-01-04 11:05:42 +03:00
|
|
|
qperf_inc(q, tasklet_outbound);
|
2012-10-24 14:38:35 +04:00
|
|
|
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2021-01-30 15:22:56 +03:00
|
|
|
count = get_outbound_buffer_frontier(q, start, &error);
|
2019-04-08 14:32:12 +03:00
|
|
|
if (count) {
|
|
|
|
q->first_to_check = add_buf(start, count);
|
2021-01-30 14:28:30 +03:00
|
|
|
|
|
|
|
if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
|
|
|
|
qperf_inc(q, outbound_handler);
|
|
|
|
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
|
|
|
|
start, count);
|
|
|
|
|
2021-01-30 14:44:17 +03:00
|
|
|
q->handler(q->irq_ptr->cdev, error, q->nr, start,
|
|
|
|
count, q->irq_ptr->int_parm);
|
2021-01-30 14:28:30 +03:00
|
|
|
}
|
2019-04-08 14:32:12 +03:00
|
|
|
}
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2018-10-30 10:19:54 +03:00
|
|
|
if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
|
|
|
|
!qdio_outbound_q_done(q))
|
|
|
|
goto sched;
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
if (q->u.out.pci_out_enabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we know that queue type is either qeth without pci enabled
|
2011-10-30 18:17:06 +04:00
|
|
|
* or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
|
|
|
|
* is noticed and outbound_handler is called after some time.
|
2008-07-17 19:16:48 +04:00
|
|
|
*/
|
|
|
|
if (qdio_outbound_q_done(q))
|
2016-08-05 13:33:10 +03:00
|
|
|
del_timer_sync(&q->u.out.timer);
|
2010-01-04 11:05:42 +03:00
|
|
|
else
|
2016-08-05 13:33:10 +03:00
|
|
|
if (!timer_pending(&q->u.out.timer) &&
|
|
|
|
likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
|
2008-07-17 19:16:48 +04:00
|
|
|
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
|
[S390] qdio: proper kill of qdio tasklets
The queue tasklets were stopped with tasklet_disable. Although tasklet_disable
prevents the tasklet from beeing executed it is still possible that a tasklet
is scheduled on a CPU at that point. A following qdio_establish calls
tasklet_init which clears the tasklet count and the tasklet state leading to
the following Oops:
<2>kernel BUG at kernel/softirq.c:392!
<4>illegal operation: 0001 [#1] SMP
<4>Modules linked in: iptable_filter ip_tables x_tables dm_round_robin dm_multipath scsi_dh sg sd_mod crc_t10dif nfs lockd nfs
_acl sunrpc fuse loop dm_mod qeth_l3 ipv6 zfcp qeth scsi_transport_fc qdio scsi_tgt scsi_mod chsc_sch ccwgroup dasd_eckd_mod dasdm
od ext3 mbcache jbd
<4>Supported: Yes
<4>CPU: 0 Not tainted 2.6.27.13-1.1.mz13-default #1
<4>Process blast.LzS_64 (pid: 16445, task: 000000006cc02538, ksp: 000000006cb67998)
<4>Krnl PSW : 0704c00180000000 00000000001399f4 (tasklet_action+0xc8/0x1d4)
<4> R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3
<4>Krnl GPRS: ffffffff00000030 0000000000000002 0000000000000002 fffffffffffffffe
<4> 000000000013aabe 00000000003b6a18 fffffffffffffffd 0000000000000000
<4> 00000000006705a8 000000007d0914a8 000000007d0914b0 000000007fecfd30
<4> 0000000000000000 00000000003b63e8 000000007fecfd90 000000007fecfd30
<4>Krnl Code: 00000000001399e8: b9200021 cgr %r2,%r1
<4> 00000000001399ec: a7740004 brc 7,1399f4
<4> 00000000001399f0: a7f40001 brc 15,1399f2
<4> >00000000001399f4: c0100027e8ee larl %r1,636bd0
<4> 00000000001399fa: bf1f1008 icm %r1,15,8(%r1)
<4> 00000000001399fe: a7840019 brc 8,139a30
<4> 0000000000139a02: c0300027e8ef larl %r3,636be0
<4> 0000000000139a08: e3c030000004 lg %r12,0(%r3)
<4>Call Trace:
<4>([<0000000000139c12>] tasklet_hi_action+0x112/0x1d4)
<4> [<000000000013aabe>] __do_softirq+0xde/0x1c4
<4> [<000000000010fa2e>] do_softirq+0x96/0xb0
<4> [<000000000013a8d8>] irq_exit+0x70/0xcc
<4> [<000000000010d1d8>] do_extint+0xf0/0x110
<4> [<0000000000113b10>] ext_no_vtime+0x16/0x1a
<4> [<000003e0000a3662>] ext3_dirty_inode+0xe6/0xe8 [ext3]
<4>([<00000000001f6cf2>] __mark_inode_dirty+0x52/0x1d4)
<4> [<000003e0000a44f0>] ext3_ordered_write_end+0x138/0x190 [ext3]
<4> [<000000000018d5ec>] generic_perform_write+0x174/0x230
<4> [<0000000000190144>] generic_file_buffered_write+0xb4/0x194
<4> [<0000000000190864>] __generic_file_aio_write_nolock+0x418/0x454
<4> [<0000000000190ee2>] generic_file_aio_write+0x76/0xe4
<4> [<000003e0000a05c2>] ext3_file_write+0x3e/0xc8 [ext3]
<4> [<00000000001cc2fe>] do_sync_write+0xd6/0x120
<4> [<00000000001ccfc8>] vfs_write+0xac/0x184
<4> [<00000000001cd218>] SyS_write+0x68/0xe0
<4> [<0000000000113402>] sysc_noemu+0x10/0x16
<4> [<0000020000043188>] 0x20000043188
<4>Last Breaking-Event-Address:
<4> [<00000000001399f0>] tasklet_action+0xc4/0x1d4
<6>qdio: 0.0.c61b ZFCP on SC f67 using AI:1 QEBSM:0 PCI:1 TDD:1 SIGA: W AOP
<4> <0>Kernel panic - not syncing: Fatal exception in interrupt
Use tasklet_kill instead of tasklet_disbale. Since tasklet_schedule must not be
called after tasklet_kill use the QDIO_IRQ_STATE_STOPPED to inidicate that a
queue is going down and prevent further tasklet schedules in that case.
Remove superflous tasklet_schedule from input queue setup, at that time
the queues are not ready so the schedule results in a NOP.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2009-03-26 17:24:26 +03:00
|
|
|
return;
|
|
|
|
|
|
|
|
sched:
|
2016-08-05 13:33:10 +03:00
|
|
|
qdio_tasklet_schedule(q);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
|
2017-10-05 03:54:35 +03:00
|
|
|
void qdio_outbound_timer(struct timer_list *t)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
2017-10-05 03:54:35 +03:00
|
|
|
struct qdio_q *q = from_timer(q, t, u.out.timer);
|
[S390] qdio: proper kill of qdio tasklets
The queue tasklets were stopped with tasklet_disable. Although tasklet_disable
prevents the tasklet from beeing executed it is still possible that a tasklet
is scheduled on a CPU at that point. A following qdio_establish calls
tasklet_init which clears the tasklet count and the tasklet state leading to
the following Oops:
<2>kernel BUG at kernel/softirq.c:392!
<4>illegal operation: 0001 [#1] SMP
<4>Modules linked in: iptable_filter ip_tables x_tables dm_round_robin dm_multipath scsi_dh sg sd_mod crc_t10dif nfs lockd nfs
_acl sunrpc fuse loop dm_mod qeth_l3 ipv6 zfcp qeth scsi_transport_fc qdio scsi_tgt scsi_mod chsc_sch ccwgroup dasd_eckd_mod dasdm
od ext3 mbcache jbd
<4>Supported: Yes
<4>CPU: 0 Not tainted 2.6.27.13-1.1.mz13-default #1
<4>Process blast.LzS_64 (pid: 16445, task: 000000006cc02538, ksp: 000000006cb67998)
<4>Krnl PSW : 0704c00180000000 00000000001399f4 (tasklet_action+0xc8/0x1d4)
<4> R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3
<4>Krnl GPRS: ffffffff00000030 0000000000000002 0000000000000002 fffffffffffffffe
<4> 000000000013aabe 00000000003b6a18 fffffffffffffffd 0000000000000000
<4> 00000000006705a8 000000007d0914a8 000000007d0914b0 000000007fecfd30
<4> 0000000000000000 00000000003b63e8 000000007fecfd90 000000007fecfd30
<4>Krnl Code: 00000000001399e8: b9200021 cgr %r2,%r1
<4> 00000000001399ec: a7740004 brc 7,1399f4
<4> 00000000001399f0: a7f40001 brc 15,1399f2
<4> >00000000001399f4: c0100027e8ee larl %r1,636bd0
<4> 00000000001399fa: bf1f1008 icm %r1,15,8(%r1)
<4> 00000000001399fe: a7840019 brc 8,139a30
<4> 0000000000139a02: c0300027e8ef larl %r3,636be0
<4> 0000000000139a08: e3c030000004 lg %r12,0(%r3)
<4>Call Trace:
<4>([<0000000000139c12>] tasklet_hi_action+0x112/0x1d4)
<4> [<000000000013aabe>] __do_softirq+0xde/0x1c4
<4> [<000000000010fa2e>] do_softirq+0x96/0xb0
<4> [<000000000013a8d8>] irq_exit+0x70/0xcc
<4> [<000000000010d1d8>] do_extint+0xf0/0x110
<4> [<0000000000113b10>] ext_no_vtime+0x16/0x1a
<4> [<000003e0000a3662>] ext3_dirty_inode+0xe6/0xe8 [ext3]
<4>([<00000000001f6cf2>] __mark_inode_dirty+0x52/0x1d4)
<4> [<000003e0000a44f0>] ext3_ordered_write_end+0x138/0x190 [ext3]
<4> [<000000000018d5ec>] generic_perform_write+0x174/0x230
<4> [<0000000000190144>] generic_file_buffered_write+0xb4/0x194
<4> [<0000000000190864>] __generic_file_aio_write_nolock+0x418/0x454
<4> [<0000000000190ee2>] generic_file_aio_write+0x76/0xe4
<4> [<000003e0000a05c2>] ext3_file_write+0x3e/0xc8 [ext3]
<4> [<00000000001cc2fe>] do_sync_write+0xd6/0x120
<4> [<00000000001ccfc8>] vfs_write+0xac/0x184
<4> [<00000000001cd218>] SyS_write+0x68/0xe0
<4> [<0000000000113402>] sysc_noemu+0x10/0x16
<4> [<0000020000043188>] 0x20000043188
<4>Last Breaking-Event-Address:
<4> [<00000000001399f0>] tasklet_action+0xc4/0x1d4
<6>qdio: 0.0.c61b ZFCP on SC f67 using AI:1 QEBSM:0 PCI:1 TDD:1 SIGA: W AOP
<4> <0>Kernel panic - not syncing: Fatal exception in interrupt
Use tasklet_kill instead of tasklet_disbale. Since tasklet_schedule must not be
called after tasklet_kill use the QDIO_IRQ_STATE_STOPPED to inidicate that a
queue is going down and prevent further tasklet schedules in that case.
Remove superflous tasklet_schedule from input queue setup, at that time
the queues are not ready so the schedule results in a NOP.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2009-03-26 17:24:26 +03:00
|
|
|
|
2016-08-05 13:33:10 +03:00
|
|
|
qdio_tasklet_schedule(q);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
|
2018-10-30 10:21:27 +03:00
|
|
|
static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
|
|
|
struct qdio_q *out;
|
|
|
|
int i;
|
|
|
|
|
2019-08-23 12:48:48 +03:00
|
|
|
if (!pci_out_supported(irq) || !irq->scan_threshold)
|
2008-07-17 19:16:48 +04:00
|
|
|
return;
|
|
|
|
|
2018-10-30 10:19:54 +03:00
|
|
|
for_each_output_queue(irq, out, i)
|
2008-07-17 19:16:48 +04:00
|
|
|
if (!qdio_outbound_q_done(out))
|
2016-08-05 13:33:10 +03:00
|
|
|
qdio_tasklet_schedule(out);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
|
|
|
|
enum qdio_irq_states state)
|
|
|
|
{
|
2008-12-25 15:38:46 +03:00
|
|
|
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
irq_ptr->state = state;
|
|
|
|
mb();
|
|
|
|
}
|
|
|
|
|
2008-12-25 15:38:46 +03:00
|
|
|
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
|
|
|
if (irb->esw.esw0.erw.cons) {
|
2008-12-25 15:38:46 +03:00
|
|
|
DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
|
|
|
|
DBF_ERROR_HEX(irb, 64);
|
|
|
|
DBF_ERROR_HEX(irb->ecw, 64);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PCI interrupt handler */
|
|
|
|
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct qdio_q *q;
|
|
|
|
|
2016-08-05 13:33:10 +03:00
|
|
|
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
|
[S390] qdio: proper kill of qdio tasklets
The queue tasklets were stopped with tasklet_disable. Although tasklet_disable
prevents the tasklet from beeing executed it is still possible that a tasklet
is scheduled on a CPU at that point. A following qdio_establish calls
tasklet_init which clears the tasklet count and the tasklet state leading to
the following Oops:
<2>kernel BUG at kernel/softirq.c:392!
<4>illegal operation: 0001 [#1] SMP
<4>Modules linked in: iptable_filter ip_tables x_tables dm_round_robin dm_multipath scsi_dh sg sd_mod crc_t10dif nfs lockd nfs
_acl sunrpc fuse loop dm_mod qeth_l3 ipv6 zfcp qeth scsi_transport_fc qdio scsi_tgt scsi_mod chsc_sch ccwgroup dasd_eckd_mod dasdm
od ext3 mbcache jbd
<4>Supported: Yes
<4>CPU: 0 Not tainted 2.6.27.13-1.1.mz13-default #1
<4>Process blast.LzS_64 (pid: 16445, task: 000000006cc02538, ksp: 000000006cb67998)
<4>Krnl PSW : 0704c00180000000 00000000001399f4 (tasklet_action+0xc8/0x1d4)
<4> R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3
<4>Krnl GPRS: ffffffff00000030 0000000000000002 0000000000000002 fffffffffffffffe
<4> 000000000013aabe 00000000003b6a18 fffffffffffffffd 0000000000000000
<4> 00000000006705a8 000000007d0914a8 000000007d0914b0 000000007fecfd30
<4> 0000000000000000 00000000003b63e8 000000007fecfd90 000000007fecfd30
<4>Krnl Code: 00000000001399e8: b9200021 cgr %r2,%r1
<4> 00000000001399ec: a7740004 brc 7,1399f4
<4> 00000000001399f0: a7f40001 brc 15,1399f2
<4> >00000000001399f4: c0100027e8ee larl %r1,636bd0
<4> 00000000001399fa: bf1f1008 icm %r1,15,8(%r1)
<4> 00000000001399fe: a7840019 brc 8,139a30
<4> 0000000000139a02: c0300027e8ef larl %r3,636be0
<4> 0000000000139a08: e3c030000004 lg %r12,0(%r3)
<4>Call Trace:
<4>([<0000000000139c12>] tasklet_hi_action+0x112/0x1d4)
<4> [<000000000013aabe>] __do_softirq+0xde/0x1c4
<4> [<000000000010fa2e>] do_softirq+0x96/0xb0
<4> [<000000000013a8d8>] irq_exit+0x70/0xcc
<4> [<000000000010d1d8>] do_extint+0xf0/0x110
<4> [<0000000000113b10>] ext_no_vtime+0x16/0x1a
<4> [<000003e0000a3662>] ext3_dirty_inode+0xe6/0xe8 [ext3]
<4>([<00000000001f6cf2>] __mark_inode_dirty+0x52/0x1d4)
<4> [<000003e0000a44f0>] ext3_ordered_write_end+0x138/0x190 [ext3]
<4> [<000000000018d5ec>] generic_perform_write+0x174/0x230
<4> [<0000000000190144>] generic_file_buffered_write+0xb4/0x194
<4> [<0000000000190864>] __generic_file_aio_write_nolock+0x418/0x454
<4> [<0000000000190ee2>] generic_file_aio_write+0x76/0xe4
<4> [<000003e0000a05c2>] ext3_file_write+0x3e/0xc8 [ext3]
<4> [<00000000001cc2fe>] do_sync_write+0xd6/0x120
<4> [<00000000001ccfc8>] vfs_write+0xac/0x184
<4> [<00000000001cd218>] SyS_write+0x68/0xe0
<4> [<0000000000113402>] sysc_noemu+0x10/0x16
<4> [<0000020000043188>] 0x20000043188
<4>Last Breaking-Event-Address:
<4> [<00000000001399f0>] tasklet_action+0xc4/0x1d4
<6>qdio: 0.0.c61b ZFCP on SC f67 using AI:1 QEBSM:0 PCI:1 TDD:1 SIGA: W AOP
<4> <0>Kernel panic - not syncing: Fatal exception in interrupt
Use tasklet_kill instead of tasklet_disbale. Since tasklet_schedule must not be
called after tasklet_kill use the QDIO_IRQ_STATE_STOPPED to inidicate that a
queue is going down and prevent further tasklet schedules in that case.
Remove superflous tasklet_schedule from input queue setup, at that time
the queues are not ready so the schedule results in a NOP.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2009-03-26 17:24:26 +03:00
|
|
|
return;
|
|
|
|
|
2020-06-02 15:09:10 +03:00
|
|
|
qdio_deliver_irq(irq_ptr);
|
2020-09-15 10:04:39 +03:00
|
|
|
irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2019-08-23 12:48:48 +03:00
|
|
|
if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
|
2008-07-17 19:16:48 +04:00
|
|
|
return;
|
|
|
|
|
|
|
|
for_each_output_queue(irq_ptr, q, i) {
|
|
|
|
if (qdio_outbound_q_done(q))
|
|
|
|
continue;
|
2011-01-05 14:47:54 +03:00
|
|
|
if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
|
2008-07-17 19:16:48 +04:00
|
|
|
qdio_siga_sync_q(q);
|
2016-08-05 13:33:10 +03:00
|
|
|
qdio_tasklet_schedule(q);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-02 15:23:32 +03:00
|
|
|
static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
|
|
|
|
unsigned long intparm, int cstat,
|
|
|
|
int dstat)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
|
|
|
struct qdio_q *q;
|
|
|
|
|
2008-12-25 15:38:46 +03:00
|
|
|
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
|
|
|
|
DBF_ERROR("intp :%lx", intparm);
|
|
|
|
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
if (irq_ptr->nr_input_qs) {
|
|
|
|
q = irq_ptr->input_qs[0];
|
|
|
|
} else if (irq_ptr->nr_output_qs) {
|
|
|
|
q = irq_ptr->output_qs[0];
|
|
|
|
} else {
|
|
|
|
dump_stack();
|
|
|
|
goto no_handler;
|
|
|
|
}
|
2011-08-15 16:40:31 +04:00
|
|
|
|
2012-05-09 18:27:34 +04:00
|
|
|
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
|
2020-05-07 11:21:53 +03:00
|
|
|
q->nr, q->first_to_check, 0, irq_ptr->int_parm);
|
2008-07-17 19:16:48 +04:00
|
|
|
no_handler:
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
|
2012-03-11 19:59:32 +04:00
|
|
|
/*
|
|
|
|
* In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
|
|
|
|
* Therefore we call the LGR detection function here.
|
|
|
|
*/
|
|
|
|
lgr_info_log();
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
|
2020-06-02 15:23:32 +03:00
|
|
|
static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
|
2009-06-12 12:26:28 +04:00
|
|
|
int dstat)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
2009-06-12 12:26:28 +04:00
|
|
|
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2009-06-12 12:26:28 +04:00
|
|
|
if (cstat)
|
2008-07-17 19:16:48 +04:00
|
|
|
goto error;
|
2009-06-12 12:26:28 +04:00
|
|
|
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
|
2008-07-17 19:16:48 +04:00
|
|
|
goto error;
|
2009-06-12 12:26:28 +04:00
|
|
|
if (!(dstat & DEV_STAT_DEV_END))
|
|
|
|
goto error;
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
|
|
|
|
return;
|
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
error:
|
2008-12-25 15:38:46 +03:00
|
|
|
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
|
|
|
|
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
|
2008-07-17 19:16:48 +04:00
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* qdio interrupt handler */
|
|
|
|
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
|
|
|
|
struct irb *irb)
|
|
|
|
{
|
|
|
|
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
2016-07-28 21:30:31 +03:00
|
|
|
struct subchannel_id schid;
|
2008-07-17 19:16:48 +04:00
|
|
|
int cstat, dstat;
|
|
|
|
|
|
|
|
if (!intparm || !irq_ptr) {
|
2016-07-28 21:30:31 +03:00
|
|
|
ccw_device_get_schid(cdev, &schid);
|
|
|
|
DBF_ERROR("qint:%4x", schid.sch_no);
|
2008-07-17 19:16:48 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-05-17 12:00:14 +04:00
|
|
|
if (irq_ptr->perf_stat_enabled)
|
|
|
|
irq_ptr->perf_stat.qdio_int++;
|
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
if (IS_ERR(irb)) {
|
2012-10-24 14:38:35 +04:00
|
|
|
DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
|
|
|
|
wake_up(&cdev->private->wait_q);
|
|
|
|
return;
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
2008-12-25 15:38:46 +03:00
|
|
|
qdio_irq_check_sense(irq_ptr, irb);
|
2008-07-17 19:16:48 +04:00
|
|
|
cstat = irb->scsw.cmd.cstat;
|
|
|
|
dstat = irb->scsw.cmd.dstat;
|
|
|
|
|
|
|
|
switch (irq_ptr->state) {
|
|
|
|
case QDIO_IRQ_STATE_INACTIVE:
|
2020-06-02 15:23:32 +03:00
|
|
|
qdio_establish_handle_irq(irq_ptr, cstat, dstat);
|
2008-07-17 19:16:48 +04:00
|
|
|
break;
|
|
|
|
case QDIO_IRQ_STATE_CLEANUP:
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
|
|
|
|
break;
|
|
|
|
case QDIO_IRQ_STATE_ESTABLISHED:
|
|
|
|
case QDIO_IRQ_STATE_ACTIVE:
|
|
|
|
if (cstat & SCHN_STAT_PCI) {
|
|
|
|
qdio_int_handler_pci(irq_ptr);
|
|
|
|
return;
|
|
|
|
}
|
2009-06-12 12:26:28 +04:00
|
|
|
if (cstat || dstat)
|
2020-06-02 15:23:32 +03:00
|
|
|
qdio_handle_activate_check(irq_ptr, intparm, cstat,
|
2008-07-17 19:16:48 +04:00
|
|
|
dstat);
|
2009-06-12 12:26:28 +04:00
|
|
|
break;
|
2010-02-09 11:46:08 +03:00
|
|
|
case QDIO_IRQ_STATE_STOPPED:
|
|
|
|
break;
|
2008-07-17 19:16:48 +04:00
|
|
|
default:
|
2012-10-24 14:38:35 +04:00
|
|
|
WARN_ON_ONCE(1);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
wake_up(&cdev->private->wait_q);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qdio_get_ssqd_desc - get qdio subchannel description
|
|
|
|
* @cdev: ccw device to get description for
|
2008-12-25 15:38:43 +03:00
|
|
|
* @data: where to store the ssqd
|
2008-07-17 19:16:48 +04:00
|
|
|
*
|
2008-12-25 15:38:43 +03:00
|
|
|
* Returns 0 or an error code. The results of the chsc are stored in the
|
|
|
|
* specified structure.
|
2008-07-17 19:16:48 +04:00
|
|
|
*/
|
2008-12-25 15:38:43 +03:00
|
|
|
int qdio_get_ssqd_desc(struct ccw_device *cdev,
|
|
|
|
struct qdio_ssqd_desc *data)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
2016-07-28 21:30:31 +03:00
|
|
|
struct subchannel_id schid;
|
2008-07-17 19:16:48 +04:00
|
|
|
|
2008-12-25 15:38:43 +03:00
|
|
|
if (!cdev || !cdev->private)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-07-28 21:30:31 +03:00
|
|
|
ccw_device_get_schid(cdev, &schid);
|
|
|
|
DBF_EVENT("get ssqd:%4x", schid.sch_no);
|
|
|
|
return qdio_setup_get_ssqd(NULL, &schid, data);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
|
|
|
|
|
2020-02-10 16:58:07 +03:00
|
|
|
static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
|
2008-07-17 19:16:48 +04:00
|
|
|
{
|
|
|
|
struct qdio_q *q;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_output_queue(irq_ptr, q, i) {
|
2016-08-05 13:33:10 +03:00
|
|
|
del_timer_sync(&q->u.out.timer);
|
2020-06-02 15:09:10 +03:00
|
|
|
tasklet_kill(&q->u.out.tasklet);
|
2008-07-17 19:16:48 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qdio_shutdown - shut down a qdio subchannel
|
|
|
|
* @cdev: associated ccw device
|
|
|
|
* @how: use halt or clear to shutdown
|
|
|
|
*/
|
|
|
|
int qdio_shutdown(struct ccw_device *cdev, int how)
|
|
|
|
{
|
2008-12-25 15:38:46 +03:00
|
|
|
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
2016-07-28 21:30:31 +03:00
|
|
|
struct subchannel_id schid;
|
2008-07-17 19:16:48 +04:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!irq_ptr)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2012-10-24 14:38:35 +04:00
|
|
|
WARN_ON_ONCE(irqs_disabled());
|
2016-07-28 21:30:31 +03:00
|
|
|
ccw_device_get_schid(cdev, &schid);
|
|
|
|
DBF_EVENT("qshutdown:%4x", schid.sch_no);
|
2008-12-25 15:38:46 +03:00
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
mutex_lock(&irq_ptr->setup_mutex);
|
|
|
|
/*
|
|
|
|
* Subchannel was already shot down. We cannot prevent being called
|
|
|
|
* twice since cio may trigger a shutdown asynchronously.
|
|
|
|
*/
|
|
|
|
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
|
|
|
|
mutex_unlock(&irq_ptr->setup_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
[S390] qdio: proper kill of qdio tasklets
The queue tasklets were stopped with tasklet_disable. Although tasklet_disable
prevents the tasklet from beeing executed it is still possible that a tasklet
is scheduled on a CPU at that point. A following qdio_establish calls
tasklet_init which clears the tasklet count and the tasklet state leading to
the following Oops:
<2>kernel BUG at kernel/softirq.c:392!
<4>illegal operation: 0001 [#1] SMP
<4>Modules linked in: iptable_filter ip_tables x_tables dm_round_robin dm_multipath scsi_dh sg sd_mod crc_t10dif nfs lockd nfs
_acl sunrpc fuse loop dm_mod qeth_l3 ipv6 zfcp qeth scsi_transport_fc qdio scsi_tgt scsi_mod chsc_sch ccwgroup dasd_eckd_mod dasdm
od ext3 mbcache jbd
<4>Supported: Yes
<4>CPU: 0 Not tainted 2.6.27.13-1.1.mz13-default #1
<4>Process blast.LzS_64 (pid: 16445, task: 000000006cc02538, ksp: 000000006cb67998)
<4>Krnl PSW : 0704c00180000000 00000000001399f4 (tasklet_action+0xc8/0x1d4)
<4> R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 EA:3
<4>Krnl GPRS: ffffffff00000030 0000000000000002 0000000000000002 fffffffffffffffe
<4> 000000000013aabe 00000000003b6a18 fffffffffffffffd 0000000000000000
<4> 00000000006705a8 000000007d0914a8 000000007d0914b0 000000007fecfd30
<4> 0000000000000000 00000000003b63e8 000000007fecfd90 000000007fecfd30
<4>Krnl Code: 00000000001399e8: b9200021 cgr %r2,%r1
<4> 00000000001399ec: a7740004 brc 7,1399f4
<4> 00000000001399f0: a7f40001 brc 15,1399f2
<4> >00000000001399f4: c0100027e8ee larl %r1,636bd0
<4> 00000000001399fa: bf1f1008 icm %r1,15,8(%r1)
<4> 00000000001399fe: a7840019 brc 8,139a30
<4> 0000000000139a02: c0300027e8ef larl %r3,636be0
<4> 0000000000139a08: e3c030000004 lg %r12,0(%r3)
<4>Call Trace:
<4>([<0000000000139c12>] tasklet_hi_action+0x112/0x1d4)
<4> [<000000000013aabe>] __do_softirq+0xde/0x1c4
<4> [<000000000010fa2e>] do_softirq+0x96/0xb0
<4> [<000000000013a8d8>] irq_exit+0x70/0xcc
<4> [<000000000010d1d8>] do_extint+0xf0/0x110
<4> [<0000000000113b10>] ext_no_vtime+0x16/0x1a
<4> [<000003e0000a3662>] ext3_dirty_inode+0xe6/0xe8 [ext3]
<4>([<00000000001f6cf2>] __mark_inode_dirty+0x52/0x1d4)
<4> [<000003e0000a44f0>] ext3_ordered_write_end+0x138/0x190 [ext3]
<4> [<000000000018d5ec>] generic_perform_write+0x174/0x230
<4> [<0000000000190144>] generic_file_buffered_write+0xb4/0x194
<4> [<0000000000190864>] __generic_file_aio_write_nolock+0x418/0x454
<4> [<0000000000190ee2>] generic_file_aio_write+0x76/0xe4
<4> [<000003e0000a05c2>] ext3_file_write+0x3e/0xc8 [ext3]
<4> [<00000000001cc2fe>] do_sync_write+0xd6/0x120
<4> [<00000000001ccfc8>] vfs_write+0xac/0x184
<4> [<00000000001cd218>] SyS_write+0x68/0xe0
<4> [<0000000000113402>] sysc_noemu+0x10/0x16
<4> [<0000020000043188>] 0x20000043188
<4>Last Breaking-Event-Address:
<4> [<00000000001399f0>] tasklet_action+0xc4/0x1d4
<6>qdio: 0.0.c61b ZFCP on SC f67 using AI:1 QEBSM:0 PCI:1 TDD:1 SIGA: W AOP
<4> <0>Kernel panic - not syncing: Fatal exception in interrupt
Use tasklet_kill instead of tasklet_disbale. Since tasklet_schedule must not be
called after tasklet_kill use the QDIO_IRQ_STATE_STOPPED to inidicate that a
queue is going down and prevent further tasklet schedules in that case.
Remove superflous tasklet_schedule from input queue setup, at that time
the queues are not ready so the schedule results in a NOP.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2009-03-26 17:24:26 +03:00
|
|
|
/*
|
|
|
|
* Indicate that the device is going down. Scheduling the queue
|
|
|
|
* tasklets is forbidden from here on.
|
|
|
|
*/
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
|
|
|
|
|
2020-02-10 16:58:07 +03:00
|
|
|
qdio_shutdown_queues(irq_ptr);
|
2013-02-26 16:08:34 +04:00
|
|
|
qdio_shutdown_debug_entries(irq_ptr);
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
/* cleanup subchannel */
|
2016-07-29 14:41:20 +03:00
|
|
|
spin_lock_irq(get_ccwdev_lock(cdev));
|
2020-04-09 11:55:16 +03:00
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
|
2008-07-17 19:16:48 +04:00
|
|
|
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
|
|
|
|
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
|
|
|
|
else
|
|
|
|
/* default behaviour is halt */
|
|
|
|
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
|
2020-04-09 11:55:16 +03:00
|
|
|
spin_unlock_irq(get_ccwdev_lock(cdev));
|
2008-07-17 19:16:48 +04:00
|
|
|
if (rc) {
|
2008-12-25 15:38:46 +03:00
|
|
|
DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
|
|
|
|
DBF_ERROR("rc:%4d", rc);
|
2008-07-17 19:16:48 +04:00
|
|
|
goto no_cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
wait_event_interruptible_timeout(cdev->private->wait_q,
|
|
|
|
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
|
|
|
|
irq_ptr->state == QDIO_IRQ_STATE_ERR,
|
|
|
|
10 * HZ);
|
|
|
|
|
|
|
|
no_cleanup:
|
|
|
|
qdio_shutdown_thinint(irq_ptr);
|
2020-04-09 11:55:16 +03:00
|
|
|
qdio_shutdown_irq(irq_ptr);
|
2008-07-17 19:16:48 +04:00
|
|
|
|
|
|
|
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
|
|
|
|
mutex_unlock(&irq_ptr->setup_mutex);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qdio_shutdown);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qdio_free - free data structures for a qdio subchannel
|
|
|
|
* @cdev: associated ccw device
|
|
|
|
*/
|
|
|
|
int qdio_free(struct ccw_device *cdev)
|
|
|
|
{
|
2008-12-25 15:38:46 +03:00
|
|
|
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
2016-07-28 21:30:31 +03:00
|
|
|
struct subchannel_id schid;
|
2008-08-21 21:46:34 +04:00
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
if (!irq_ptr)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2016-07-28 21:30:31 +03:00
|
|
|
ccw_device_get_schid(cdev, &schid);
|
|
|
|
DBF_EVENT("qfree:%4x", schid.sch_no);
|
2014-06-12 16:24:45 +04:00
|
|
|
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
|
2008-07-17 19:16:48 +04:00
|
|
|
mutex_lock(&irq_ptr->setup_mutex);
|
2008-12-25 15:38:46 +03:00
|
|
|
|
2014-06-12 16:24:45 +04:00
|
|
|
irq_ptr->debug_area = NULL;
|
2008-07-17 19:16:48 +04:00
|
|
|
cdev->private->qdio_data = NULL;
|
|
|
|
mutex_unlock(&irq_ptr->setup_mutex);
|
|
|
|
|
2020-04-03 00:22:18 +03:00
|
|
|
qdio_free_queues(irq_ptr);
|
|
|
|
free_page((unsigned long) irq_ptr->qdr);
|
|
|
|
free_page(irq_ptr->chsc_page);
|
|
|
|
free_page((unsigned long) irq_ptr);
|
2008-07-17 19:16:48 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(qdio_free);
|
|
|
|
|
|
|
|
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 *
 * Returns 0 on success, -EINVAL for out-of-range queue counts and
 * -ENOMEM on allocation failure.  On success the result is attached to
 * @cdev and must later be released with qdio_free().
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	/* publish only after everything above succeeded */
	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

/* unwind in reverse order of allocation */
err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
|
|
|
|
|
2020-03-20 16:00:00 +03:00
|
|
|
/* Dump the relevant fields of @data into the device's debug feature. */
static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	/* for pointer-sized fields only the address is logged, not the data */
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}
|
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 *
 * Returns 0 on success; -ENODEV if qdio_allocate() has not run for
 * @cdev, -EINVAL for invalid @init_data, -EIO if the subchannel does
 * not reach the ESTABLISHED state, or an error from the thinint setup
 * or the channel I/O start.
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	/* cannot establish more queues than were allocated */
	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	/* every requested queue direction needs a completion handler */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	/* ccw1.cda is a u32, hence the DMA page for the qdr */
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* the interrupt handler moves state to ESTABLISHED or ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
|
|
|
|
|
|
|
|
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Returns 0 on success, -ENODEV without qdio data, -EBUSY if the
 * subchannel is still INACTIVE (not yet established), -EIO when
 * activation ends in the STOPPED or ERR state, or an error from the
 * channel I/O start.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	/* qdio_establish() must have run first */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	/* build the ACTIVATE channel program; no data area needed */
	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
|
|
|
|
|
|
|
|
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0, or the result of qdio_siga_input() when the queue needs a
 * SIGA to inform the device of the returned buffers.
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		/* advance the batch window past the buffers given back */
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	/* hand the buffers back to the device as empty */
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block
 *
 * Returns 0 or the result of the SIGA used to kick/sync the queue.
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count,
			   struct qaob *aob)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	/* mark the buffers as primed for the device */
	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;

		/* the hardware interface requires 256-byte aligned AOBs */
		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
|
|
|
|
|
|
|
|
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 * @aob: asynchronous operation block (outbound only)
 *
 * Returns the result of handle_inbound()/handle_outbound(); -EINVAL
 * for out-of-range @bufnr/@count or unknown direction flags, -ENODEV
 * without qdio data, -EIO when the subchannel is not ACTIVE, and 0 for
 * an empty request.
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	/* dispatch on the direction requested by the caller */
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count, aob);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
|
|
|
|
|
2010-09-08 01:14:39 +04:00
|
|
|
/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	/* leave polling mode on all input queues */
	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	/* if the bit was already set again, someone else handles the data */
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
|
|
|
|
|
2019-08-23 12:48:47 +03:00
|
|
|
static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
|
|
|
|
unsigned int *error)
|
|
|
|
{
|
|
|
|
unsigned int start = q->first_to_check;
|
|
|
|
int count;
|
|
|
|
|
2021-01-30 14:44:17 +03:00
|
|
|
*error = 0;
|
|
|
|
count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
|
2021-01-30 15:22:56 +03:00
|
|
|
get_outbound_buffer_frontier(q, start, error);
|
2019-08-23 12:48:47 +03:00
|
|
|
if (count == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*bufnr = start;
|
|
|
|
|
|
|
|
/* for the next time */
|
|
|
|
q->first_to_check = add_buf(start, count);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * qdio_inspect_queue - look for completed buffers on one queue
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: queue number
 * @is_input: select an input (true) or output (false) queue
 * @bufnr: receives the first completed buffer
 * @error: receives the buffers' error state
 *
 * Returns the result of __qdio_inspect_queue(), or -ENODEV if no qdio
 * data is attached to @cdev.
 */
int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq)
		return -ENODEV;

	if (is_input)
		q = irq->input_qs[nr];
	else
		q = irq->output_qs[nr];

	/* sync with the adapter first when this queue requires it */
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);
|
|
|
|
|
2010-09-08 01:14:39 +04:00
|
|
|
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	/*
	 * NOTE(review): int * is passed where __qdio_inspect_queue() takes
	 * unsigned int * — appears intentional here, but confirm it stays
	 * warning-free with the build's flags.
	 */
	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qdio_stop_irq - disable interrupt processing for the device
|
|
|
|
* @cdev: associated ccw_device for the qdio subchannel
|
|
|
|
*
|
|
|
|
* Return codes
|
|
|
|
* 0 - interrupts were already disabled
|
|
|
|
* 1 - interrupts successfully disabled
|
|
|
|
*/
|
2020-03-25 12:35:00 +03:00
|
|
|
int qdio_stop_irq(struct ccw_device *cdev)
|
2010-09-08 01:14:39 +04:00
|
|
|
{
|
|
|
|
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
|
|
|
|
|
|
|
|
if (!irq_ptr)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2020-03-25 12:35:00 +03:00
|
|
|
if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
|
2010-09-08 01:14:39 +04:00
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(qdio_stop_irq);
|
|
|
|
|
2008-07-17 19:16:48 +04:00
|
|
|
/*
 * Module init: bring up the debug feature, the setup caches and the
 * thinint infrastructure; unwind in reverse order on failure.
 */
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}
|
|
|
|
|
|
|
|
/* Module exit: tear down in reverse order of init_QDIO(). */
static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}
|
|
|
|
|
|
|
|
/* Register the module entry and exit points with the kernel. */
module_init(init_QDIO);
module_exit(exit_QDIO);
|