// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}
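
/*
 * Note: the periodic schedule is kept as two parallel lists.  The
 * hardware list (ehci->periodic[], walked via hw_next pointers and
 * Q_NEXT_TYPE tags) holds little-endian DMA pointers followed by the
 * controller; the shadow list (ehci->pshadow[], walked with
 * periodic_next_shadow()) holds the matching CPU pointers.  Traversals
 * below step both lists in lockstep, e.g. (sketch):
 *
 *	prev_p = periodic_next_shadow(ehci, prev_p,
 *			Q_NEXT_TYPE(ehci, *hw_p));
 *	hw_p = shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
 */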

/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
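
/*
 * Note: when ehci->use_dummy_qh is set (a controller quirk configured
 * by platform code elsewhere), an emptied periodic slot must point at
 * an inactive dummy QH rather than carry the EHCI_LIST_END terminator;
 * hence the cpu_to_hc32(ehci, ehci->dummy->qh_dma) store above.
 */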

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kcalloc(utt->hub->maxchild,
					sizeof(*tt_index),
					GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}
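
/*
 * Calling convention for find_tt(), as used by the schedulers below: it
 * may run in atomic context (allocations use GFP_ATOMIC) and returns
 * NULL for a device not below a TT, ERR_PTR(-ENOMEM) on allocation
 * failure, or the (possibly newly created) ehci_tt.  A minimal sketch
 * of a caller:
 *
 *	tt = find_tt(udev);
 *	if (IS_ERR(tt))
 *		return PTR_ERR(tt);
 */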

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
		"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
		ps->ep->desc.bEndpointAddress,
		(sign >= 0 ? "reserve" : "release"), type,
		(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
		ps->phase, ps->phase_uf, ps->period,
		ps->usecs, ps->c_usecs, ps->cs_mask);
}

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		/*
		 * find_tt() will not return any error here as we have
		 * already called find_tt() before calling this function
		 * and checked for any error return. The previous call
		 * would have created the data structure.
		 */
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
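
/*
 * Layout note, derived from the loops above: ehci->bandwidth[] has one
 * entry per microframe across a repeating window of
 * EHCI_BANDWIDTH_FRAMES frames, indexed by (frame << 3) + uframe, so a
 * reservation with period bw_uperiod touches every bw_uperiod'th entry
 * starting at (bw_phase << 3) + phase_uf.  tt->bandwidth[] is per-frame
 * instead, because the TT's downstream full/low-speed bus schedules in
 * whole 1 ms frames.
 */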

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}
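
/*
 * The budget table built here is consumed by tt_available():
 * qh_schedule() below fills ehci->tt_budget via compute_tt_budget() and
 * then probes candidate (frame, uframe) slots against it, so the table
 * only needs to stay valid for one scheduling pass under ehci->lock.
 */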

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
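
/*
 * Worked example: with tt_usecs = { 200, 0, ... }, uframe 0 may hold at
 * most max_tt_usecs[0] = 125 us, so 75 us spill forward giving
 * { 125, 75, ... }.  Since max_tt_usecs[7] is 0, any time carried into
 * the last slot makes the "max_tt_usecs[7] < tt_usecs[7]" check in
 * tt_available() fail for that frame.
 */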

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{
	unsigned		period = ps->bw_period;
	unsigned		usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned	i, uf;
		unsigned short	tt_usecs[8];

		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}
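
/*
 * The "tt->bandwidth[frame] + usecs > 900" test above enforces the
 * USB 2.0 rule that at most 90% of each 1 ms full/low-speed frame
 * (900 us) may go to periodic transfers on the TT's downstream bus,
 * keeping the remainder free for bulk and control traffic.
 */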

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci,
							here.sitd->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif	/* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}
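
/*
 * enable_periodic() and disable_periodic() act as a reference count on
 * the periodic schedule: ehci->periodic_count is raised once per linked
 * periodic endpoint, and the actual hardware on/off transitions are
 * deferred through the PSS-polling helpers so the schedule is only
 * toggled when the controller's status bit agrees with the command bit.
 */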

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink(ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event to
	 * avoid an unnecessary CPU wakeup
	 */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic(ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}

/*
 * It is common for only one interrupt URB to be scheduled on a qh, and
 * since complete() runs in tasklet context, introduce a small delay
 * before unlinking such a qh so it isn't unlinked too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period(
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	uperiod,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	/* success! */
	return 1;
}
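
/*
 * Worked example: with the default uframe_periodic_max of 100 us (80%
 * of a 125 us microframe) and a transfer needing usecs = 30, the loop
 * above rejects any candidate microframe already carrying more than
 * 70 us; "claimed + needed <= limit" is rewritten as
 * "claimed <= limit - needed" so the subtraction happens only once.
 */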

static int check_intr_schedule(
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	struct ehci_qh		*qh,
	unsigned		*c_maskp,
	struct ehci_tt		*tt
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status = 0;
	unsigned	uframe;
	unsigned	c_mask;
	struct ehci_qh_hw	*hw = qh->hw;
	struct ehci_tt		*tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int		i;
		unsigned	frame;

		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}
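
/*
 * Example of the resulting cs_mask layout for a full/low-speed qh:
 * with phase_uf = 0 and c_mask covering uframes 2-4, cs_mask becomes
 * (0x1c << 8) | 0x01 = 0x1c01 -- the low byte is the S-mask
 * (start-split uframe) and the high byte the C-mask (complete-split
 * uframes), matching the QH_SMASK/QH_CMASK fields written into
 * hw_info2 above.  A qh with ps.period == 0 (polled every microframe)
 * instead gets the full QH_SMASK.
 */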

static int intr_submit(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(ehci, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (status)
		qtd_list_free(ehci, urb, qtd_list);

	return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof(*stream), mem_flags);
	if (likely(stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = NO_FRAME;
		stream->ps.phase = NO_FRAME;
	}
	return stream;
}

static void
iso_stream_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device	*dev = urb->dev;
	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	unsigned		tmp;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
	buf1 = is_input ? 1 << 11 : 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);

		stream->highspeed = 1;

		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt->think_time;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max(1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}
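
/*
 * Worked example of the full-speed split budgeting above (illustrative
 * numbers, not taken from any particular device): an endpoint with
 * maxp = 192 bytes needs hs_transfers = max(1, (192 + 187) / 188) = 2
 * high-speed split transactions per packet, since each microframe moves
 * at most 188 bytes of full-speed payload.  For OUT that selects
 * smask_out[1] = 0x03, i.e. start-splits in two consecutive uframes.
 * For IN it yields cs_mask = 0x01 | (((1 << (2 + 2)) - 1) << 10)
 * = 0x3c01: one start-split plus complete-splits in uframes 2..5,
 * following the c-mask rule cited from USB 2.0 section 11.18.4.
 */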

static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep[epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof(*iso_sched);

	size += packets * sizeof(struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
		unsigned	length;
		dma_addr_t	buf;
		u32		trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely(uframe->bufp != (buf & ~(u64)0x0fff)))
			uframe->cross = 1;
	}
}
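
/*
 * Illustrative sketch of the hw_transaction word built above (field
 * layout per the EHCI iTD definition): for a 1024-byte packet whose
 * buffer starts at page offset 0x200, the word is
 * EHCI_ISOC_ACTIVE | 0x200 | (1024 << 16) -- status in the top bits,
 * transfer length in bits 27..16, and the within-page offset in bits
 * 11..0.  itd_patch() later ORs in the page-select bits (14..12).
 */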

static void
iso_sched_free(
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}

static int
itd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
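
/*
 * Sketch of the iTD-count math above, with made-up numbers: for a
 * stream polled every uframe (urb->interval = 1) carrying 16 packets,
 * span = 16 uframes, so num_itds = 1 + (16 + 7) / 8 = 3; each iTD
 * covers up to 8 uframes of one frame, and the extra descriptor
 * absorbs a frame-unaligned start.  For interval >= 8 each packet
 * lands in its own frame, so one iTD per packet is allocated.
 */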

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned		uframe;
	unsigned		i, j;
	unsigned		s_mask, c_mask, m;
	int			usecs = stream->ps.usecs;
	int			c_usecs = stream->ps.c_usecs;
	int			tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {			/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		/*
		 * find_tt() will not return any error here as we have
		 * already called find_tt() before calling this function
		 * and checked for any error return.  The previous call
		 * would have created the data structure.
		 */
		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
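
/*
 * The sign argument folds reserve and release into one routine: a
 * caller passes +1 when a stream is scheduled and -1 when its
 * reservation is torn down, so the same loops add or subtract the
 * per-uframe microseconds (and, for full-speed streams, the TT
 * budget) symmetrically.
 */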

static inline int
itd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe
)
{
	unsigned		usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}
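
/*
 * Worked example with illustrative numbers: if uframe_periodic_max is
 * 100 us and the stream needs 40 us per uframe, the loop accepts a
 * slot only while every visited ehci->bandwidth[] entry is at most
 * 60 us -- i.e. the existing load plus this stream stays within the
 * periodic budget at each occurrence of the stream's uframe period.
 */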

static inline int
sitd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe,
	struct ehci_iso_sched	*sched,
	struct ehci_tt		*tt
)
{
	unsigned		mask, tmp;
	unsigned		frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned	max_used;
		unsigned	i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}
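
/*
 * A quick sanity sketch of the two wrap tests above, with illustrative
 * numbers: take an IN endpoint whose cs_mask is 0x3c01 (SSPLIT in
 * uframe 0, CSPLITs in uframes 2..5).  Placing the start in uframe 3
 * shifts the mask to 0x3c01 << 3 = 0x1e008; bit 16 is set, so
 * "mask & ~0xffff" is nonzero and the candidate is rejected -- a
 * complete-split would have spilled into the next frame.
 */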

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

static int
iso_stream_schedule(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, base, next, start, period, span, now2;
	u32			wrap = 0, skip = 0;
	int			status = 0;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	bool			empty = list_empty(&stream->td_list);
	bool			new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int		done = 0;
			struct ehci_tt	*tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}
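
/*
 * Sketch of the do_ASAP rounding above, with made-up numbers: suppose
 * period = 8, the stream's phase puts start at 5 uframes past the
 * base, and the earliest usable slot is next = 20.  Then
 *	start = next + ((start - next) & (period - 1))
 *	      = 20 + ((5 - 20) & 7) = 20 + 1 = 21,
 * the first uframe at or after "next" that keeps the stream's phase
 * (21 == 5 mod 8), so the reserved microframes still line up.
 */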

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	unsigned		pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
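
/*
 * Descriptive note on the cross-page path above: an iTD has seven 4 KB
 * buffer-pointer slots (pg 0..6), and a packet whose data straddles a
 * page boundary consumes two of them.  The next page's pointer is
 * primed here and itd->pg advanced so the following packet starts from
 * the new page; the commented-out BUG_ON documents the invariant that
 * a crossing packet must never start in the last slot.
 */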

static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow	*prev = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev;
	__hc32			type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb();
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
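
/*
 * Ordering note: the wmb() above is what makes this link safe against
 * a host controller walking the frame list concurrently.  Every field
 * of the new iTD, including its hw_next pointer, must be visible in
 * memory before the periodic-list entry is rewritten to point at it;
 * otherwise the HC could fetch a half-initialized descriptor.
 */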

/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely(list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT: we have all necessary itds */
			/* BUG_ON(list_empty(&iso_sched->td_list)); */

			/* ASSERT: no itds for this endpoint in this uframe */

			itd = list_entry(iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	bool					retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/*
	 * ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	BUG_ON(itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (unlikely(list_is_singular(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}
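
/*
 * Note on the three-way status handling above: iso_stream_schedule()
 * returns 0 to link the URB normally, a negative errno on failure,
 * and a positive value for the "entirely in the past" underrun case,
 * where the URB is given back immediately with per-packet errors
 * already recorded instead of ever touching the hardware schedule.
 */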

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
		unsigned	length;
		dma_addr_t	buf;
		u32		trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
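
/*
 * Sketch of the OUT transaction-position math above, with illustrative
 * numbers: a 200-byte OUT packet needs (200 + 187) / 188 = 2
 * start-splits, so the low bits of buf1 become 2 | (1 << 3) = 0xa --
 * a transaction count of 2 with the TP field marking the first chunk
 * as "BEGIN" rather than "ALL".  A packet of 188 bytes or less fits
 * one start-split and keeps TP = ALL.
 */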

static int
sitd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE: for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	u64			bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
|
|
|
|
|
|
|
|
/* fit urb's sitds into the selected schedule slot; activate as needed */
|
2012-07-11 19:22:10 +04:00
|
|
|
static void sitd_link_urb(
|
2005-04-17 02:20:36 +04:00
|
|
|
struct ehci_hcd *ehci,
|
|
|
|
struct urb *urb,
|
|
|
|
unsigned mod,
|
|
|
|
struct ehci_iso_stream *stream
|
|
|
|
)
|
|
|
|
{
|
|
|
|
int packet;
|
|
|
|
unsigned next_uframe;
|
|
|
|
struct ehci_iso_sched *sched = urb->hcpriv;
|
|
|
|
struct ehci_sitd *sitd;
|
|
|
|
|
|
|
|
next_uframe = stream->next_uframe;
|
|
|
|
|
2013-08-29 12:45:10 +04:00
|
|
|
if (list_empty(&stream->td_list))
|
2005-04-17 02:20:36 +04:00
|
|
|
/* usbfs ignores TT bandwidth */
|
|
|
|
ehci_to_hcd(ehci)->self.bandwidth_allocated
|
|
|
|
+= stream->bandwidth;
|
2010-12-07 05:10:08 +03:00
|
|
|
|
|
|
|
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
|
2011-03-01 09:57:05 +03:00
|
|
|
if (ehci->amd_pll_fix == 1)
|
|
|
|
usb_amd_quirk_pll_disable();
|
2010-12-07 05:10:08 +03:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
|
|
|
|
|
|
|
|
/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
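		/* next_uframe counts microframes; >> 3 gives the frame number */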
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
				| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index;
	struct ehci_iso_stream			*stream = sitd->stream;
	bool					retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
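		/* hw reports the residue: bytes NOT transferred */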
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	 BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;
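
	/* one fewer isoc urb in flight; the periodic schedule can
	 * idle once nothing is left on it
	 */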
	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}

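
/* Allocate, schedule, and link the siTDs needed for an isoc urb */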
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
			stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg(ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
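		/* the urb missed its scheduling window; give it back now */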
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/
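
/* Scan the periodic schedule for completed iTDs and siTDs, and give
 * back any urb whose last transfer descriptor has finished.
 */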
static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned		uf, now_frame, frame;
	unsigned		fmask = ehci->periodic_size - 1;
	bool			modified, live;
	union ehci_shadow	q, *q_p;
	__hc32			type, *hw_p;

	/*
	 * When running, scan from last scan point up to "now",
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;

restart:
	/* Scan each element in frame's queue for completions */
	q_p = &ehci->pshadow[frame];
	hw_p = &ehci->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(ehci, *hw_p);
	modified = false;

	while (q.ptr != NULL) {
		switch (hc32_to_cpu(ehci, type)) {
		case Q_TYPE_ITD:
			/*
			 * If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(ehci))
						break;
				}
				if (uf < 8) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/*
			 * Take finished ITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.  HC won't cache the
			 * pointer for much longer, if at all.
			 */
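			/* unlink from the shadow list first, then patch the
			 * hardware link; with the dummy-QH workaround the HC
			 * must never see a truly empty frame list entry
			 */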
			*q_p = q.itd->itd_next;
			if (!ehci->use_dummy_qh ||
					q.itd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.itd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
			wmb();
			modified = itd_complete(ehci, q.itd);
			q = *q_p;
			break;
		case Q_TYPE_SITD:
			/*
			 * If this SITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
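			/* a siTD from the previous frame may still be
			 * active: its CSPLIT phase can extend into the
			 * current frame
			 */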
			if (((frame == now_frame) ||
					(((frame + 1) & fmask) == now_frame))
				&& live
				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {

				q_p = &q.sitd->sitd_next;
				hw_p = &q.sitd->hw_next;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				q = *q_p;
				break;
			}

			/*
			 * Take finished SITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.
			 */
			*q_p = q.sitd->sitd_next;
			if (!ehci->use_dummy_qh ||
					q.sitd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.sitd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
			wmb();
			modified = sitd_complete(ehci, q.sitd);
			q = *q_p;
			break;
		default:
			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			/* BUG(); */
			fallthrough;
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* Assume completion callbacks modify the queue */
		if (unlikely(modified && ehci->isoc_count > 0))
			goto restart;
	}

	/* Stop when we have reached the current frame */
	if (frame == now_frame)
		return;

	/* The last frame may still have active siTDs */
	ehci->last_iso_frame = frame;
	frame = (frame + 1) & fmask;

	goto restart;
}