xhci: Be less verbose during URB cancellation.

With devices that can need up to 128 segments (with 64 TRBs per
segment), we can't afford to print out the entire endpoint ring every
time an URB is canceled.  Instead, print the offset of the TRB, along
with device pathname and endpoint number.

Only print DMA addresses, since virtual addresses of internal structures
are not useful.  Change the cancellation code to be more clear about
what steps of the cancellation it is in the process of doing (queueing
the request, handling the stop endpoint command, turning the TDs into
no-ops, or moving the dequeue pointers).

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
This commit is contained in:
Sarah Sharp 2011-12-19 16:56:04 -08:00
Parent 1ba6108f5f
Commit 79688acfb5
2 changed files: 16 additions and 16 deletions

View file

@@ -552,12 +552,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cpu_to_le32(TRB_CYCLE); cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32( cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP)); TRB_TYPE(TRB_TR_NOOP));
xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
"in seg %p (0x%llx dma)\n", (unsigned long long)
cur_trb, xhci_trb_virt_to_dma(cur_seg, cur_trb));
(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
cur_seg,
(unsigned long long)cur_seg->dma);
} }
if (cur_trb == cur_td->last_trb) if (cur_trb == cur_td->last_trb)
break; break;
@@ -697,9 +694,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/ */
list_for_each(entry, &ep->cancelled_td_list) { list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
cur_td->first_trb, (unsigned long long)xhci_trb_virt_to_dma(
(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) { if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking /* This shouldn't happen unless a driver is mucking

View file

@@ -1333,9 +1333,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done; goto done;
} }
xhci_dbg(xhci, "Cancel URB %p\n", urb);
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb); ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -1344,12 +1341,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done; goto done;
} }
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
urb_priv = urb->hcpriv; urb_priv = urb->hcpriv;
i = urb_priv->td_cnt;
if (i < urb_priv->length)
xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx\n",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i]->start_seg,
urb_priv->td[i]->first_trb));
for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { for (; i < urb_priv->length; i++) {
td = urb_priv->td[i]; td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
} }