Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (22 commits)
  WUSB: correct format of wusb_chid sysfs file
  WUSB: fix oops when completing URBs for disconnected devices
  WUSB: disconnect all devices when stopping a WUSB HCD
  USB: whci-hcd: check return value of usb_hcd_link_urb_to_ep()
  USB: whci-hcd: provide a endpoint_reset method
  USB: add reset endpoint operations
  USB device codes for Motorola phone.
  usb-storage: fix mistake in Makefile
  USB: usb-serial ch341: support for DTR/RTS/CTS
  Revert USB: usb-serial ch341: support for DTR/RTS/CTS
  USB: musb: fix possible panic while resuming
  USB: musb: fix isochronous TXDMA (take 2)
  USB: musb: sanitize clearing TXCSR DMA bits (take 2)
  USB: musb: bugfixes for multi-packet TXDMA support
  USB: musb_host, fix ep0 fifo flushing
  USB: usb-storage: augment unusual_devs entry for Simple Tech/Datafab
  USB: musb_host, minor enqueue locking fix (v2)
  USB: fix oops in cdc-wdm in case of malformed descriptors
  USB: qcserial: Add extra device IDs
  USB: option: Add ids for D-Link DWM-652 3.5G modem
  ...
This commit is contained in:
Commit dd26bf6d95
@@ -1025,6 +1025,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct urb *urb = &sc->work_urb;
struct bulk_cs_wrap *bcs;
int endp;
int len;
int rc;

@@ -1033,6 +1034,10 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
return;
}

endp = usb_pipeendpoint(sc->last_pipe);
if (usb_pipein(sc->last_pipe))
endp |= USB_DIR_IN;

if (cmd->state == UB_CMDST_CLEAR) {
if (urb->status == -EPIPE) {
/*

@@ -1048,9 +1053,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* We ignore the result for the halt clear.
*/

/* reset the endpoint toggle */
usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
usb_pipeout(sc->last_pipe), 0);
usb_reset_endpoint(sc->dev, endp);

ub_state_sense(sc, cmd);

@@ -1065,9 +1068,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* We ignore the result for the halt clear.
*/

/* reset the endpoint toggle */
usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
usb_pipeout(sc->last_pipe), 0);
usb_reset_endpoint(sc->dev, endp);

ub_state_stat(sc, cmd);

@@ -1082,9 +1083,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* We ignore the result for the halt clear.
*/

/* reset the endpoint toggle */
usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
usb_pipeout(sc->last_pipe), 0);
usb_reset_endpoint(sc->dev, endp);

ub_state_stat_counted(sc, cmd);

@@ -2119,8 +2118,7 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);

/* reset the endpoint toggle */
usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
usb_reset_endpoint(sc->dev, endp);

return 0;
}
@@ -149,14 +149,7 @@ static void usb_ctrl_complete(struct urb *urb)
if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) {
/* Special case handling for pipe reset */
le16_to_cpus(&ctrl_msg->dr.wIndex);

/* toggle is reset on clear */
usb_settoggle(adapter->usb_dev,
ctrl_msg->dr.wIndex & ~USB_DIR_IN,
(ctrl_msg->dr.wIndex & USB_DIR_IN) == 0,
0);

usb_reset_endpoint(adapter->usb_dev, ctrl_msg->dr.wIndex);
}

if (ctrl_msg->complete)
@@ -1461,7 +1461,6 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
return ret;
}

usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0);
usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f));

pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
@@ -652,7 +652,7 @@ next_desc:

iface = &intf->altsetting[0];
ep = &iface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(ep)) {
if (!ep || !usb_endpoint_is_int_in(ep)) {
rv = -EINVAL;
goto err;
}
@@ -841,7 +841,7 @@ static int proc_resetep(struct dev_state *ps, void __user *arg)
ret = checkintf(ps, ret);
if (ret)
return ret;
usb_settoggle(ps->dev, ep & 0xf, !(ep & USB_DIR_IN), 0);
usb_reset_endpoint(ps->dev, ep);
return 0;
}
@@ -1539,6 +1539,32 @@ void usb_hcd_disable_endpoint(struct usb_device *udev,
hcd->driver->endpoint_disable(hcd, ep);
}

/**
* usb_hcd_reset_endpoint - reset host endpoint state
* @udev: USB device.
* @ep: the endpoint to reset.
*
* Resets any host endpoint state such as the toggle bit, sequence
* number and current window.
*/
void usb_hcd_reset_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);

if (hcd->driver->endpoint_reset)
hcd->driver->endpoint_reset(hcd, ep);
else {
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);

usb_settoggle(udev, epnum, is_out, 0);
if (is_control)
usb_settoggle(udev, epnum, !is_out, 0);
}
}

/* Protect against drivers that try to unlink URBs after the device
* is gone, by waiting until all unlinks for @udev are finished.
* Since we don't currently track URBs by device, simply wait until
@@ -206,6 +206,11 @@ struct hc_driver {
void (*endpoint_disable)(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);

/* (optional) reset any endpoint state such as sequence number
and current window */
void (*endpoint_reset)(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);

/* root hub support */
int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
int (*hub_control) (struct usb_hcd *hcd,

@@ -234,6 +239,8 @@ extern void usb_hcd_flush_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_disable_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_reset_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
extern int usb_hcd_get_frame_number(struct usb_device *udev);

@@ -279,6 +286,13 @@ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
extern void usb_hc_died(struct usb_hcd *hcd);
extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);

/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
#define usb_settoggle(dev, ep, out, bit) \
((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
((bit) << (ep)))

/* -------------------------------------------------------------------------- */

/* Enumeration is only for the hub driver, or HCD virtual root hubs */
@@ -1002,8 +1002,7 @@ int usb_clear_halt(struct usb_device *dev, int pipe)
* the copy in usb-storage, for as long as we need two copies.
*/

/* toggle was reset by the clear */
usb_settoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe), 0);
usb_reset_endpoint(dev, endp);

return 0;
}

@@ -1075,6 +1074,30 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
}
}

/**
* usb_reset_endpoint - Reset an endpoint's state.
* @dev: the device whose endpoint is to be reset
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
*
* Resets any host-side endpoint state such as the toggle bit,
* sequence number or current window.
*/
void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;

if (usb_endpoint_out(epaddr))
ep = dev->ep_out[epnum];
else
ep = dev->ep_in[epnum];
if (ep)
usb_hcd_reset_endpoint(dev, ep);
}
EXPORT_SYMBOL_GPL(usb_reset_endpoint);

/**
* usb_disable_interface -- Disable all endpoints for an interface
* @dev: the device whose interface is being disabled

@@ -1117,7 +1140,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
dev->toggle[0] = dev->toggle[1] = 0;

/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)

@@ -1154,28 +1176,24 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
* usb_enable_endpoint - Enable an endpoint for USB communications
* @dev: the device whose interface is being enabled
* @ep: the endpoint
* @reset_toggle: flag to set the endpoint's toggle back to 0
* @reset_ep: flag to reset the endpoint state
*
* Resets the endpoint toggle if asked, and sets dev->ep_{in,out} pointers.
* Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers.
* For control endpoints, both the input and output sides are handled.
*/
void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
bool reset_toggle)
bool reset_ep)
{
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);

if (is_out || is_control) {
if (reset_toggle)
usb_settoggle(dev, epnum, 1, 0);
if (reset_ep)
usb_hcd_reset_endpoint(dev, ep);
if (is_out || is_control)
dev->ep_out[epnum] = ep;
}
if (!is_out || is_control) {
if (reset_toggle)
usb_settoggle(dev, epnum, 0, 0);
if (!is_out || is_control)
dev->ep_in[epnum] = ep;
}
ep->enabled = 1;
}

@@ -1183,18 +1201,18 @@ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
* usb_enable_interface - Enable all the endpoints for an interface
* @dev: the device whose interface is being enabled
* @intf: pointer to the interface descriptor
* @reset_toggles: flag to set the endpoints' toggles back to 0
* @reset_eps: flag to reset the endpoints' state
*
* Enables all the endpoints for the interface's current altsetting.
*/
void usb_enable_interface(struct usb_device *dev,
struct usb_interface *intf, bool reset_toggles)
struct usb_interface *intf, bool reset_eps)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;

for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_enable_endpoint(dev, &alt->endpoint[i], reset_toggles);
usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps);
}

/**

@@ -1335,7 +1353,7 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
* This issues a standard SET_CONFIGURATION request to the device using
* the current configuration. The effect is to reset most USB-related
* state in the device, including interface altsettings (reset to zero),
* endpoint halts (cleared), and data toggle (only for bulk and interrupt
* endpoint halts (cleared), and endpoint state (only for bulk and interrupt
* endpoints). Other usbcore state is unchanged, including bindings of
* usb device drivers to interfaces.
*

@@ -1343,7 +1361,7 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
* (multi-interface) devices. Instead, the driver for each interface may
* use usb_set_interface() on the interfaces it claims. Be careful though;
* some devices don't support the SET_INTERFACE request, and others won't
* reset all the interface state (notably data toggles). Resetting the whole
* reset all the interface state (notably endpoint state). Resetting the whole
* configuration would affect other drivers' interfaces.
*
* The caller must own the device lock.

@@ -1376,8 +1394,6 @@ int usb_reset_configuration(struct usb_device *dev)
if (retval < 0)
return retval;

dev->toggle[0] = dev->toggle[1] = 0;

/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];

@@ -362,7 +362,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
/* ep0 maxpacket comes later, from device descriptor */
usb_enable_endpoint(dev, &dev->ep0, true);
usb_enable_endpoint(dev, &dev->ep0, false);
dev->can_submit = 1;

/* Save readable and stable topology id, distinguishing devices
@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
|
|||
strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
|
||||
}
|
||||
|
||||
static u32 eth_get_link(struct net_device *net)
|
||||
{
|
||||
struct eth_dev *dev = netdev_priv(net);
|
||||
return dev->gadget->speed != USB_SPEED_UNKNOWN;
|
||||
}
|
||||
|
||||
/* REVISIT can also support:
|
||||
* - WOL (by tracking suspends and issuing remote wakeup)
|
||||
* - msglevel (implies updated messaging)
|
||||
|
@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net)
|
|||
|
||||
static struct ethtool_ops ops = {
|
||||
.get_drvinfo = eth_get_drvinfo,
|
||||
.get_link = eth_get_link
|
||||
.get_link = ethtool_op_get_link,
|
||||
};
|
||||
|
||||
static void defer_kevent(struct eth_dev *dev, int flag)
|
||||
|
|
|
@ -122,7 +122,8 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
|
|||
process_inactive_qtd(whc, qset, td);
|
||||
}
|
||||
|
||||
update |= qset_add_qtds(whc, qset);
|
||||
if (!qset->remove)
|
||||
update |= qset_add_qtds(whc, qset);
|
||||
|
||||
done:
|
||||
/*
|
||||
|
@ -254,23 +255,29 @@ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
|
|||
|
||||
spin_lock_irqsave(&whc->lock, flags);
|
||||
|
||||
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
if (err < 0) {
|
||||
spin_unlock_irqrestore(&whc->lock, flags);
|
||||
return err;
|
||||
}
|
||||
|
||||
qset = get_qset(whc, urb, GFP_ATOMIC);
|
||||
if (qset == NULL)
|
||||
err = -ENOMEM;
|
||||
else
|
||||
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
|
||||
if (!err) {
|
||||
usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
if (!qset->in_sw_list)
|
||||
asl_qset_insert_begin(whc, qset);
|
||||
}
|
||||
} else
|
||||
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
|
||||
spin_unlock_irqrestore(&whc->lock, flags);
|
||||
|
||||
if (!err)
|
||||
queue_work(whc->workqueue, &whc->async_work);
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -186,6 +186,28 @@ static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
|
|||
}
|
||||
}
|
||||
|
||||
static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
|
||||
struct usb_host_endpoint *ep)
|
||||
{
|
||||
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
|
||||
struct whc *whc = wusbhc_to_whc(wusbhc);
|
||||
struct whc_qset *qset;
|
||||
|
||||
qset = ep->hcpriv;
|
||||
if (qset) {
|
||||
qset->remove = 1;
|
||||
|
||||
if (usb_endpoint_xfer_bulk(&ep->desc)
|
||||
|| usb_endpoint_xfer_control(&ep->desc))
|
||||
queue_work(whc->workqueue, &whc->async_work);
|
||||
else
|
||||
queue_work(whc->workqueue, &whc->periodic_work);
|
||||
|
||||
qset_reset(whc, qset);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static struct hc_driver whc_hc_driver = {
|
||||
.description = "whci-hcd",
|
||||
.product_desc = "Wireless host controller",
|
||||
|
@ -200,6 +222,7 @@ static struct hc_driver whc_hc_driver = {
|
|||
.urb_enqueue = whc_urb_enqueue,
|
||||
.urb_dequeue = whc_urb_dequeue,
|
||||
.endpoint_disable = whc_endpoint_disable,
|
||||
.endpoint_reset = whc_endpoint_reset,
|
||||
|
||||
.hub_status_data = wusbhc_rh_status_data,
|
||||
.hub_control = wusbhc_rh_control,
|
||||
|
|
|
@ -128,7 +128,8 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
|
|||
process_inactive_qtd(whc, qset, td);
|
||||
}
|
||||
|
||||
update |= qset_add_qtds(whc, qset);
|
||||
if (!qset->remove)
|
||||
update |= qset_add_qtds(whc, qset);
|
||||
|
||||
done:
|
||||
/*
|
||||
|
@ -282,23 +283,29 @@ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
|
|||
|
||||
spin_lock_irqsave(&whc->lock, flags);
|
||||
|
||||
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
if (err < 0) {
|
||||
spin_unlock_irqrestore(&whc->lock, flags);
|
||||
return err;
|
||||
}
|
||||
|
||||
qset = get_qset(whc, urb, GFP_ATOMIC);
|
||||
if (qset == NULL)
|
||||
err = -ENOMEM;
|
||||
else
|
||||
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
|
||||
if (!err) {
|
||||
usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
if (!qset->in_sw_list)
|
||||
qset_insert_in_sw_list(whc, qset);
|
||||
}
|
||||
} else
|
||||
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
|
||||
|
||||
spin_unlock_irqrestore(&whc->lock, flags);
|
||||
|
||||
if (!err)
|
||||
queue_work(whc->workqueue, &whc->periodic_work);
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -353,7 +360,6 @@ void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
|
|||
qset_delete(whc, qset);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* pzl_init - initialize the periodic zone list
|
||||
* @whc: the WHCI host controller
|
||||
|
|
|
@ -89,11 +89,16 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
|
|||
QH_INFO3_TX_RATE_53_3
|
||||
| QH_INFO3_TX_PWR(0) /* 0 == max power */
|
||||
);
|
||||
|
||||
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* qset_clear - clear fields in a qset so it may be reinserted into a
|
||||
* schedule
|
||||
* schedule.
|
||||
*
|
||||
* The sequence number and current window are not cleared (see
|
||||
* qset_reset()).
|
||||
*/
|
||||
void qset_clear(struct whc *whc, struct whc_qset *qset)
|
||||
{
|
||||
|
@ -101,9 +106,8 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
|
|||
qset->remove = 0;
|
||||
|
||||
qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
|
||||
qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
|
||||
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
|
||||
qset->qh.err_count = 0;
|
||||
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
|
||||
qset->qh.scratch[0] = 0;
|
||||
qset->qh.scratch[1] = 0;
|
||||
qset->qh.scratch[2] = 0;
|
||||
|
@ -113,6 +117,20 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
|
|||
init_completion(&qset->remove_complete);
|
||||
}
|
||||
|
||||
/**
|
||||
* qset_reset - reset endpoint state in a qset.
|
||||
*
|
||||
* Clears the sequence number and current window. This qset must not
|
||||
* be in the ASL or PZL.
|
||||
*/
|
||||
void qset_reset(struct whc *whc, struct whc_qset *qset)
|
||||
{
|
||||
wait_for_completion(&qset->remove_complete);
|
||||
|
||||
qset->qh.status &= ~QH_STATUS_SEQ_MASK;
|
||||
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* get_qset - get the qset for an async endpoint
|
||||
*
|
||||
|
|
|
@ -184,6 +184,7 @@ void qset_free(struct whc *whc, struct whc_qset *qset);
|
|||
struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
|
||||
void qset_delete(struct whc *whc, struct whc_qset *qset);
|
||||
void qset_clear(struct whc *whc, struct whc_qset *qset);
|
||||
void qset_reset(struct whc *whc, struct whc_qset *qset);
|
||||
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
|
||||
gfp_t mem_flags);
|
||||
void qset_free_std(struct whc *whc, struct whc_std *std);
|
||||
|
|
|
@ -185,6 +185,7 @@ struct whc_qhead {
|
|||
#define QH_STATUS_FLOW_CTRL (1 << 15)
|
||||
#define QH_STATUS_ICUR(i) ((i) << 5)
|
||||
#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
|
||||
#define QH_STATUS_SEQ_MASK 0x1f
|
||||
|
||||
/**
|
||||
* usb_pipe_to_qh_type - USB core pipe type to QH transfer type
|
||||
|
|
|
@ -579,6 +579,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
|
|||
* trigger the "send a ZLP?" confusion.
|
||||
*/
|
||||
rndis = (maxpacket & 0x3f) == 0
|
||||
&& length > maxpacket
|
||||
&& length < 0xffff
|
||||
&& (length % maxpacket) != 0;
|
||||
|
||||
|
@ -1228,27 +1229,7 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
|
|||
|
||||
hw_ep = tx_ch->hw_ep;
|
||||
|
||||
/* Peripheral role never repurposes the
|
||||
* endpoint, so immediate completion is
|
||||
* safe. Host role waits for the fifo
|
||||
* to empty (TXPKTRDY irq) before going
|
||||
* to the next queued bulk transfer.
|
||||
*/
|
||||
if (is_host_active(cppi->musb)) {
|
||||
#if 0
|
||||
/* WORKAROUND because we may
|
||||
* not always get TXKPTRDY ...
|
||||
*/
|
||||
int csr;
|
||||
|
||||
csr = musb_readw(hw_ep->regs,
|
||||
MUSB_TXCSR);
|
||||
if (csr & MUSB_TXCSR_TXPKTRDY)
|
||||
#endif
|
||||
completed = false;
|
||||
}
|
||||
if (completed)
|
||||
musb_dma_completion(musb, index + 1, 1);
|
||||
musb_dma_completion(musb, index + 1, 1);
|
||||
|
||||
} else {
|
||||
/* Bigger transfer than we could fit in
|
||||
|
|
|
@ -2170,26 +2170,22 @@ static int musb_suspend(struct platform_device *pdev, pm_message_t message)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int musb_resume(struct platform_device *pdev)
|
||||
static int musb_resume_early(struct platform_device *pdev)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct musb *musb = dev_to_musb(&pdev->dev);
|
||||
|
||||
if (!musb->clock)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&musb->lock, flags);
|
||||
|
||||
if (musb->set_clock)
|
||||
musb->set_clock(musb->clock, 1);
|
||||
else
|
||||
clk_enable(musb->clock);
|
||||
|
||||
/* for static cmos like DaVinci, register values were preserved
|
||||
* unless for some reason the whole soc powered down and we're
|
||||
* not treating that as a whole-system restart (e.g. swsusp)
|
||||
* unless for some reason the whole soc powered down or the USB
|
||||
* module got reset through the PSC (vs just being disabled).
|
||||
*/
|
||||
spin_unlock_irqrestore(&musb->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2207,7 +2203,7 @@ static struct platform_driver musb_driver = {
|
|||
.remove = __devexit_p(musb_remove),
|
||||
.shutdown = musb_shutdown,
|
||||
.suspend = musb_suspend,
|
||||
.resume = musb_resume,
|
||||
.resume_early = musb_resume_early,
|
||||
};
|
||||
|
||||
/*-------------------------------------------------------------------------*/
|
||||
|
|
|
@ -165,9 +165,15 @@ static void nuke(struct musb_ep *ep, const int status)
|
|||
if (is_dma_capable() && ep->dma) {
|
||||
struct dma_controller *c = ep->musb->dma_controller;
|
||||
int value;
|
||||
|
||||
if (ep->is_in) {
|
||||
/*
|
||||
* The programming guide says that we must not clear
|
||||
* the DMAMODE bit before DMAENAB, so we only
|
||||
* clear it in the second write...
|
||||
*/
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
0 | MUSB_TXCSR_FLUSHFIFO);
|
||||
MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
0 | MUSB_TXCSR_FLUSHFIFO);
|
||||
} else {
|
||||
|
@ -230,7 +236,7 @@ static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
|
|||
| IN token(s) are recd from Host.
|
||||
| -> DMA interrupt on completion
|
||||
| calls TxAvail.
|
||||
| -> stop DMA, ~DmaEenab,
|
||||
| -> stop DMA, ~DMAENAB,
|
||||
| -> set TxPktRdy for last short pkt or zlp
|
||||
| -> Complete Request
|
||||
| -> Continue next request (call txstate)
|
||||
|
@ -315,9 +321,17 @@ static void txstate(struct musb *musb, struct musb_request *req)
|
|||
request->dma, request_size);
|
||||
if (use_dma) {
|
||||
if (musb_ep->dma->desired_mode == 0) {
|
||||
/* ASSERT: DMAENAB is clear */
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET |
|
||||
MUSB_TXCSR_DMAMODE);
|
||||
/*
|
||||
* We must not clear the DMAMODE bit
|
||||
* before the DMAENAB bit -- and the
|
||||
* latter doesn't always get cleared
|
||||
* before we get here...
|
||||
*/
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAENAB);
|
||||
musb_writew(epio, MUSB_TXCSR, csr
|
||||
| MUSB_TXCSR_P_WZC_BITS);
|
||||
csr &= ~MUSB_TXCSR_DMAMODE;
|
||||
csr |= (MUSB_TXCSR_DMAENAB |
|
||||
MUSB_TXCSR_MODE);
|
||||
/* against programming guide */
|
||||
|
@ -334,10 +348,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
|
|||
|
||||
#elif defined(CONFIG_USB_TI_CPPI_DMA)
|
||||
/* program endpoint CSR first, then setup DMA */
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_P_UNDERRUN
|
||||
| MUSB_TXCSR_TXPKTRDY);
|
||||
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
|
||||
csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
|
||||
|
@ -364,8 +375,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
|
|||
if (!use_dma) {
|
||||
c->channel_release(musb_ep->dma);
|
||||
musb_ep->dma = NULL;
|
||||
/* ASSERT: DMAENAB clear */
|
||||
csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
|
||||
csr &= ~MUSB_TXCSR_DMAENAB;
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
/* invariant: prequest->buf is non-null */
|
||||
}
|
||||
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
* Copyright 2005 Mentor Graphics Corporation
|
||||
* Copyright (C) 2005-2006 by Texas Instruments
|
||||
* Copyright (C) 2006-2007 Nokia Corporation
|
||||
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
|
@ -96,8 +97,8 @@
|
|||
|
||||
|
||||
static void musb_ep_program(struct musb *musb, u8 epnum,
|
||||
struct urb *urb, unsigned int nOut,
|
||||
u8 *buf, u32 len);
|
||||
struct urb *urb, int is_out,
|
||||
u8 *buf, u32 offset, u32 len);
|
||||
|
||||
/*
|
||||
* Clear TX fifo. Needed to avoid BABBLE errors.
|
||||
|
@ -125,6 +126,29 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
|
|||
}
|
||||
}
|
||||
|
||||
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
|
||||
{
|
||||
void __iomem *epio = ep->regs;
|
||||
u16 csr;
|
||||
int retries = 5;
|
||||
|
||||
/* scrub any data left in the fifo */
|
||||
do {
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
|
||||
break;
|
||||
musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
udelay(10);
|
||||
} while (--retries);
|
||||
|
||||
WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
|
||||
ep->epnum, csr);
|
||||
|
||||
/* and reset for the next transfer */
|
||||
musb_writew(epio, MUSB_TXCSR, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Start transmit. Caller is responsible for locking shared resources.
|
||||
* musb must be locked.
|
||||
|
@ -145,13 +169,15 @@ static inline void musb_h_tx_start(struct musb_hw_ep *ep)
|
|||
|
||||
}
|
||||
|
||||
static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
|
||||
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
|
||||
{
|
||||
u16 txcsr;
|
||||
|
||||
/* NOTE: no locks here; caller should lock and select EP */
|
||||
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
|
||||
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
|
||||
if (is_cppi_enabled())
|
||||
txcsr |= MUSB_TXCSR_DMAMODE;
|
||||
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
|
||||
}
|
||||
|
||||
|
@ -166,9 +192,10 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
|
|||
{
|
||||
u16 frame;
|
||||
u32 len;
|
||||
void *buf;
|
||||
void __iomem *mbase = musb->mregs;
|
||||
struct urb *urb = next_urb(qh);
|
||||
void *buf = urb->transfer_buffer;
|
||||
u32 offset = 0;
|
||||
struct musb_hw_ep *hw_ep = qh->hw_ep;
|
||||
unsigned pipe = urb->pipe;
|
||||
u8 address = usb_pipedevice(pipe);
|
||||
|
@ -191,7 +218,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
|
|||
case USB_ENDPOINT_XFER_ISOC:
|
||||
qh->iso_idx = 0;
|
||||
qh->frame = 0;
|
||||
buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
|
||||
offset = urb->iso_frame_desc[0].offset;
|
||||
len = urb->iso_frame_desc[0].length;
|
||||
break;
|
||||
default: /* bulk, interrupt */
|
||||
|
@ -209,14 +236,14 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
|
|||
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
|
||||
default: s = "-intr"; break;
|
||||
}; s; }),
|
||||
epnum, buf, len);
|
||||
epnum, buf + offset, len);
|
||||
|
||||
/* Configure endpoint */
|
||||
if (is_in || hw_ep->is_shared_fifo)
|
||||
hw_ep->in_qh = qh;
|
||||
else
|
||||
hw_ep->out_qh = qh;
|
||||
musb_ep_program(musb, epnum, urb, !is_in, buf, len);
|
||||
musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
|
||||
|
||||
/* transmit may have more work: start it when it is time */
|
||||
if (is_in)
|
||||
|
@ -227,7 +254,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
|
|||
case USB_ENDPOINT_XFER_ISOC:
|
||||
case USB_ENDPOINT_XFER_INT:
|
||||
DBG(3, "check whether there's still time for periodic Tx\n");
|
||||
qh->iso_idx = 0;
|
||||
frame = musb_readw(mbase, MUSB_FRAME);
|
||||
/* FIXME this doesn't implement that scheduling policy ...
|
||||
* or handle framecounter wrapping
|
||||
|
@ -256,7 +282,7 @@ start:
|
|||
if (!hw_ep->tx_channel)
|
||||
musb_h_tx_start(hw_ep);
|
||||
else if (is_cppi_enabled() || tusb_dma_omap())
|
||||
cppi_host_txdma_start(hw_ep);
|
||||
musb_h_tx_dma_start(hw_ep);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -567,10 +593,17 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
|
|||
csr = musb_readw(ep->regs, MUSB_TXCSR);
|
||||
if (csr & MUSB_TXCSR_MODE) {
|
||||
musb_h_tx_flush_fifo(ep);
|
||||
csr = musb_readw(ep->regs, MUSB_TXCSR);
|
||||
musb_writew(ep->regs, MUSB_TXCSR,
|
||||
MUSB_TXCSR_FRCDATATOG);
|
||||
csr | MUSB_TXCSR_FRCDATATOG);
|
||||
}
|
||||
/* clear mode (and everything else) to enable Rx */
|
||||
|
||||
/*
|
||||
* Clear the MODE bit (and everything else) to enable Rx.
|
||||
* NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
|
||||
*/
|
||||
if (csr & MUSB_TXCSR_DMAMODE)
|
||||
musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
|
||||
musb_writew(ep->regs, MUSB_TXCSR, 0);
|
||||
|
||||
/* scrub all previous state, clearing toggle */
|
||||
|
@ -601,14 +634,68 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
|
|||
ep->rx_reinit = 0;
|
||||
}
|
||||
|
||||
static bool musb_tx_dma_program(struct dma_controller *dma,
|
||||
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
|
||||
struct urb *urb, u32 offset, u32 length)
|
||||
{
|
||||
struct dma_channel *channel = hw_ep->tx_channel;
|
||||
void __iomem *epio = hw_ep->regs;
|
||||
u16 pkt_size = qh->maxpacket;
|
||||
u16 csr;
|
||||
u8 mode;
|
||||
|
||||
#ifdef CONFIG_USB_INVENTRA_DMA
|
||||
if (length > channel->max_len)
|
||||
length = channel->max_len;
|
||||
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
if (length > pkt_size) {
|
||||
mode = 1;
|
||||
csr |= MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_DMAENAB;
|
||||
} else {
|
||||
mode = 0;
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
|
||||
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
|
||||
}
|
||||
channel->desired_mode = mode;
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
#else
|
||||
if (!is_cppi_enabled() && !tusb_dma_omap())
|
||||
return false;
|
||||
|
||||
channel->actual_len = 0;
|
||||
|
||||
/*
|
||||
* TX uses "RNDIS" mode automatically but needs help
|
||||
* to identify the zero-length-final-packet case.
|
||||
*/
|
||||
mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
|
||||
#endif
|
||||
|
||||
qh->segsize = length;
|
||||
|
||||
if (!dma->channel_program(channel, pkt_size, mode,
|
||||
urb->transfer_dma + offset, length)) {
|
||||
dma->channel_release(channel);
|
||||
hw_ep->tx_channel = NULL;
|
||||
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
|
||||
musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Program an HDRC endpoint as per the given URB
|
||||
* Context: irqs blocked, controller lock held
|
||||
*/
|
||||
static void musb_ep_program(struct musb *musb, u8 epnum,
|
||||
struct urb *urb, unsigned int is_out,
|
||||
u8 *buf, u32 len)
|
||||
struct urb *urb, int is_out,
|
||||
u8 *buf, u32 offset, u32 len)
|
||||
{
|
||||
struct dma_controller *dma_controller;
|
||||
struct dma_channel *dma_channel;
|
||||
|
@ -667,12 +754,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
|
|||
|
||||
/* general endpoint setup */
|
||||
if (epnum) {
|
||||
/* ASSERT: TXCSR_DMAENAB was already cleared */
|
||||
|
||||
/* flush all old state, set default */
|
||||
musb_h_tx_flush_fifo(hw_ep);
|
||||
|
||||
/*
|
||||
* We must not clear the DMAMODE bit before or in
|
||||
* the same cycle with the DMAENAB bit, so we clear
|
||||
* the latter first...
|
||||
*/
|
||||
csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAENAB
|
||||
| MUSB_TXCSR_FRCDATATOG
|
||||
| MUSB_TXCSR_H_RXSTALL
|
||||
| MUSB_TXCSR_H_ERROR
|
||||
|
@ -680,24 +772,20 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
|
|||
);
|
||||
csr |= MUSB_TXCSR_MODE;
|
||||
|
||||
if (usb_gettoggle(urb->dev,
|
||||
qh->epnum, 1))
|
||||
if (usb_gettoggle(urb->dev, qh->epnum, 1))
|
||||
csr |= MUSB_TXCSR_H_WR_DATATOGGLE
|
||||
| MUSB_TXCSR_H_DATATOGGLE;
|
||||
else
|
||||
csr |= MUSB_TXCSR_CLRDATATOG;
|
||||
|
||||
/* twice in case of double packet buffering */
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
/* REVISIT may need to clear FLUSHFIFO ... */
|
||||
csr &= ~MUSB_TXCSR_DMAMODE;
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
} else {
|
||||
/* endpoint 0: just flush */
|
||||
musb_writew(epio, MUSB_CSR0,
|
||||
csr | MUSB_CSR0_FLUSHFIFO);
|
||||
musb_writew(epio, MUSB_CSR0,
|
||||
csr | MUSB_CSR0_FLUSHFIFO);
|
||||
musb_h_ep0_flush_fifo(hw_ep);
|
||||
}
|
||||
|
||||
/* target addr and (for multipoint) hub addr/port */
|
||||
|
@ -734,113 +822,14 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
|
|||
else
|
||||
load_count = min((u32) packet_sz, len);
|
||||
|
||||
#ifdef CONFIG_USB_INVENTRA_DMA
|
||||
if (dma_channel) {
|
||||
|
||||
/* clear previous state */
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_DMAENAB);
|
||||
csr |= MUSB_TXCSR_MODE;
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
csr | MUSB_TXCSR_MODE);
|
||||
|
||||
qh->segsize = min(len, dma_channel->max_len);
|
||||
|
||||
if (qh->segsize <= packet_sz)
|
||||
dma_channel->desired_mode = 0;
|
||||
else
|
||||
dma_channel->desired_mode = 1;
|
||||
|
||||
|
||||
if (dma_channel->desired_mode == 0) {
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAMODE);
|
||||
csr |= (MUSB_TXCSR_DMAENAB);
|
||||
/* against programming guide */
|
||||
} else
|
||||
csr |= (MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAENAB
|
||||
| MUSB_TXCSR_DMAMODE);
|
||||
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
|
||||
dma_ok = dma_controller->channel_program(
|
||||
dma_channel, packet_sz,
|
||||
dma_channel->desired_mode,
|
||||
urb->transfer_dma,
|
||||
qh->segsize);
|
||||
if (dma_ok) {
|
||||
load_count = 0;
|
||||
} else {
|
||||
dma_controller->channel_release(dma_channel);
|
||||
if (is_out)
|
||||
hw_ep->tx_channel = NULL;
|
||||
else
|
||||
hw_ep->rx_channel = NULL;
|
||||
dma_channel = NULL;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* candidate for DMA */
|
||||
if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
|
||||
|
||||
/* program endpoint CSRs first, then setup DMA.
|
||||
* assume CPPI setup succeeds.
|
||||
* defer enabling dma.
|
||||
*/
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_DMAENAB);
|
||||
csr |= MUSB_TXCSR_MODE;
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
csr | MUSB_TXCSR_MODE);
|
||||
|
||||
dma_channel->actual_len = 0L;
|
||||
qh->segsize = len;
|
||||
|
||||
/* TX uses "rndis" mode automatically, but needs help
|
||||
* to identify the zero-length-final-packet case.
|
||||
*/
|
||||
dma_ok = dma_controller->channel_program(
|
||||
dma_channel, packet_sz,
|
||||
(urb->transfer_flags
|
||||
& URB_ZERO_PACKET)
|
||||
== URB_ZERO_PACKET,
|
||||
urb->transfer_dma,
|
||||
qh->segsize);
|
||||
if (dma_ok) {
|
||||
load_count = 0;
|
||||
} else {
|
||||
dma_controller->channel_release(dma_channel);
|
||||
hw_ep->tx_channel = NULL;
|
||||
dma_channel = NULL;
|
||||
|
||||
/* REVISIT there's an error path here that
|
||||
* needs handling: can't do dma, but
|
||||
* there's no pio buffer address...
|
||||
*/
|
||||
}
|
||||
}
|
||||
if (dma_channel && musb_tx_dma_program(dma_controller,
|
||||
hw_ep, qh, urb, offset, len))
|
||||
load_count = 0;
|
||||
|
||||
if (load_count) {
|
||||
/* ASSERT: TXCSR_DMAENAB was already cleared */
|
||||
|
||||
/* PIO to load FIFO */
|
||||
qh->segsize = load_count;
|
||||
musb_write_fifo(hw_ep, load_count, buf);
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
csr &= ~(MUSB_TXCSR_DMAENAB
|
||||
| MUSB_TXCSR_DMAMODE
|
||||
| MUSB_TXCSR_AUTOSET);
|
||||
/* write CSR */
|
||||
csr |= MUSB_TXCSR_MODE;
|
||||
|
||||
if (epnum)
|
||||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
}
|
||||
|
||||
/* re-enable interrupt */
|
||||
|
@ -895,7 +884,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
|
|||
dma_channel, packet_sz,
|
||||
!(urb->transfer_flags
|
||||
& URB_SHORT_NOT_OK),
|
||||
urb->transfer_dma,
|
||||
urb->transfer_dma + offset,
|
||||
qh->segsize);
|
||||
if (!dma_ok) {
|
||||
dma_controller->channel_release(
|
||||
|
@ -1063,11 +1052,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
|
|||
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
|
||||
musb_writew(epio, MUSB_CSR0, csr);
|
||||
} else {
|
||||
csr |= MUSB_CSR0_FLUSHFIFO;
|
||||
musb_writew(epio, MUSB_CSR0, csr);
|
||||
musb_writew(epio, MUSB_CSR0, csr);
|
||||
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
|
||||
musb_writew(epio, MUSB_CSR0, csr);
|
||||
musb_h_ep0_flush_fifo(hw_ep);
|
||||
}
|
||||
|
||||
musb_writeb(epio, MUSB_NAKLIMIT0, 0);
|
||||
|
@ -1081,10 +1066,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
|
|||
* SHOULD NEVER HAPPEN! */
|
||||
ERR("no URB for end 0\n");
|
||||
|
||||
musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
|
||||
musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
|
||||
musb_writew(epio, MUSB_CSR0, 0);
|
||||
|
||||
musb_h_ep0_flush_fifo(hw_ep);
|
||||
goto done;
|
||||
}
|
||||
|
||||
|
@ -1145,8 +1127,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
int pipe;
|
||||
bool done = false;
|
||||
u16 tx_csr;
|
||||
size_t wLength = 0;
|
||||
u8 *buf = NULL;
|
||||
size_t length = 0;
|
||||
size_t offset = 0;
|
||||
struct urb *urb;
|
||||
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
|
||||
void __iomem *epio = hw_ep->regs;
|
||||
|
@ -1164,7 +1146,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
/* with CPPI, DMA sometimes triggers "extra" irqs */
|
||||
if (!urb) {
|
||||
DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
|
||||
goto finish;
|
||||
return;
|
||||
}
|
||||
|
||||
pipe = urb->pipe;
|
||||
|
@ -1201,7 +1183,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
musb_writew(epio, MUSB_TXCSR,
|
||||
MUSB_TXCSR_H_WZC_BITS
|
||||
| MUSB_TXCSR_TXPKTRDY);
|
||||
goto finish;
|
||||
return;
|
||||
}
|
||||
|
||||
if (status) {
|
||||
|
@ -1233,29 +1215,89 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
/* second cppi case */
|
||||
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
|
||||
DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
|
||||
goto finish;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_dma_capable() && dma && !status) {
|
||||
/*
|
||||
* DMA has completed. But if we're using DMA mode 1 (multi
|
||||
* packet DMA), we need a terminal TXPKTRDY interrupt before
|
||||
* we can consider this transfer completed, lest we trash
|
||||
* its last packet when writing the next URB's data. So we
|
||||
* switch back to mode 0 to get that interrupt; we'll come
|
||||
* back here once it happens.
|
||||
*/
|
||||
if (tx_csr & MUSB_TXCSR_DMAMODE) {
|
||||
/*
|
||||
* We shouldn't clear DMAMODE with DMAENAB set; so
|
||||
* clear them in a safe order. That should be OK
|
||||
* once TXPKTRDY has been set (and I've never seen
|
||||
* it being 0 at this moment -- DMA interrupt latency
|
||||
* is significant) but if it hasn't been then we have
|
||||
* no choice but to stop being polite and ignore the
|
||||
* programmer's guide... :-)
|
||||
*
|
||||
* Note that we must write TXCSR with TXPKTRDY cleared
|
||||
* in order not to re-trigger the packet send (this bit
|
||||
* can't be cleared by CPU), and there's another caveat:
|
||||
* TXPKTRDY may be set shortly and then cleared in the
|
||||
* double-buffered FIFO mode, so we do an extra TXCSR
|
||||
* read for debouncing...
|
||||
*/
|
||||
tx_csr &= musb_readw(epio, MUSB_TXCSR);
|
||||
if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
|
||||
tx_csr &= ~(MUSB_TXCSR_DMAENAB |
|
||||
MUSB_TXCSR_TXPKTRDY);
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
tx_csr | MUSB_TXCSR_H_WZC_BITS);
|
||||
}
|
||||
tx_csr &= ~(MUSB_TXCSR_DMAMODE |
|
||||
MUSB_TXCSR_TXPKTRDY);
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
tx_csr | MUSB_TXCSR_H_WZC_BITS);
|
||||
|
||||
/*
|
||||
* There is no guarantee that we'll get an interrupt
|
||||
* after clearing DMAMODE as we might have done this
|
||||
* too late (after TXPKTRDY was cleared by controller).
|
||||
* Re-read TXCSR as we have spoiled its previous value.
|
||||
*/
|
||||
tx_csr = musb_readw(epio, MUSB_TXCSR);
|
||||
}
|
||||
|
||||
/*
|
||||
* We may get here from a DMA completion or TXPKTRDY interrupt.
|
||||
* In any case, we must check the FIFO status here and bail out
|
||||
* only if the FIFO still has data -- that should prevent the
|
||||
* "missed" TXPKTRDY interrupts and deal with double-buffered
|
||||
* FIFO mode too...
|
||||
*/
|
||||
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
|
||||
DBG(2, "DMA complete but packet still in FIFO, "
|
||||
"CSR %04x\n", tx_csr);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* REVISIT this looks wrong... */
|
||||
if (!status || dma || usb_pipeisoc(pipe)) {
|
||||
if (dma)
|
||||
wLength = dma->actual_len;
|
||||
length = dma->actual_len;
|
||||
else
|
||||
wLength = qh->segsize;
|
||||
qh->offset += wLength;
|
||||
length = qh->segsize;
|
||||
qh->offset += length;
|
||||
|
||||
if (usb_pipeisoc(pipe)) {
|
||||
struct usb_iso_packet_descriptor *d;
|
||||
|
||||
d = urb->iso_frame_desc + qh->iso_idx;
|
||||
d->actual_length = qh->segsize;
|
||||
d->actual_length = length;
|
||||
d->status = status;
|
||||
if (++qh->iso_idx >= urb->number_of_packets) {
|
||||
done = true;
|
||||
} else {
|
||||
d++;
|
||||
buf = urb->transfer_buffer + d->offset;
|
||||
wLength = d->length;
|
||||
offset = d->offset;
|
||||
length = d->length;
|
||||
}
|
||||
} else if (dma) {
|
||||
done = true;
|
||||
|
@ -1268,10 +1310,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
& URB_ZERO_PACKET))
|
||||
done = true;
|
||||
if (!done) {
|
||||
buf = urb->transfer_buffer
|
||||
+ qh->offset;
|
||||
wLength = urb->transfer_buffer_length
|
||||
- qh->offset;
|
||||
offset = qh->offset;
|
||||
length = urb->transfer_buffer_length - offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1290,28 +1330,31 @@ void musb_host_tx(struct musb *musb, u8 epnum)
|
|||
urb->status = status;
|
||||
urb->actual_length = qh->offset;
|
||||
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
|
||||
return;
|
||||
} else if (usb_pipeisoc(pipe) && dma) {
|
||||
if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
|
||||
offset, length))
|
||||
return;
|
||||
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
|
||||
DBG(1, "not complete, but DMA enabled?\n");
|
||||
return;
|
||||
}
|
||||
|
||||
} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
|
||||
/* WARN_ON(!buf); */
|
||||
/*
|
||||
* PIO: start next packet in this URB.
|
||||
*
|
||||
* REVISIT: some docs say that when hw_ep->tx_double_buffered,
|
||||
* (and presumably, FIFO is not half-full) we should write *two*
|
||||
* packets before updating TXCSR; other docs disagree...
|
||||
*/
|
||||
if (length > qh->maxpacket)
|
||||
length = qh->maxpacket;
|
||||
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
|
||||
qh->segsize = length;
|
||||
|
||||
/* REVISIT: some docs say that when hw_ep->tx_double_buffered,
|
||||
* (and presumably, fifo is not half-full) we should write TWO
|
||||
* packets before updating TXCSR ... other docs disagree ...
|
||||
*/
|
||||
/* PIO: start next packet in this URB */
|
||||
if (wLength > qh->maxpacket)
|
||||
wLength = qh->maxpacket;
|
||||
musb_write_fifo(hw_ep, wLength, buf);
|
||||
qh->segsize = wLength;
|
||||
|
||||
musb_ep_select(mbase, epnum);
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
|
||||
} else
|
||||
DBG(1, "not complete, but dma enabled?\n");
|
||||
|
||||
finish:
|
||||
return;
|
||||
musb_ep_select(mbase, epnum);
|
||||
musb_writew(epio, MUSB_TXCSR,
|
||||
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1841,7 +1884,7 @@ static int musb_urb_enqueue(
|
|||
unsigned long flags;
|
||||
struct musb *musb = hcd_to_musb(hcd);
|
||||
struct usb_host_endpoint *hep = urb->ep;
|
||||
struct musb_qh *qh = hep->hcpriv;
|
||||
struct musb_qh *qh;
|
||||
struct usb_endpoint_descriptor *epd = &hep->desc;
|
||||
int ret;
|
||||
unsigned type_reg;
|
||||
|
@ -1853,22 +1896,21 @@ static int musb_urb_enqueue(
|
|||
|
||||
spin_lock_irqsave(&musb->lock, flags);
|
||||
ret = usb_hcd_link_urb_to_ep(hcd, urb);
|
||||
qh = ret ? NULL : hep->hcpriv;
|
||||
if (qh)
|
||||
urb->hcpriv = qh;
|
||||
spin_unlock_irqrestore(&musb->lock, flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* DMA mapping was already done, if needed, and this urb is on
|
||||
* hep->urb_list ... so there's little to do unless hep wasn't
|
||||
* yet scheduled onto a live qh.
|
||||
* hep->urb_list now ... so we're done, unless hep wasn't yet
|
||||
* scheduled onto a live qh.
|
||||
*
|
||||
* REVISIT best to keep hep->hcpriv valid until the endpoint gets
|
||||
* disabled, testing for empty qh->ring and avoiding qh setup costs
|
||||
* except for the first urb queued after a config change.
|
||||
*/
|
||||
if (qh) {
|
||||
urb->hcpriv = qh;
|
||||
return 0;
|
||||
}
|
||||
if (qh || ret)
|
||||
return ret;
|
||||
|
||||
/* Allocate and initialize qh, minimizing the work done each time
|
||||
* hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
|
||||
|
@ -2044,7 +2086,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
|
|||
* endpoint's irq status here to avoid bogus irqs.
|
||||
* clearing that status is platform-specific...
|
||||
*/
|
||||
} else {
|
||||
} else if (ep->epnum) {
|
||||
musb_h_tx_flush_fifo(ep);
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET
|
||||
|
@ -2058,6 +2100,8 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
|
|||
musb_writew(epio, MUSB_TXCSR, csr);
|
||||
/* flush cpu writebuffer */
|
||||
csr = musb_readw(epio, MUSB_TXCSR);
|
||||
} else {
|
||||
musb_h_ep0_flush_fifo(ep);
|
||||
}
|
||||
if (status == 0)
|
||||
musb_advance_schedule(ep->musb, urb, ep, is_in);
|
||||
|
|
|
@ -195,30 +195,32 @@ static int dma_channel_abort(struct dma_channel *channel)
|
|||
void __iomem *mbase = musb_channel->controller->base;
|
||||
|
||||
u8 bchannel = musb_channel->idx;
|
||||
int offset;
|
||||
u16 csr;
|
||||
|
||||
if (channel->status == MUSB_DMA_STATUS_BUSY) {
|
||||
if (musb_channel->transmit) {
|
||||
offset = MUSB_EP_OFFSET(musb_channel->epnum,
|
||||
MUSB_TXCSR);
|
||||
|
||||
csr = musb_readw(mbase,
|
||||
MUSB_EP_OFFSET(musb_channel->epnum,
|
||||
MUSB_TXCSR));
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET |
|
||||
MUSB_TXCSR_DMAENAB |
|
||||
MUSB_TXCSR_DMAMODE);
|
||||
musb_writew(mbase,
|
||||
MUSB_EP_OFFSET(musb_channel->epnum, MUSB_TXCSR),
|
||||
csr);
|
||||
/*
|
||||
* The programming guide says that we must clear
|
||||
* the DMAENAB bit before the DMAMODE bit...
|
||||
*/
|
||||
csr = musb_readw(mbase, offset);
|
||||
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
|
||||
musb_writew(mbase, offset, csr);
|
||||
csr &= ~MUSB_TXCSR_DMAMODE;
|
||||
musb_writew(mbase, offset, csr);
|
||||
} else {
|
||||
csr = musb_readw(mbase,
|
||||
MUSB_EP_OFFSET(musb_channel->epnum,
|
||||
MUSB_RXCSR));
|
||||
offset = MUSB_EP_OFFSET(musb_channel->epnum,
|
||||
MUSB_RXCSR);
|
||||
|
||||
csr = musb_readw(mbase, offset);
|
||||
csr &= ~(MUSB_RXCSR_AUTOCLEAR |
|
||||
MUSB_RXCSR_DMAENAB |
|
||||
MUSB_RXCSR_DMAMODE);
|
||||
musb_writew(mbase,
|
||||
MUSB_EP_OFFSET(musb_channel->epnum, MUSB_RXCSR),
|
||||
csr);
|
||||
musb_writew(mbase, offset, csr);
|
||||
}
|
||||
|
||||
musb_writew(mbase,
|
||||
|
@ -296,20 +298,28 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
|
|||
&& ((channel->desired_mode == 0)
|
||||
|| (channel->actual_len &
|
||||
(musb_channel->max_packet_sz - 1)))
|
||||
) {
|
||||
) {
|
||||
u8 epnum = musb_channel->epnum;
|
||||
int offset = MUSB_EP_OFFSET(epnum,
|
||||
MUSB_TXCSR);
|
||||
u16 txcsr;
|
||||
|
||||
/*
|
||||
* The programming guide says that we
|
||||
* must clear DMAENAB before DMAMODE.
|
||||
*/
|
||||
musb_ep_select(mbase, epnum);
|
||||
txcsr = musb_readw(mbase, offset);
|
||||
txcsr &= ~(MUSB_TXCSR_DMAENAB
|
||||
| MUSB_TXCSR_AUTOSET);
|
||||
musb_writew(mbase, offset, txcsr);
|
||||
/* Send out the packet */
|
||||
musb_ep_select(mbase,
|
||||
musb_channel->epnum);
|
||||
musb_writew(mbase, MUSB_EP_OFFSET(
|
||||
musb_channel->epnum,
|
||||
MUSB_TXCSR),
|
||||
MUSB_TXCSR_TXPKTRDY);
|
||||
} else {
|
||||
musb_dma_completion(
|
||||
musb,
|
||||
musb_channel->epnum,
|
||||
musb_channel->transmit);
|
||||
txcsr &= ~MUSB_TXCSR_DMAMODE;
|
||||
txcsr |= MUSB_TXCSR_TXPKTRDY;
|
||||
musb_writew(mbase, offset, txcsr);
|
||||
}
|
||||
musb_dma_completion(musb, musb_channel->epnum,
|
||||
musb_channel->transmit);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -668,6 +668,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(DE_VID, WHT_PID) },
{ USB_DEVICE(ADI_VID, ADI_GNICE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};

@@ -912,6 +912,13 @@
#define ADI_VID 0x0456
#define ADI_GNICE_PID 0xF000

/*
* JETI SPECTROMETER SPECBOS 1201
* http://www.jeti.com/products/sys/scb/scb1201.php
*/
#define JETI_VID 0x0c6c
#define JETI_SPC1201_PID 0x04b2

/*
* BmRequestType: 1100 0000b
* bRequest: FTDI_E2_READ

@@ -25,6 +25,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
{ USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

@@ -300,6 +300,10 @@ static int option_resume(struct usb_serial *serial);
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068

#define DLINK_VENDOR_ID 0x1186
#define DLINK_PRODUCT_DWM_652 0x3e04

static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },

@@ -516,6 +520,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
{ } /* Terminating entry */
};

@@ -26,6 +26,27 @@ static struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
{USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */
{USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
{USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
{USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{USB_DEVICE(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
{USB_DEVICE(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

@@ -17,7 +17,8 @@ usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
ifeq ($(CONFIG_USB_LIBUSUAL),)
usb-storage-objs += usual-tables.o
else
obj-$(CONFIG_USB) += libusual.o usual-tables.o
obj-$(CONFIG_USB) += usb-libusual.o
usb-libusual-objs := libusual.o usual-tables.o
endif

obj-$(CONFIG_USB_STORAGE_ALAUDA) += ums-alauda.o

@@ -247,10 +247,8 @@ int usb_stor_clear_halt(struct us_data *us, unsigned int pipe)
USB_ENDPOINT_HALT, endp,
NULL, 0, 3*HZ);

/* reset the endpoint toggle */
if (result >= 0)
usb_settoggle(us->pusb_dev, usb_pipeendpoint(pipe),
usb_pipeout(pipe), 0);
usb_reset_endpoint(us->pusb_dev, endp);

US_DEBUGP("%s: result = %d\n", __func__, result);
return result;

@@ -975,12 +975,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),

/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
/* Reported by Rauch Wolke <rauchwolke@gmx.net>
* and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
*/
UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
"Simple Tech/Datafab",
"CF+SM Reader",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),

/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
* to the USB storage specification in two ways:

@@ -1376,6 +1378,14 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0),

/* Reported by Pascal Terjan <pterjan@mandriva.com>
* Ignore driver CD mode and force into modem mode by default.
*/
UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
"D-Link",
"USB Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0),

/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
@@ -267,6 +267,8 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work)
mutex_lock(&wusbhc->mutex);
wusbhc_devconnect_acked(wusbhc, wusb_dev);
mutex_unlock(&wusbhc->mutex);

wusb_dev_put(wusb_dev);
}

/*

@@ -396,7 +398,8 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,

/* After a device disconnects, change the GTK (see [WUSB]
* section 6.2.11.2). */
wusbhc_gtk_rekey(wusbhc);
if (wusbhc->active)
wusbhc_gtk_rekey(wusbhc);

/* The Wireless USB part has forgotten about the device already; now
* khubd's timer will pick up the disconnection and remove the USB

@@ -1084,15 +1087,21 @@ error_mmcie_set:
* wusbhc_devconnect_stop - stop managing connected devices
* @wusbhc: the WUSB HC
*
* Removes the Host Info IE and stops the keep alives.
*
* FIXME: should this disconnect all devices?
* Disconnects any devices still connected, stops the keep alives and
* removes the Host Info IE.
*/
void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
{
cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
WARN_ON(!list_empty(&wusbhc->cack_list));
int i;

mutex_lock(&wusbhc->mutex);
for (i = 0; i < wusbhc->ports_max; i++) {
if (wusbhc->port[i].wusb_dev)
__wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]);
}
mutex_unlock(&wusbhc->mutex);

cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
kfree(wusbhc->wuie_host_info);
wusbhc->wuie_host_info = NULL;

@@ -88,33 +88,31 @@ static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
wusb_trust_timeout_store);

/*
* Show & store the current WUSB CHID
* Show the current WUSB CHID.
*/
static ssize_t wusb_chid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
const struct wusb_ckhdid *chid;
ssize_t result = 0;

if (wusbhc->wuie_host_info != NULL)
result += ckhdid_printf(buf, PAGE_SIZE,
&wusbhc->wuie_host_info->CHID);
chid = &wusbhc->wuie_host_info->CHID;
else
chid = &wusb_ckhdid_zero;

result += ckhdid_printf(buf, PAGE_SIZE, chid);
result += sprintf(buf + result, "\n");

return result;
}

/*
* Store a new CHID
* Store a new CHID.
*
* This will (FIXME) trigger many changes.
*
* - Send an all zeros CHID and it will stop the controller
* - Send a non-zero CHID and it will start it
* (unless it was started, it will just change the CHID,
* diconnecting all devices first).
*
* So first we scan the MMC we are sent and then we act on it. We
* read it in the same format as we print it, an ASCII string of 16
* hex bytes.
* - Write an all zeros CHID and it will stop the controller
* - Write a non-zero CHID and it will start it.
*
* See wusbhc_chid_set() for more info.
*/

@@ -339,13 +337,15 @@ void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
{
struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);

if (status == 0) {
if (status == 0 && wusb_dev) {
wusb_dev->entry_ts = jiffies;

/* wusbhc_devconnect_acked() can't be called from from
/* wusbhc_devconnect_acked() can't be called from
atomic context so defer it to a work queue. */
if (!list_empty(&wusb_dev->cack_node))
queue_work(wusbd, &wusb_dev->devconnect_acked_work);
else
wusb_dev_put(wusb_dev);
}

usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);

@@ -1387,6 +1387,7 @@ extern int usb_string(struct usb_device *dev, int index,
extern int usb_clear_halt(struct usb_device *dev, int pipe);
extern int usb_reset_configuration(struct usb_device *dev);
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);

/* this request isn't really synchronous, but it belongs with the others */
extern int usb_driver_set_configuration(struct usb_device *udev, int config);

@@ -1491,14 +1492,6 @@ void usb_sg_wait(struct usb_sg_request *io);
#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)

/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
#define usb_settoggle(dev, ep, out, bit) \
((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
((bit) << (ep)))

static inline unsigned int __create_pipe(struct usb_device *dev,
unsigned int endpoint)
{