staging: rts5139: remove disabled code in rts51x_transport.c

Signed-off-by: Oleksij Rempel <bug-track@fisher-privat.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Oleksij Rempel 2012-05-04 17:14:26 +02:00 committed by Greg Kroah-Hartman
Parent 30572230e1
Commit 7f00e6eac6
1 changed file with 0 additions and 262 deletions


@@ -391,218 +391,9 @@ static void rts51x_sg_clean(struct usb_sg_request *io)
kfree(io->urbs);
io->urbs = NULL;
}
#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */
if (io->dev->dev.dma_mask != NULL)
usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
io->sg, io->nents);
#endif
io->dev = NULL;
}
#if 0
static void rts51x_sg_complete(struct urb *urb)
{
struct usb_sg_request *io = urb->context;
int status = urb->status;
spin_lock(&io->lock);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
* device driver code (like this routine) unlink queued urbs first,
* if it needs to, since the HC won't work on them at all. So it's
* not possible for page N+1 to overwrite page N, and so on.
*
* That's only for "hard" faults; "soft" faults (unlinks) sometimes
* complete before the HCD can get requests away from hardware,
* though never during cleanup after a hard fault.
*/
if (io->status
&& (io->status != -ECONNRESET
|| status != -ECONNRESET)
&& urb->actual_length) {
dev_err(io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
status, io->status);
/* BUG (); */
}
if (io->status == 0 && status && status != -ECONNRESET) {
int i, found, retval;
io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
spin_unlock(&io->lock);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i] || !io->urbs[i]->dev)
continue;
if (found) {
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
retval != -EBUSY)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
} else if (urb == io->urbs[i])
found = 1;
}
spin_lock(&io->lock);
}
urb->dev = NULL;
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock(&io->lock);
}
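/*
 * A brief aside on the unlink dance in rts51x_sg_complete() above (a generic
 * sketch, assuming the <linux/usb.h> declarations this file already pulls in;
 * it is not code from this driver): usb_unlink_urb() is the asynchronous
 * cancel primitive and is what a completion handler may call, whereas
 * usb_kill_urb() sleeps until the victim URB has been given back and so must
 * never be used from completion (atomic) context.
 */
static void sketch_cancel_urb(struct urb *urb, bool in_completion_context)
{
	if (in_completion_context) {
		/* Asynchronous: typically returns -EINPROGRESS and the URB's
		 * completion handler still runs later, with -ECONNRESET. */
		int retval = usb_unlink_urb(urb);

		if (retval != -EINPROGRESS && retval != -ENODEV &&
		    retval != -EBUSY && retval != 0)
			pr_debug("%s: unlink --> %d\n", __func__, retval);
	} else {
		/* Synchronous: blocks until the completion handler is done. */
		usb_kill_urb(urb);
	}
}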
/* This function is ported from usb_sg_init, which can transfer
* sg list partially */
int rts51x_sg_init_partial(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, void *buf, struct scatterlist **sgptr,
unsigned int *offset, int nents, size_t length, gfp_t mem_flags)
{
int i;
int urb_flags;
int dma;
struct scatterlist *sg = *sgptr, *first_sg;
first_sg = (struct scatterlist *)buf;
if (!sg)
sg = first_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
|| usb_pipeisoc(pipe)
|| (nents <= 0))
return -EINVAL;
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
io->sg = first_sg; /* used by unmap */
io->nents = nents;
RTS51X_DEBUGP("Before map, sg address: 0x%x\n", (unsigned int)sg);
RTS51X_DEBUGP("Before map, dev address: 0x%x\n", (unsigned int)dev);
/* not all host controllers use DMA (like the mainstream pci ones);
* they can use PIO (sl811) or be software over another transport.
*/
dma = (dev->dev.dma_mask != NULL);
if (dma) {
/* map the whole sg list, because here we only know the
* total nents */
io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
first_sg, nents);
} else {
io->entries = nents;
}
/* initialize all the urbs we'll use */
if (io->entries <= 0)
return io->entries;
io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
if (!io->urbs)
goto nomem;
urb_flags = URB_NO_INTERRUPT;
if (dma)
urb_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
RTS51X_DEBUGP("io->entries = %d\n", io->entries);
for (i = 0; (sg != NULL) && (length > 0); i++) {
unsigned len;
RTS51X_DEBUGP("sg address: 0x%x\n", (unsigned int)sg);
RTS51X_DEBUGP("length = %d, *offset = %d\n", length, *offset);
io->urbs[i] = usb_alloc_urb(0, mem_flags);
if (!io->urbs[i]) {
io->entries = i;
goto nomem;
}
io->urbs[i]->dev = NULL;
io->urbs[i]->pipe = pipe;
io->urbs[i]->interval = period;
io->urbs[i]->transfer_flags = urb_flags;
io->urbs[i]->complete = rts51x_sg_complete;
io->urbs[i]->context = io;
if (dma) {
io->urbs[i]->transfer_dma =
sg_dma_address(sg) + *offset;
len = sg_dma_len(sg) - *offset;
io->urbs[i]->transfer_buffer = NULL;
RTS51X_DEBUGP(" -- sg entry dma length = %d\n",
sg_dma_len(sg));
} else {
/* hc may use _only_ transfer_buffer */
io->urbs[i]->transfer_buffer = sg_virt(sg) + *offset;
len = sg->length - *offset;
RTS51X_DEBUGP(" -- sg entry length = %d\n",
sg->length);
}
if (length >= len) {
*offset = 0;
io->urbs[i]->transfer_buffer_length = len;
length -= len;
sg = sg_next(sg);
} else {
*offset += length;
io->urbs[i]->transfer_buffer_length = length;
length = 0;
}
if (length == 0)
io->entries = i + 1;
#if 0
if (length) {
len = min_t(unsigned, len, length);
length -= len;
if (length == 0) {
io->entries = i + 1;
*offset += len;
} else {
*offset = 0;
}
}
#endif
}
RTS51X_DEBUGP("In %s, urb count: %d\n", __func__, i);
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
RTS51X_DEBUGP("sg address stored in sgptr: 0x%x\n", (unsigned int)sg);
*sgptr = sg;
/* transaction state */
io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
return 0;
nomem:
rts51x_sg_clean(io);
return -ENOMEM;
}
#endif
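/*
 * For reference, the disabled rts51x_sg_init_partial() above was ported from
 * the core's usb_sg_init().  A minimal sketch of the stock scatter-gather API
 * that the surviving code keeps using (a generic illustration assuming this
 * file's existing includes; it is not the driver's own helper):
 */
static int sketch_sg_transfer(struct usb_device *udev, unsigned int pipe,
			      struct scatterlist *sg, int nents, size_t length)
{
	struct usb_sg_request io;
	int result;

	/* One request covering the whole sg list, no partial offsets. */
	result = usb_sg_init(&io, udev, pipe, 0, sg, nents, length, GFP_NOIO);
	if (result)
		return result;

	/* Blocks until every queued urb has completed; the outcome is
	 * reported through io.status and io.bytes rather than returned. */
	usb_sg_wait(&io);

	return io.status;
}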
int rts51x_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
int nents, size_t length, gfp_t mem_flags)
@@ -740,55 +531,7 @@ static int rts51x_bulk_transfer_sglist(struct rts51x_chip *chip,
return interpret_urb_result(chip, pipe, length, result,
chip->usb->current_sg.bytes);
}
#if 0
static int rts51x_bulk_transfer_sglist_partial(struct rts51x_chip *chip,
unsigned int pipe, void *buf, struct scatterlist **sgptr,
unsigned int *offset, int num_sg, unsigned int length,
unsigned int *act_len, int timeout)
{
int result;
/* don't submit s-g requests during abort processing */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags))
TRACE_RET(chip, STATUS_ERROR);
/* initialize the scatter-gather request block */
RTS51X_DEBUGP("%s: xfer %u bytes, %d entries\n", __func__,
length, num_sg);
result = rts51x_sg_init_partial(&chip->usb->current_sg,
chip->usb->pusb_dev, pipe, 0, buf, sgptr, offset,
num_sg, length, GFP_NOIO);
if (result) {
RTS51X_DEBUGP("rts51x_sg_init_partial returned %d\n", result);
TRACE_RET(chip, STATUS_ERROR);
}
/* since the block has been initialized successfully, it's now
* okay to cancel it */
set_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* did an abort occur during the submission? */
if (test_bit(FLIDX_ABORTING, &chip->usb->dflags)) {
/* cancel the request, if it hasn't been cancelled already */
if (test_and_clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags)) {
RTS51X_DEBUGP("-- cancelling sg request\n");
usb_sg_cancel(&chip->usb->current_sg);
}
}
/* wait for the completion of the transfer */
result = rts51x_sg_wait(&chip->usb->current_sg, timeout);
clear_bit(FLIDX_SG_ACTIVE, &chip->usb->dflags);
/* result = us->current_sg.status; */
if (act_len)
*act_len = chip->usb->current_sg.bytes;
return interpret_urb_result(chip, pipe, length, result,
chip->usb->current_sg.bytes);
}
#endif
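/*
 * With the partial-sglist path above removed, rts51x_transfer_data_partial()
 * further down keeps relying on its bounce-buffer fallback: linearize the
 * relevant part of the scatterlist into tmp_buf and push that through
 * rts51x_bulk_transfer_buf().  A rough sketch of that pattern for an outgoing
 * transfer (illustrative only; apart from rts51x_bulk_transfer_buf(), the
 * names are made up, and the real helper presumably also deals with the
 * transfer direction and the caller-supplied offset):
 */
static int sketch_bounce_transfer(struct rts51x_chip *chip, unsigned int pipe,
				  struct scatterlist *sg, int nents,
				  unsigned int len, unsigned int *act_len,
				  int timeout)
{
	void *tmp_buf;
	int result;

	tmp_buf = kmalloc(len, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	/* Copy the scattered pages into one flat buffer ... */
	sg_copy_to_buffer(sg, nents, tmp_buf, len);

	/* ... and let the surviving bulk helper do a plain linear transfer. */
	result = rts51x_bulk_transfer_buf(chip, pipe, tmp_buf, len,
					  act_len, timeout);

	kfree(tmp_buf);
	return result;
}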
int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
void *buf, unsigned int length,
unsigned int *act_len, int timeout)
@@ -860,11 +603,6 @@ int rts51x_transfer_data_partial(struct rts51x_chip *chip, unsigned int pipe,
}
kfree(tmp_buf);
#if 0
result = rts51x_bulk_transfer_sglist_partial(chip, pipe, buf,
(struct scatterlist **)ptr, offset,
use_sg, len, act_len, timeout);
#endif
} else {
unsigned int step = 0;
if (offset)