firewire: Use dma_mapping_error() for checking for DMA mapping errors.

Pointed out by Pete Zaitcev.

Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Kristian Høgsberg, 2007-02-06 14:49:40 -05:00, committed by Stefan Richter
Parent:  27a15e50fb
Commit:  82eff9db7d
3 changed files: 47 additions, 25 deletions
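The fix is the same in all three files: bus addresses returned by dma_map_single()/dma_map_page() were compared against 0, but 0 can be a perfectly valid DMA address on some platforms, and only the DMA layer knows what its error return looks like. Below is a minimal sketch of the pattern, not code from this commit; map_one_buffer() is a hypothetical helper, and the one-argument dma_mapping_error() matches the kernel API of this era (2.6.27 and later also pass the struct device).

#include <linux/dma-mapping.h>

/*
 * Sketch only, not code from this commit.  map_one_buffer() is a
 * hypothetical helper.
 */
static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *bus_out)
{
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Wrong: 0 can be a valid bus address on some platforms. */
	/* if (bus == 0) return -ENOMEM; */

	/* Right: let the DMA layer define what an error looks like. */
	if (dma_mapping_error(bus))
		return -ENOMEM;

	*bus_out = bus;
	return 0;
}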

--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c

@@ -33,7 +33,7 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 		 enum dma_data_direction direction)
 {
 	struct page *page;
-	int i;
+	int i, j;
 	void *p;
 
 	ctx->buffer_size = PAGE_ALIGN(size);
@@ -42,24 +42,33 @@ setup_iso_buffer(struct fw_iso_context *ctx, size_t size,
 	ctx->buffer = vmalloc_32_user(ctx->buffer_size);
 	if (ctx->buffer == NULL)
-		return -ENOMEM;
+		goto fail_buffer_alloc;
 
 	ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
 	ctx->pages =
 		kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL);
-	if (ctx->pages == NULL) {
-		vfree(ctx->buffer);
-		return -ENOMEM;
-	}
+	if (ctx->pages == NULL)
+		goto fail_pages_alloc;
 
 	p = ctx->buffer;
 	for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
 		page = vmalloc_to_page(p);
 		ctx->pages[i] = dma_map_page(ctx->card->device,
 					     page, 0, PAGE_SIZE, direction);
+		if (dma_mapping_error(ctx->pages[i]))
+			goto fail_mapping;
 	}
 
 	return 0;
+
+ fail_mapping:
+	for (j = 0; j < i; j++)
+		dma_unmap_page(ctx->card->device, ctx->pages[j],
+			       PAGE_SIZE, DMA_TO_DEVICE);
+ fail_pages_alloc:
+	vfree(ctx->buffer);
+ fail_buffer_alloc:
+	return -ENOMEM;
 }
 
 static void destroy_iso_buffer(struct fw_iso_context *ctx)
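The fw-iso.c hunk also restructures the error handling into the kernel's goto-unwind ladder, which the new check requires: dma_map_page() can now fail after some pages were already mapped, so fail_mapping unmaps only the first i pages before falling through to the earlier cleanup steps. A self-contained sketch of the idiom in plain userspace C, with illustrative names:

#include <errno.h>
#include <stdlib.h>

struct thing {
	char *a, *b, *c;
};

/*
 * Each label is named after the step that failed and undoes only the
 * steps that succeeded before it, in reverse order, so every failure
 * point needs exactly one goto and each cleanup exists exactly once.
 */
static int setup_thing(struct thing *t, size_t n)
{
	t->a = malloc(n);
	if (t->a == NULL)
		goto fail_a;

	t->b = malloc(n);
	if (t->b == NULL)
		goto fail_b;

	t->c = malloc(n);
	if (t->c == NULL)
		goto fail_c;

	return 0;

 fail_c:		/* c failed: undo b, then fall through to undo a */
	free(t->b);
 fail_b:		/* b failed: undo a */
	free(t->a);
 fail_a:		/* nothing succeeded: nothing to undo */
	return -ENOMEM;
}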

--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c

@@ -431,7 +431,7 @@ at_context_setup_packet(struct at_context *ctx, struct list_head *list)
 					   packet->payload,
 					   packet->payload_length,
 					   DMA_TO_DEVICE);
-	if (packet->payload_bus == 0) {
+	if (dma_mapping_error(packet->payload_bus)) {
 		complete_transmission(packet, RCODE_SEND_ERROR, list);
 		return;
 	}
@@ -590,7 +590,7 @@ at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
 	ctx->descriptor_bus =
 		dma_map_single(ohci->card.device, &ctx->d,
 			       sizeof ctx->d, DMA_TO_DEVICE);
-	if (ctx->descriptor_bus == 0)
+	if (dma_mapping_error(ctx->descriptor_bus))
 		return -ENOMEM;
 
 	ctx->regs = regs;
@@ -1159,16 +1159,14 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);
 
 	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
-	if (ctx->buffer == NULL) {
-		spin_lock_irqsave(&ohci->lock, flags);
-		*mask |= 1 << index;
-		spin_unlock_irqrestore(&ohci->lock, flags);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ctx->buffer == NULL)
+		goto buffer_alloc_failed;
 
 	ctx->buffer_bus =
 		dma_map_single(card->device, ctx->buffer,
 			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->buffer_bus))
+		goto buffer_map_failed;
 
 	ctx->head_descriptor = ctx->buffer;
 	ctx->prev_descriptor = ctx->buffer;
@@ -1187,6 +1185,15 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 	ctx->head_descriptor++;
 
 	return &ctx->base;
+
+ buffer_map_failed:
+	kfree(ctx->buffer);
+ buffer_alloc_failed:
+	spin_lock_irqsave(&ohci->lock, flags);
+	*mask |= 1 << index;
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
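One subtlety in ohci_allocate_iso_context(): the context's slot was claimed earlier by clearing a bit in a free-mask under ohci->lock, so both error paths must put that bit back under the same lock, which is exactly what the shared buffer_alloc_failed tail does. A sketch of that kind of slot bookkeeping with illustrative names (claim_slot()/release_slot() are not the driver's functions):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(slot_lock);
static unsigned long slot_mask = ~0UL;	/* one bit per free slot */

static int claim_slot(void)
{
	unsigned long flags;
	int index;

	spin_lock_irqsave(&slot_lock, flags);
	index = slot_mask ? (int)__ffs(slot_mask) : -1;
	if (index >= 0)
		slot_mask &= ~(1UL << index);	/* mark slot busy */
	spin_unlock_irqrestore(&slot_lock, flags);

	return index < 0 ? -EBUSY : index;
}

static void release_slot(int index)
{
	unsigned long flags;

	spin_lock_irqsave(&slot_lock, flags);
	slot_mask |= 1UL << index;	/* what the error path restores */
	spin_unlock_irqrestore(&slot_lock, flags);
}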

--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c

@@ -411,13 +411,13 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
-	if (orb->base.request_bus == 0)
+	if (dma_mapping_error(orb->base.request_bus))
 		goto out;
 
 	orb->response_bus =
 		dma_map_single(device->card->device, &orb->response,
 			       sizeof orb->response, DMA_FROM_DEVICE);
-	if (orb->response_bus == 0)
+	if (dma_mapping_error(orb->response_bus))
 		goto out;
 
 	orb->request.response.high = 0;
@@ -963,22 +963,20 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 	 * transfer direction not handled. */
 	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
 		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb = kzalloc(sizeof *orb, GFP_ATOMIC);
 	if (orb == NULL) {
 		fw_notify("failed to alloc orb\n");
-		cmd->result = DID_NO_CONNECT << 16;
-		done(cmd);
-		return 0;
+		goto fail_alloc;
 	}
 
 	orb->base.request_bus =
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof orb->request, DMA_TO_DEVICE);
+	if (dma_mapping_error(orb->base.request_bus))
+		goto fail_mapping;
 
 	orb->unit = unit;
 	orb->done = done;
@@ -1009,9 +1007,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		 * could we get the scsi or blk layer to do that by
 		 * reporting our max supported block size? */
 		fw_error("command > 64k\n");
-		cmd->result = DID_ERROR << 16;
-		done(cmd);
-		return 0;
+		goto fail_bufflen;
 	} else if (cmd->request_bufflen > 0) {
 		sbp2_command_orb_map_buffer(orb);
 	}
@@ -1028,6 +1024,16 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 		  sd->command_block_agent_address + SBP2_ORB_POINTER);
 
 	return 0;
+
+ fail_bufflen:
+	dma_unmap_single(device->card->device, orb->base.request_bus,
+			 sizeof orb->request, DMA_TO_DEVICE);
+ fail_mapping:
+	kfree(orb);
+ fail_alloc:
+	cmd->result = DID_ERROR << 16;
+	done(cmd);
+	return 0;
 }
 
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
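Note that the consolidated failure tail in sbp2_scsi_queuecommand() still returns 0: under the queuecommand() convention of this era, a nonzero return asks the SCSI midlayer to requeue the command later, while a command that should fail outright is completed through done() with a host-byte error code. A sketch of that convention (fail_command() is an illustrative helper, not from the driver; the typedef mirrors the scsi_done_fn_t signature seen above):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Mirrors the done-callback type used in the driver's prototype above. */
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *cmd);

/* Illustrative helper, not from the driver: fail one command now. */
static int fail_command(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
	cmd->result = DID_ERROR << 16;	/* host byte lives in bits 16-23 */
	done(cmd);			/* complete the command with that status */
	return 0;			/* 0 = handled; nonzero would mean "retry later" */
}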