[SCSI] lpfc 8.2.5 : Fix buffer leaks

Fix buffer leaks:
- HBQ dma buffer leak at dma_pool_destroy when unloading driver
- Fix missing buffer free in slow ring buffer handling

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
This commit is contained in:
Authored by James Smart on 2008-02-08 18:50:25 -05:00; committed by James Bottomley
Parent 7f5f3d0d02
Commit 3163f725a5
6 changed files with 137 additions and 7 deletions

View file

@ -495,6 +495,8 @@ struct lpfc_hba {
wait_queue_head_t *work_wait; wait_queue_head_t *work_wait;
struct task_struct *worker_thread; struct task_struct *worker_thread;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
uint32_t hbq_count; /* Count of configured HBQs */ uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */

View file

@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mb; LPFC_MBOXQ_t *mb;
int i; int i;
if (phba->link_state == LPFC_LINK_DOWN) { if (phba->link_state == LPFC_LINK_DOWN)
return 0; return 0;
}
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN) { if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN; phba->link_state = LPFC_LINK_DOWN;
@ -1122,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (la->attType == AT_LINK_UP) { if (la->attType == AT_LINK_UP) {
phba->fc_stat.LinkUp++; phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) { if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1306 Link Up Event in loop back mode " "1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n", "x%x received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag, la->eventTag, phba->fc_eventTag,
@ -1139,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbx_process_link_up(phba, la); lpfc_mbx_process_link_up(phba, la);
} else { } else {
phba->fc_stat.LinkDown++; phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
}
else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received " "1305 Link Down Event x%x received "
"Data: x%x x%x x%x\n", "Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag, la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag); phba->pport->port_state, vport->fc_flag);
}
lpfc_mbx_issue_link_down(phba); lpfc_mbx_issue_link_down(phba);
} }

View file

@ -1,7 +1,7 @@
/******************************************************************* /*******************************************************************
* This file is part of the Emulex Linux Device Driver for * * This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. * * Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. * * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. * * EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com * * www.emulex.com *
* * * *
@ -1377,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */
#define CMD_QUE_XRI64_CX 0xB3 #define CMD_QUE_XRI64_CX 0xB3
#define CMD_IOCB_RCV_SEQ64_CX 0xB5 #define CMD_IOCB_RCV_SEQ64_CX 0xB5
#define CMD_IOCB_RCV_ELS64_CX 0xB7 #define CMD_IOCB_RCV_ELS64_CX 0xB7
#define CMD_IOCB_RET_XRI64_CX 0xB9
#define CMD_IOCB_RCV_CONT64_CX 0xBB #define CMD_IOCB_RCV_CONT64_CX 0xBB
#define CMD_GEN_REQUEST64_CR 0xC2 #define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3 #define CMD_GEN_REQUEST64_CX 0xC3
/* Unhandled SLI-3 Commands */
#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0
#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1
#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1
#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD
#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6
#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA
#define CMD_IOCB_RET_HBQE64_CN 0xCA
#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC
#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD
#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF
#define CMD_IOCB_LOGENTRY_CN 0x94
#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
#define CMD_MAX_IOCB_CMD 0xE6 #define CMD_MAX_IOCB_CMD 0xE6
#define CMD_IOCB_MASK 0xff #define CMD_IOCB_MASK 0xff

View file

@ -2087,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
INIT_LIST_HEAD(&phba->hbqbuf_in_list);
/* Initialize the SLI Layer to run with lpfc HBAs. */ /* Initialize the SLI Layer to run with lpfc HBAs. */
lpfc_sli_setup(phba); lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba); lpfc_sli_queue_setup(phba);

View file

@ -264,18 +264,27 @@ void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{ {
struct hbq_dmabuf *hbq_entry; struct hbq_dmabuf *hbq_entry;
unsigned long flags;
if (!mp) if (!mp)
return; return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
list_del(&hbq_entry->dbuf.list);
if (hbq_entry->tag == -1) { if (hbq_entry->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_entry); (phba, hbq_entry);
} else { } else {
lpfc_sli_free_hbq(phba, hbq_entry); lpfc_sli_free_hbq(phba, hbq_entry);
} }
spin_unlock_irqrestore(&phba->hbalock, flags);
} else { } else {
lpfc_mbuf_free(phba, mp->virt, mp->phys); lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp); kfree(mp);

View file

@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_IOCB_RCV_SEQ64_CX: case CMD_IOCB_RCV_SEQ64_CX:
case CMD_IOCB_RCV_ELS64_CX: case CMD_IOCB_RCV_ELS64_CX:
case CMD_IOCB_RCV_CONT64_CX: case CMD_IOCB_RCV_CONT64_CX:
case CMD_IOCB_RET_XRI64_CX:
type = LPFC_UNSOL_IOCB; type = LPFC_UNSOL_IOCB;
break; break;
case CMD_IOCB_XMIT_MSEQ64_CR:
case CMD_IOCB_XMIT_MSEQ64_CX:
case CMD_IOCB_RCV_SEQ_LIST64_CX:
case CMD_IOCB_RCV_ELS_LIST64_CX:
case CMD_IOCB_CLOSE_EXTENDED_CN:
case CMD_IOCB_ABORT_EXTENDED_CN:
case CMD_IOCB_RET_HBQE64_CN:
case CMD_IOCB_FCP_IBIDIR64_CR:
case CMD_IOCB_FCP_IBIDIR64_CX:
case CMD_IOCB_FCP_ITASKMGT64_CX:
case CMD_IOCB_LOGENTRY_CN:
case CMD_IOCB_LOGENTRY_ASYNC_CN:
printk("%s - Unhandled SLI-3 Command x%x\n",
__FUNCTION__, iocb_cmnd);
type = LPFC_UNKNOWN_IOCB;
break;
default: default:
type = LPFC_UNKNOWN_IOCB; type = LPFC_UNKNOWN_IOCB;
break; break;
@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{ {
struct lpfc_dmabuf *dmabuf, *next_dmabuf; struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf; struct hbq_dmabuf *hbq_buf;
unsigned long flags;
int i, hbq_count; int i, hbq_count;
uint32_t hbqno;
hbq_count = lpfc_sli_hbq_count(); hbq_count = lpfc_sli_hbq_count();
/* Return all memory used by all HBQs */ /* Return all memory used by all HBQs */
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < hbq_count; ++i) { for (i = 0; i < hbq_count; ++i) {
list_for_each_entry_safe(dmabuf, next_dmabuf, list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbqs[i].hbq_buffer_list, list) { &phba->hbqs[i].hbq_buffer_list, list) {
@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
} }
phba->hbqs[i].buffer_count = 0; phba->hbqs[i].buffer_count = 0;
} }
/* Return all HBQ buffer that are in-fly */
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbqbuf_in_list, list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
if (hbq_buf->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_buf);
} else {
hbqno = hbq_buf->tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_buf);
else
(phba->hbqs[hbqno].hbq_free_buffer)(phba,
hbq_buf);
}
}
/* Mark the HBQs not in use */
phba->hbq_in_use = 0;
spin_unlock_irqrestore(&phba->hbalock, flags);
} }
static struct lpfc_hbq_entry * static struct lpfc_hbq_entry *
@ -603,6 +645,7 @@ static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{ {
uint32_t i, start, end; uint32_t i, start, end;
unsigned long flags;
struct hbq_dmabuf *hbq_buffer; struct hbq_dmabuf *hbq_buffer;
if (!phba->hbqs[hbqno].hbq_alloc_buffer) { if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
@ -615,6 +658,13 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
end = lpfc_hbq_defs[hbqno]->entry_count; end = lpfc_hbq_defs[hbqno]->entry_count;
} }
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0;
}
/* Populate HBQ entries */ /* Populate HBQ entries */
for (i = start; i < end; i++) { for (i = start; i < end; i++) {
hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
@ -626,6 +676,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
else else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
} }
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; return 0;
} }
@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
uint32_t hbqno; uint32_t hbqno;
void *virt; /* virtual address ptr */ void *virt; /* virtual address ptr */
dma_addr_t phys; /* mapped address */ dma_addr_t phys; /* mapped address */
unsigned long flags;
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (hbq_entry == NULL) if (hbq_entry == NULL) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL; return NULL;
}
list_del(&hbq_entry->dbuf.list); list_del(&hbq_entry->dbuf.list);
hbqno = tag >> 16; hbqno = tag >> 16;
new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (new_hbq_entry == NULL) if (new_hbq_entry == NULL) {
list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
return &hbq_entry->dbuf; return &hbq_entry->dbuf;
}
new_hbq_entry->tag = -1; new_hbq_entry->tag = -1;
phys = new_hbq_entry->dbuf.phys; phys = new_hbq_entry->dbuf.phys;
virt = new_hbq_entry->dbuf.virt; virt = new_hbq_entry->dbuf.virt;
@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
hbq_entry->dbuf.phys = phys; hbq_entry->dbuf.phys = phys;
hbq_entry->dbuf.virt = virt; hbq_entry->dbuf.virt = virt;
lpfc_sli_free_hbq(phba, hbq_entry); lpfc_sli_free_hbq(phba, hbq_entry);
list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
return &new_hbq_entry->dbuf; return &new_hbq_entry->dbuf;
} }
@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t Rctl, Type; uint32_t Rctl, Type;
uint32_t match, i; uint32_t match, i;
struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
match = 0; match = 0;
irsp = &(saveq->iocb); irsp = &(saveq->iocb);
@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 1; return 1;
} }
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
return 1;
}
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) { if (irsp->ulpBdeCount != 0) {
saveq->context2 = lpfc_sli_get_buff(phba, pring, saveq->context2 = lpfc_sli_get_buff(phba, pring,
@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
/* Initialize the struct lpfc_sli_hbq structure for each hbq */ /* Initialize the struct lpfc_sli_hbq structure for each hbq */
phba->link_state = LPFC_INIT_MBX_CMDS; phba->link_state = LPFC_INIT_MBX_CMDS;
phba->hbq_in_use = 1;
hbq_entry_index = 0; hbq_entry_index = 0;
for (hbqno = 0; hbqno < hbq_count; ++hbqno) { for (hbqno = 0; hbqno < hbq_count; ++hbqno) {