The patch set is mostly driver updates (qla4, qla2 [ISF support updates],
 lpfc, aacraid [dual firmware image support]) and a few bug fixes.
 
 Signed-off-by: James Bottomley <JBottomley@Parallels.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.19 (GNU/Linux)
 
 iQEcBAABAgAGBQJRgAd8AAoJEDeqqVYsXL0Mg5wH/3P4wlXaRyqvyrFk1WSkmklZ
 6YxzIKn/RLFmJlJvkaTT7N02ble2UqTluB6+5+AorU/jqz6DArxHsPnyv0/+2pXS
 zYmp1hrcLn9dB3sZ2Y32jU2GlzHq+LSJSjjnUrA/uRrq1KTP09KCJtGbZUkvy710
 x1/3e3I8u2bvBAehUkKvazg5+xlw/XImJ+IVXgUWOyiv1mNbqNEtT5qYt7sjnhLu
 Mg2VfKTSb+kzxSpol3v51vh/wqY6unVcI/a9HxanihkDtkqRBRhg8wXpNAYI2xNK
 uNLGq0VFpyCYfBZX0aqh01QZ++EUU2TNlRreVg4/crQnBR88EI3KtUsA1BOthwQ=
 =x6dL
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James "Jej B" Bottomley:
 "The patch set is mostly driver updates (qla4, qla2 [ISF support
  updates], lpfc, aacraid [dual firmware image support]) and a few bug
  fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] iscsi_tcp: support PF_MEMALLOC/__GFP_MEMALLOC
  [SCSI] libiscsi: avoid unnecessary multiple NULL assignments
  [SCSI] qla4xxx: Update driver version to 5.03.00-k8
  [SCSI] qla4xxx: Added print statements to display AENs
  [SCSI] qla4xxx: Use correct value for max flash node entries
  [SCSI] qla4xxx: Restrict logout from boot target session using session id
  [SCSI] qla4xxx: Use correct flash ddb offset for ISP40XX
  [SCSI] isci: add CONFIG_PM_SLEEP to suspend/resume functions
  [SCSI] scsi_dh_alua: Add module parameter to allow failover to non preferred path without STPG
  [SCSI] qla2xxx: Update the driver version to 8.05.00.03-k.
  [SCSI] qla2xxx: Obtain loopback iteration count from bsg request.
  [SCSI] qla2xxx: Add clarifying printk to thermal access fail cases.
  [SCSI] qla2xxx: Remove duplicated include form qla_isr.c
  [SCSI] qla2xxx: Enhancements to support ISPFx00.
  [SCSI] qla4xxx: Update driver version to 5.03.00-k7
  [SCSI] qla4xxx: Replace dev type macros with generic portal type macros
  [SCSI] scsi_transport_iscsi: Declare portal type string macros for generic use
  [SCSI] qla4xxx: Add flash node mgmt support
  [SCSI] libiscsi: export function iscsi_switch_str_param
  [SCSI] scsi_transport_iscsi: Add flash node mgmt support
  ...
Linus Torvalds 2013-04-30 13:16:38 -07:00
Parents: 6da6dc2380 9e45dd7323
Commit: 5aa1c98862
53 changed files with 8983 additions and 820 deletions


@ -6236,7 +6236,7 @@ S: Supported
F: drivers/scsi/pmcraid.*
PMC SIERRA PM8001 DRIVER
M: jack_wang@usish.com
M: xjtuwjp@gmail.com
M: lindar_liu@usish.com
L: linux-scsi@vger.kernel.org
S: Supported


@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 30000
# define AAC_DRIVER_BUILD 30200
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@ -1918,6 +1918,10 @@ extern struct aac_common aac_config;
#define MONITOR_PANIC 0x00000020
#define KERNEL_UP_AND_RUNNING 0x00000080
#define KERNEL_PANIC 0x00000100
#define FLASH_UPD_PENDING 0x00002000
#define FLASH_UPD_SUCCESS 0x00004000
#define FLASH_UPD_FAILED 0x00008000
#define FWUPD_TIMEOUT (5 * 60)
/*
* Doorbell bit defines


@ -214,7 +214,7 @@ int aac_send_shutdown(struct aac_dev * dev)
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xffffffff);
cmd->cid = cpu_to_le32(0xfffffffe);
status = aac_fib_send(ContainerCommand,
fibctx,


@ -702,6 +702,28 @@ int aac_srcv_init(struct aac_dev *dev)
if ((aac_reset_devices || reset_devices) &&
!aac_src_restart_adapter(dev, 0))
++restart;
/*
* Check to see if flash update is running.
* Wait for the adapter to be up and running. Wait up to 5 minutes
*/
status = src_readl(dev, MUnit.OMR);
if (status & FLASH_UPD_PENDING) {
start = jiffies;
do {
status = src_readl(dev, MUnit.OMR);
if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
printk(KERN_ERR "%s%d: adapter flash update failed.\n",
dev->name, instance);
goto error_iounmap;
}
} while (!(status & FLASH_UPD_SUCCESS) &&
!(status & FLASH_UPD_FAILED));
/* Delay 10 seconds.
* Because right now FW is doing a soft reset,
* do not read scratch pad register at this time
*/
ssleep(10);
}
/*
* Check to see if the board panic'd while booting.
*/
@ -730,7 +752,9 @@ int aac_srcv_init(struct aac_dev *dev)
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
while (!((status = src_readl(dev, MUnit.OMR)) &
KERNEL_UP_AND_RUNNING) ||
status == 0xffffffff) {
if ((restart &&
(status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
time_after(jiffies, start+HZ*startup_timeout)) {
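
The FLASH_UPD_PENDING handling added above is a bounded poll: read a status register in a loop and give up after FWUPD_TIMEOUT. A minimal userspace analogue of that pattern, with read_status() standing in for src_readl(dev, MUnit.OMR) and plain seconds instead of jiffies, might look like the sketch below (illustrative only, not driver code).

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define UPD_SUCCESS 0x1u
#define UPD_FAILED  0x2u
#define UPD_TIMEOUT_SECS (5 * 60)

/* Stand-in for the hardware register read (src_readl in the driver). */
static unsigned int read_status(void)
{
	return UPD_SUCCESS;
}

int main(void)
{
	time_t start = time(NULL);
	unsigned int status;

	do {
		status = read_status();
		if (time(NULL) - start > UPD_TIMEOUT_SECS) {
			fprintf(stderr, "flash update timed out\n");
			return 1;
		}
		sleep(1);	/* don't hammer the register */
	} while (!(status & (UPD_SUCCESS | UPD_FAILED)));

	return (status & UPD_FAILED) ? 1 : 0;
}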


@ -3892,7 +3892,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
__u8 op;
__be64 *data;
void *msg = NULL;
uint32_t msg_len = 0;
bool msg_sg = 0;
@ -3908,8 +3907,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
msg = (void *) flb;
msg_len = flb->totlen;
msg_sg = 1;
data = (__be64 *) msg;
} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
CSIO_INC_STATS(hw, n_cpl_fw6_msg);
@ -3917,8 +3914,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
msg = (void *)((uintptr_t)wr + sizeof(__be64));
msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
sizeof(struct cpl_fw4_msg);
data = (__be64 *) msg;
} else {
csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
CSIO_INC_STATS(hw, n_cpl_unexp);


@ -232,13 +232,13 @@ static void stpg_endio(struct request *req, int error)
struct scsi_sense_hdr sense_hdr;
unsigned err = SCSI_DH_OK;
if (error || host_byte(req->errors) != DID_OK ||
msg_byte(req->errors) != COMMAND_COMPLETE) {
if (host_byte(req->errors) != DID_OK ||
msg_byte(req->errors) != COMMAND_COMPLETE) {
err = SCSI_DH_IO;
goto done;
}
if (h->senselen > 0) {
if (req->sense_len > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@ -255,7 +255,9 @@ static void stpg_endio(struct request *req, int error)
ALUA_DH_NAME, sense_hdr.sense_key,
sense_hdr.asc, sense_hdr.ascq);
err = SCSI_DH_IO;
}
} else if (error)
err = SCSI_DH_IO;
if (err == SCSI_DH_OK) {
h->state = TPGS_STATE_OPTIMIZED;
sdev_printk(KERN_INFO, h->sdev,
@ -710,6 +712,10 @@ static int alua_set_params(struct scsi_device *sdev, const char *params)
return result;
}
static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
/*
* alua_activate - activate a path
* @sdev: device on the path to be activated
@ -731,6 +737,9 @@ static int alua_activate(struct scsi_device *sdev,
if (err != SCSI_DH_OK)
goto out;
if (optimize_stpg)
h->flags |= ALUA_OPTIMIZE_STPG;
if (h->tpgs & TPGS_MODE_EXPLICIT) {
switch (h->state) {
case TPGS_STATE_NONOPTIMIZED:
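
Because optimize_stpg is declared with module_param(..., S_IRUGO|S_IWUSR), standard module_param behaviour should also expose it under /sys/module/scsi_dh_alua/parameters/ at runtime; the exact path below is inferred from the module name, not taken from this patch. A small hedged sketch for toggling it from userspace:

#include <stdio.h>

int main(void)
{
	/* Path assumed from module_param() conventions for scsi_dh_alua. */
	FILE *f = fopen("/sys/module/scsi_dh_alua/parameters/optimize_stpg", "w");

	if (!f) {
		perror("optimize_stpg");
		return 1;
	}
	fputc('1', f);	/* 1 = prefer a non-optimized path over sending STPG */
	fclose(f);
	return 0;
}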


@ -721,7 +721,7 @@ static void isci_pci_remove(struct pci_dev *pdev)
}
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int isci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@ -770,18 +770,16 @@ static int isci_resume(struct device *dev)
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
#endif
static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
.remove = isci_pci_remove,
#ifdef CONFIG_PM
.driver.pm = &isci_pm_ops,
#endif
};
static __init int isci_init(void)


@ -370,17 +370,24 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
int rc;
unsigned long pflags = current->flags;
int rc = 0;
current->flags |= PF_MEMALLOC;
while (iscsi_sw_tcp_xmit_qlen(conn)) {
rc = iscsi_sw_tcp_xmit(conn);
if (rc == 0)
return -EAGAIN;
if (rc == 0) {
rc = -EAGAIN;
break;
}
if (rc < 0)
return rc;
break;
rc = 0;
}
return 0;
tsk_restore_flags(current, pflags, PF_MEMALLOC);
return rc;
}
/*
@ -665,6 +672,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_reuse = SK_CAN_REUSE;
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
sk_set_memalloc(sk);
iscsi_sw_tcp_conn_set_callbacks(conn);
tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
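
The pdu_xmit change above saves current->flags, sets PF_MEMALLOC for the duration of the transmit loop, and restores only that bit with tsk_restore_flags(). A self-contained illustration of that save/set/restore-one-bit idiom follows (plain C with hypothetical flag names, not the kernel API):

#include <stdio.h>

#define FLAG_MEMALLOC 0x4u	/* hypothetical stand-in for PF_MEMALLOC */

static unsigned int task_flags;	/* stand-in for current->flags */

/* Equivalent of tsk_restore_flags(): restore only the masked bits. */
static void restore_flags(unsigned int *flags, unsigned int old, unsigned int mask)
{
	*flags = (*flags & ~mask) | (old & mask);
}

int main(void)
{
	unsigned int pflags = task_flags;	/* remember the caller's setting */

	task_flags |= FLAG_MEMALLOC;		/* opt in for this transmit only */
	printf("during xmit: %#x\n", task_flags);

	restore_flags(&task_flags, pflags, FLAG_MEMALLOC);
	printf("after xmit:  %#x\n", task_flags);	/* back to the saved state */
	return 0;
}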


@ -507,7 +507,6 @@ static void iscsi_free_task(struct iscsi_task *task)
kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
if (sc) {
task->sc = NULL;
/* SCSI eh reuses commands to verify us */
sc->SCp.ptr = NULL;
/*
@ -3142,7 +3141,7 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
static int iscsi_switch_str_param(char **param, char *new_val_buf)
int iscsi_switch_str_param(char **param, char *new_val_buf)
{
char *new_val;
@ -3159,6 +3158,7 @@ static int iscsi_switch_str_param(char **param, char *new_val_buf)
*param = new_val;
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)


@ -692,7 +692,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*/
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
while (pring->txcmplq_cnt) {
while (!list_empty(&pring->txcmplq)) {
msleep(10);
if (cnt++ > 500) { /* 5 secs */
lpfc_printf_log(phba,
@ -2302,11 +2302,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
int lpfc_enable_rrq;
int lpfc_enable_rrq = 2;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
lpfc_param_init(enable_rrq, 0, 0, 1);
/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
*/
lpfc_param_init(enable_rrq, 2, 0, 2);
static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
/*

The diff for this file is not shown because it is too large.


@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);


@ -484,6 +484,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
vport->port_state = LPFC_FABRIC_CFG_LINK;
memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
mboxq->context1 = dmabuf;
@ -700,6 +701,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
/*
* For FC we need to do some special processing because of the SLI
* Port's default settings of the Common Service Parameters.
*/
if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
lpfc_unregister_fcf_prep(phba);
/* This should just update the VFI CSPs*/
if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_issue_reg_vfi(vport);
}
if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
@ -6225,7 +6240,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
@ -6279,7 +6294,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
continue;
list_move_tail(&piocb->list, &completions);
pring->txq_cnt--;
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@ -6339,7 +6353,6 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
cmd->ulpCommand == CMD_ABORT_XRI_CN)
continue;
list_move_tail(&piocb->list, &completions);
pring->txq_cnt--;
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@ -8065,7 +8078,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
rxid, 1);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
if (!(list_empty(&pring->txq)))
lpfc_worker_wake_up(phba);
return;
}


@ -691,12 +691,15 @@ lpfc_work_done(struct lpfc_hba *phba)
/* Set the lpfc data pending flag */
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
(status &
HA_RXMASK));
if (phba->link_state >= LPFC_LINK_UP) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
(status &
HA_RXMASK));
}
}
if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
if ((phba->sli_rev == LPFC_SLI_REV4) &
(!list_empty(&pring->txq)))
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@ -1792,6 +1795,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
virt_addr = mboxq->sge_array->addr[0];
shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
lpfc_sli_pcimem_bcopy(shdr, shdr,
sizeof(union lpfc_sli4_cfg_shdr));
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status) {
@ -2888,6 +2893,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out_free_mem;
}
/* If the VFI is already registered, there is nothing else to do */
if (vport->fc_flag & FC_VFI_REGISTERED)
goto out_free_mem;
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
@ -2980,6 +2990,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
spin_lock_irq(&phba->hbalock);
switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@ -3011,11 +3022,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"1309 Link Up Event npiv not supported in loop "
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la)) {
spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
spin_unlock(shost->host_lock);
}
if (bf_get(lpfc_mbx_read_top_il, la))
fc_flags |= FC_LBIT;
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@ -3064,12 +3072,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
spin_unlock(shost->host_lock);
fc_flags |= FC_LBIT;
}
spin_unlock_irq(&phba->hbalock);
if (fc_flags) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= fc_flags;
spin_unlock_irq(shost->host_lock);
}
lpfc_linkup(phba);
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
@ -3237,8 +3249,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~FC_BYPASSED_MODE;
spin_unlock_irq(shost->host_lock);
if ((phba->fc_eventTag < la->eventTag) ||
(phba->fc_eventTag == la->eventTag)) {
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
if (phba->fc_eventTag != 0)
@ -3246,16 +3257,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
spin_lock_irq(&phba->hbalock);
if (bf_get(lpfc_mbx_read_top_mm, la))
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
spin_unlock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
spin_lock_irq(&phba->hbalock);
if (bf_get(lpfc_mbx_read_top_mm, la))
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
spin_unlock_irq(&phba->hbalock);
}
phba->link_events++;
if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
(!bf_get(lpfc_mbx_read_top_mm, la))) {
!(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@ -3300,8 +3313,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
(bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
if (phba->link_state != LPFC_LINK_DOWN) {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@ -3329,8 +3342,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
}
if (bf_get(lpfc_mbx_read_top_fa, la)) {
if (bf_get(lpfc_mbx_read_top_mm, la))
if ((phba->sli_rev < LPFC_SLI_REV4) &&
bf_get(lpfc_mbx_read_top_fa, la)) {
if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
lpfc_issue_clear_la(phba, vport);
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
@ -4354,7 +4368,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
with an error */
list_move_tail(&iocb->list,
&completions);
pring->txq_cnt--;
}
}
spin_unlock_irq(&phba->hbalock);
@ -5055,7 +5068,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
(icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
}


@ -1958,6 +1958,9 @@ struct lpfc_mbx_init_vfi {
struct lpfc_mbx_reg_vfi {
uint32_t word1;
#define lpfc_reg_vfi_upd_SHIFT 29
#define lpfc_reg_vfi_upd_MASK 0x00000001
#define lpfc_reg_vfi_upd_WORD word1
#define lpfc_reg_vfi_vp_SHIFT 28
#define lpfc_reg_vfi_vp_MASK 0x00000001
#define lpfc_reg_vfi_vp_WORD word1


@ -839,7 +839,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
* way, nothing should be on txcmplq as it will NEVER complete.
*/
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
@ -2915,9 +2914,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry->state = SGL_FREED;
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
spin_lock(&phba->hbalock);
spin_lock_irq(&phba->hbalock);
list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
spin_unlock(&phba->hbalock);
spin_unlock_irq(&phba->hbalock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrinked */
xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@ -3015,9 +3014,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
spin_lock(&phba->scsi_buf_list_lock);
spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
spin_unlock(&phba->scsi_buf_list_lock);
spin_unlock_irq(&phba->scsi_buf_list_lock);
return 0;
@ -4003,6 +4002,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
}
/**
* lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
* @vport: pointer to lpfc hba data structure.
*
* This routine is to perform FCF recovery when the in-use FCF either dead or
* got modified.
**/
static void
lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
struct lpfc_acqe_fip *acqe_fip)
{
int rc;
spin_lock_irq(&phba->hbalock);
/* Mark the fast failover process in progress */
phba->fcf.fcf_flag |= FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2771 Start FCF fast failover process due to in-use "
"FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
acqe_fip->event_tag, acqe_fip->index);
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2772 Issue FCF rediscover mabilbox command "
"failed, fail through to FCF dead event\n");
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
/*
* Last resort will fail over by treating this as a link
* down to FCF registration.
*/
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equalivant to receiving CVL to all vports.
*/
lpfc_sli4_perform_all_vport_cvl(phba);
}
}
/**
* lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
* @phba: pointer to lpfc hba data structure.
@ -4068,9 +4113,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
}
/* If the FCF has been in discovered state, do nothing. */
if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
/* If FCF has been in discovered state, perform rediscovery
* only if the FCF with the same index of the in-use FCF got
* modified during normal operation. Otherwise, do nothing.
*/
if (phba->pport->port_state > LPFC_FLOGI) {
spin_unlock_irq(&phba->hbalock);
if (phba->fcf.current_rec.fcf_indx ==
acqe_fip->index) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"3300 In-use FCF (%d) "
"modified, perform FCF "
"rediscovery\n",
acqe_fip->index);
lpfc_sli4_perform_inuse_fcf_recovery(phba,
acqe_fip);
}
break;
}
spin_unlock_irq(&phba->hbalock);
@ -4123,39 +4181,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* is no longer valid as we are not in the middle of FCF
* failover process already.
*/
spin_lock_irq(&phba->hbalock);
/* Mark the fast failover process in progress */
phba->fcf.fcf_flag |= FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2771 Start FCF fast failover process due to "
"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
"\n", acqe_fip->event_tag, acqe_fip->index);
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
"2772 Issue FCF rediscover mabilbox "
"command failed, fail through to FCF "
"dead event\n");
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
spin_unlock_irq(&phba->hbalock);
/*
* Last resort will fail over by treating this
* as a link down to FCF registration.
*/
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equalivant to receiving CVL to all vports.
*/
lpfc_sli4_perform_all_vport_cvl(phba);
}
lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
break;
case LPFC_FIP_EVENT_TYPE_CVL:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;


@ -2126,32 +2126,40 @@ void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
struct lpfc_mbx_reg_vfi *reg_vfi;
struct lpfc_hba *phba = vport->phba;
memset(mbox, 0, sizeof(*mbox));
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
bf_set(lpfc_reg_vfi_vfi, reg_vfi,
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
phba->sli4_hba.vfi_ids[vport->vfi]);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
reg_vfi->e_d_tov = vport->phba->fc_edtov;
reg_vfi->r_a_tov = vport->phba->fc_ratov;
reg_vfi->e_d_tov = phba->fc_edtov;
reg_vfi->r_a_tov = phba->fc_ratov;
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
/* Only FC supports upd bit */
if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
(vport->fc_flag & FC_VFI_REGISTERED)) {
bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
" vfi:%d, vpi:%d, fc_pname:%x%x\n",
vport->fc_myDID,
vport->phba->fcf.fcfi,
vport->phba->sli4_hba.vfi_ids[vport->vfi],
vport->phba->vpi_ids[vport->vpi],
phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[vport->vfi],
phba->vpi_ids[vport->vpi],
reg_vfi->wwn[0], reg_vfi->wwn[1]);
}


@ -226,7 +226,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
/* It matches, so deque and call compl with anp error */
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
}


@ -732,7 +732,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
psb->exch_busy = 0;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (pring->txq_cnt)
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
return;
@ -885,9 +885,9 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
int num_posted, rc = 0;
/* get all SCSI buffers need to repost to a local list */
spin_lock(&phba->scsi_buf_list_lock);
spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
spin_unlock(&phba->scsi_buf_list_lock);
spin_unlock_irq(&phba->scsi_buf_list_lock);
/* post the list of scsi buffer sgls to port if available */
if (!list_empty(&post_sblist)) {
@ -4246,7 +4246,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
unsigned long poll_tmo_expires =
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
}


@ -873,14 +873,16 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
rrq->send_rrq = send_rrq;
if (phba->cfg_enable_rrq == 1)
rrq->send_rrq = send_rrq;
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
@ -1009,6 +1011,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
/*
** This should have been removed from the txcmplq before calling
** iocbq_release. The normal completion
** path should have already done the list_del_init.
*/
if (unlikely(!list_empty(&iocbq->list))) {
if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_del_init(&iocbq->list);
}
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@ -1025,7 +1039,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
}
}
@ -1056,6 +1070,14 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
** This should have been removed from the txcmplq before calling
** iocbq_release. The normal completion
** path should have already done the list_del_init.
*/
if (unlikely(!list_empty(&iocbq->list)))
list_del_init(&iocbq->list);
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@ -1122,7 +1144,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
else {
@ -1310,9 +1331,6 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
if (pring->txcmplq_cnt > pring->txcmplq_max)
pring->txcmplq_max = pring->txcmplq_cnt;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@ -1344,8 +1362,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_iocbq *cmd_iocb;
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
if (cmd_iocb != NULL)
pring->txq_cnt--;
return cmd_iocb;
}
@ -1614,8 +1630,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* (c) link attention events can be processed (fcp ring only)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
if (pring->txq_cnt &&
lpfc_is_link_up(phba) &&
if (lpfc_is_link_up(phba) &&
(!list_empty(&pring->txq)) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
@ -2612,7 +2629,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
pring->txcmplq_cnt--;
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
}
return cmd_iocb;
@ -2650,7 +2666,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@ -3499,7 +3514,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
*/
spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@ -3536,11 +3550,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Retrieve everything on txq */
list_splice_init(&pring->txq, &txq);
pring->txq_cnt = 0;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txcmplq_cnt = 0;
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@ -5988,9 +6000,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
spin_lock(&phba->hbalock);
spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
spin_unlock(&phba->hbalock);
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
@ -6091,10 +6103,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* push els sgls posted to the availble list */
if (!list_empty(&post_sgl_list)) {
spin_lock(&phba->hbalock);
spin_lock_irq(&phba->hbalock);
list_splice_init(&post_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
spin_unlock(&phba->hbalock);
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3161 Failure to post els sgl to port.\n");
@ -7615,7 +7627,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
pring->txq_cnt++;
}
/**
@ -8387,7 +8398,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
if (pring->txq_cnt) {
if (!list_empty(&pring->txq)) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
pring, piocb);
@ -9055,7 +9066,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
/* Next issue ABTS for everything on the txcmplq */
@ -9124,8 +9134,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
@ -9966,6 +9974,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
struct lpfc_iocbq *iocb;
int txq_cnt = 0;
int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* If the caller has provided a response iocbq buffer, then context2
@ -10013,9 +10024,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = IOCB_TIMEDOUT;
}
} else if (retval == IOCB_BUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
if (phba->cfg_log_verbose & LOG_SLI) {
list_for_each_entry(iocb, &pring->txq, list) {
txq_cnt++;
}
list_for_each_entry(iocb, &pring->txcmplq, list) {
txcmplq_cnt++;
}
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
phba->iocb_cnt, txq_cnt, txcmplq_cnt);
}
return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@ -11298,16 +11317,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
int fcp_txcmplq_cnt = 0;
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
if (!list_empty(&pring->txq))
txq_cnt++;
if (!list_empty(&pring->txcmplq))
txcmplq_cnt++;
if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
fcp_txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
pring->txq_cnt, phba->iocb_cnt,
phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
txq_cnt, phba->iocb_cnt,
fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@ -15482,11 +15510,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
LPFC_SLI4_FCF_TBL_INDX_MAX);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"3060 Last IDX %d\n", last_index);
if (list_empty(&phba->fcf.fcf_pri_list)) {
/* Verify the priority list has 2 or more entries */
spin_lock_irq(&phba->hbalock);
if (list_empty(&phba->fcf.fcf_pri_list) ||
list_is_singular(&phba->fcf.fcf_pri_list)) {
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"3061 Last IDX %d\n", last_index);
return 0; /* Empty rr list */
}
spin_unlock_irq(&phba->hbalock);
next_fcf_pri = 0;
/*
* Clear the rr_bmask and set all of the bits that are at this
@ -16245,14 +16280,19 @@ lpfc_drain_txq(struct lpfc_hba *phba)
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
int txq_cnt = 0;
spin_lock_irqsave(&phba->hbalock, iflags);
if (pring->txq_cnt > pring->txq_max)
pring->txq_max = pring->txq_cnt;
list_for_each_entry(piocbq, &pring->txq, list) {
txq_cnt++;
}
if (txq_cnt > pring->txq_max)
pring->txq_max = txq_cnt;
spin_unlock_irqrestore(&phba->hbalock, iflags);
while (pring->txq_cnt) {
while (!list_empty(&pring->txq)) {
spin_lock_irqsave(&phba->hbalock, iflags);
piocbq = lpfc_sli_ringtx_get(phba, pring);
@ -16260,7 +16300,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
pring->txq_cnt);
txq_cnt);
break;
}
sglq = __lpfc_sli_get_sglq(phba, piocbq);
@ -16269,6 +16309,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, iflags);
break;
}
txq_cnt--;
/* The xri and iocb resources secured,
* attempt to issue request
@ -16300,5 +16341,5 @@ lpfc_drain_txq(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
return pring->txq_cnt;
return txq_cnt;
}
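
A recurring theme in the lpfc changes above is dropping the cached txq_cnt/txcmplq_cnt counters and deriving state from the list itself, either with list_empty() or by walking it, so the count can never drift out of sync with the queue. Below is a standalone sketch of that pattern using a minimal intrusive list as a stand-in for the kernel's struct list_head (illustrative, not the driver code):

#include <stdio.h>
#include <stdbool.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *head) { head->prev = head->next = head; }
static bool list_empty(const struct node *head) { return head->next == head; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Count by walking, the way lpfc_drain_txq() now does, instead of trusting
 * a separately maintained counter. */
static int list_count(const struct node *head)
{
	int cnt = 0;
	const struct node *p;

	for (p = head->next; p != head; p = p->next)
		cnt++;
	return cnt;
}

int main(void)
{
	struct node txq, a, b;

	list_init(&txq);
	list_add_tail(&a, &txq);
	list_add_tail(&b, &txq);
	printf("empty=%d count=%d\n", list_empty(&txq), list_count(&txq));
	return 0;
}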


@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.37"
#define LPFC_DRIVER_VERSION "8.3.38"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */


@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
qla_nx.o qla_target.o
qla_nx.o qla_mr.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o


@ -888,7 +888,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
uint32_t sn;
if (IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(vha->hw)) {
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.serial_num);
} else if (IS_FWI2_CAPABLE(ha)) {
qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", buf);
}
@ -912,6 +915,11 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.hw_version);
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
@ -922,6 +930,11 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (IS_QLAFX00(vha->hw))
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.product_name);
return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
@ -1304,6 +1317,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
uint32_t pstate;
if (IS_QLAFX00(vha->hw)) {
pstate = qlafx00_fw_state_show(dev, attr, buf);
return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
if (qla2x00_reset_active(vha))
ql_log(ql_log_warn, vha, 0x707c,
@ -1454,6 +1473,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
(shost_priv(shost)))->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
if (IS_QLAFX00(ha)) {
qlafx00_get_host_speed(shost);
return;
}
switch (ha->link_data_rate) {
case PORT_SPEED_1GB:
speed = FC_PORTSPEED_1GBIT;
@ -1637,6 +1661,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
if (IS_QLAFX00(vha->hw))
return 0;
qla2x00_loop_reset(vha);
return 0;
}
@ -1655,6 +1682,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (IS_QLAFX00(vha->hw))
goto done;
if (test_bit(UNLOADING, &vha->dpc_flags))
goto done;
@ -2087,6 +2117,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
FC_PORTSPEED_1GBIT;
else if (IS_QLA23XX(ha))
speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else if (IS_QLAFX00(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;


@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
qla2x00_rel_sp(vha, sp);
@ -751,6 +768,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.transfer_size = req_data_len;
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
elreq.iteration_count =
bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@ -1882,6 +1901,128 @@ done:
return 0;
}
static int
qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
srb_t *sp;
int req_sg_cnt = 0, rsp_sg_cnt = 0;
struct fc_port *fcport;
char *type = "FC_BSG_HST_FX_MGMT";
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
(uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x70d0,
"Host is not online.\n");
rval = -EIO;
goto done;
}
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
req_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
ql_log(ql_log_warn, vha, 0x70c7,
"dma_map_sg return %d for request\n", req_sg_cnt);
rval = -ENOMEM;
goto done;
}
}
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
ql_log(ql_log_warn, vha, 0x70c8,
"dma_map_sg return %d for reply\n", rsp_sg_cnt);
rval = -ENOMEM;
goto done_unmap_req_sg;
}
}
ql_dbg(ql_dbg_user, vha, 0x70c9,
"request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
"dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
/* Allocate a dummy fcport structure, since functions preparing the
* IOCB and mailbox command retrieves port specific information
* from fcport structure. For Host based ELS commands there will be
* no fcport structure allocated
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_log(ql_log_warn, vha, 0x70ca,
"Failed to allocate fcport.\n");
rval = -ENOMEM;
goto done_unmap_rsp_sg;
}
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x70cb,
"qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->loop_id = piocb_rqst->dataword;
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x70cc,
"bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
type, piocb_rqst->func_type, fcport->loop_id);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x70cd,
"qla2x00_start_sp failed=%d.\n", rval);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
}
return rval;
done_free_fcport:
kfree(fcport);
done_unmap_rsp_sg:
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:
return rval;
}
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
@ -1928,6 +2069,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_DIAG_IO_CMD:
return qla24xx_process_bidir_cmd(bsg_job);
case QL_VND_FX00_MGMT_CMD:
return qlafx00_mgmt_cmd(bsg_job);
default:
return -ENOSYS;
}
@ -2007,7 +2150,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
(sp->type == SRB_ELS_CMD_HST))
(sp->type == SRB_ELS_CMD_HST) ||
(sp->type == SRB_FXIOCB_BCMD))
&& (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {


@ -22,6 +22,7 @@
#define QL_VND_DIAG_IO_CMD 0x0A
#define QL_VND_WRITE_I2C 0x10
#define QL_VND_READ_I2C 0x11
#define QL_VND_FX00_MGMT_CMD 0x12
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0


@ -11,28 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x1179 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4022 | 0x4002,0x4013 |
* | Async Events | 0x5081 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | | | 0x5040,0x5075 |
* | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
* | User Space Interactions | 0x70dd | 0x7018,0x702e, |
* | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
* | | | 0x707b,0x708c, |
* | | | 0x70a5,0x70a6, |
* | | | 0x70a8,0x70ab, |
* | | | 0x70ad-0x70ae |
* | | | 0x70ad-0x70ae, |
* | | | 0x70d1-0x70da |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |


@ -245,7 +245,6 @@
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
/*
* Timeout timer counts in seconds
*/
@ -265,6 +264,7 @@
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
struct req_que;
@ -284,6 +284,7 @@ struct sd_dif_tuple {
struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
uint32_t request_sense_length;
uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
void *ctx;
};
@ -321,7 +322,39 @@ struct srb_iocb {
uint32_t flags;
uint32_t lun;
uint32_t data;
struct completion comp;
uint32_t comp_status;
} tmf;
struct {
#define SRB_FXDISC_REQ_DMA_VALID BIT_0
#define SRB_FXDISC_RESP_DMA_VALID BIT_1
#define SRB_FXDISC_REQ_DWRD_VALID BIT_2
#define SRB_FXDISC_RSP_DWRD_VALID BIT_3
#define FXDISC_TIMEOUT 20
uint8_t flags;
uint32_t req_len;
uint32_t rsp_len;
void *req_addr;
void *rsp_addr;
dma_addr_t req_dma_handle;
dma_addr_t rsp_dma_handle;
uint32_t adapter_id;
uint32_t adapter_id_hi;
uint32_t req_func_type;
uint32_t req_data;
uint32_t req_data_extra;
uint32_t result;
uint32_t seq_number;
uint32_t fw_flags;
struct completion fxiocb_comp;
uint32_t reserved_0;
uint8_t reserved_1;
} fxiocb;
struct {
uint32_t cmd_hndl;
uint32_t comp_status;
struct completion comp;
} abt;
} u;
struct timer_list timer;
@ -338,6 +371,10 @@ struct srb_iocb {
#define SRB_TM_CMD 7
#define SRB_SCSI_CMD 8
#define SRB_BIDI_CMD 9
#define SRB_FXIOCB_DCMD 10
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
typedef struct srb {
atomic_t ref_count;
@ -368,6 +405,10 @@ typedef struct srb {
(sp->u.scmd.request_sense_ptr)
#define SET_CMD_SENSE_PTR(sp, ptr) \
(sp->u.scmd.request_sense_ptr = ptr)
#define GET_FW_SENSE_LEN(sp) \
(sp->u.scmd.fw_sense_length)
#define SET_FW_SENSE_LEN(sp, len) \
(sp->u.scmd.fw_sense_length = len)
struct msg_echo_lb {
dma_addr_t send_dma;
@ -376,6 +417,7 @@ struct msg_echo_lb {
uint16_t rsp_sg_cnt;
uint16_t options;
uint32_t transfer_size;
uint32_t iteration_count;
};
/*
@ -542,11 +584,74 @@ struct device_reg_25xxmq {
uint32_t atio_q_out;
};
struct device_reg_fx00 {
uint32_t mailbox0; /* 00 */
uint32_t mailbox1; /* 04 */
uint32_t mailbox2; /* 08 */
uint32_t mailbox3; /* 0C */
uint32_t mailbox4; /* 10 */
uint32_t mailbox5; /* 14 */
uint32_t mailbox6; /* 18 */
uint32_t mailbox7; /* 1C */
uint32_t mailbox8; /* 20 */
uint32_t mailbox9; /* 24 */
uint32_t mailbox10; /* 28 */
uint32_t mailbox11;
uint32_t mailbox12;
uint32_t mailbox13;
uint32_t mailbox14;
uint32_t mailbox15;
uint32_t mailbox16;
uint32_t mailbox17;
uint32_t mailbox18;
uint32_t mailbox19;
uint32_t mailbox20;
uint32_t mailbox21;
uint32_t mailbox22;
uint32_t mailbox23;
uint32_t mailbox24;
uint32_t mailbox25;
uint32_t mailbox26;
uint32_t mailbox27;
uint32_t mailbox28;
uint32_t mailbox29;
uint32_t mailbox30;
uint32_t mailbox31;
uint32_t aenmailbox0;
uint32_t aenmailbox1;
uint32_t aenmailbox2;
uint32_t aenmailbox3;
uint32_t aenmailbox4;
uint32_t aenmailbox5;
uint32_t aenmailbox6;
uint32_t aenmailbox7;
/* Request Queue. */
uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
uint32_t initval0; /* B0 */
uint32_t initval1; /* B4 */
uint32_t initval2; /* B8 */
uint32_t initval3; /* BC */
uint32_t initval4; /* C0 */
uint32_t initval5; /* C4 */
uint32_t initval6; /* C8 */
uint32_t initval7; /* CC */
uint32_t fwheartbeat; /* D0 */
};
typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
struct device_reg_82xx isp82;
struct device_reg_fx00 ispfx00;
} device_reg_t;
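
The byte offsets noted in the device_reg_fx00 comments (req_q_in at 0xA0, fwheartbeat at 0xD0, and so on) follow directly from 32 mailbox words plus 8 AEN mailbox words of 4 bytes each. That can be sanity-checked with offsetof() against a trimmed local copy of the layout; the struct below is an illustrative copy for that check, not the driver header.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct fx00_regs_layout {
	uint32_t mailbox[32];	/* 0x00 - 0x7C */
	uint32_t aenmailbox[8];	/* 0x80 - 0x9C */
	uint32_t req_q_in;	/* expected at 0xA0 */
	uint32_t req_q_out;	/* 0xA4 */
	uint32_t rsp_q_in;	/* 0xA8 */
	uint32_t rsp_q_out;	/* 0xAC */
	uint32_t initval[8];	/* 0xB0 - 0xCC */
	uint32_t fwheartbeat;	/* expected at 0xD0 */
};

int main(void)
{
	printf("req_q_in    @ 0x%zx\n", offsetof(struct fx00_regs_layout, req_q_in));
	printf("rsp_q_in    @ 0x%zx\n", offsetof(struct fx00_regs_layout, rsp_q_in));
	printf("fwheartbeat @ 0x%zx\n", offsetof(struct fx00_regs_layout, fwheartbeat));
	return 0;
}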
#define ISP_REQ_Q_IN(ha, reg) \
@ -602,6 +707,20 @@ typedef struct {
#define IOCTL_CMD BIT_2
} mbx_cmd_t;
struct mbx_cmd_32 {
uint32_t out_mb; /* outbound from driver */
uint32_t in_mb; /* Incoming from RISC */
uint32_t mb[MAILBOX_REGISTER_COUNT];
long buf_size;
void *bufp;
uint32_t tov;
uint8_t flags;
#define MBX_DMA_IN BIT_0
#define MBX_DMA_OUT BIT_1
#define IOCTL_CMD BIT_2
};
#define MBX_TOV_SECONDS 30
/*
@ -677,6 +796,15 @@ typedef struct {
#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */
#define MBA_FW_STARTING 0x8051 /* Firmware starting */
#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@ -797,6 +925,12 @@ typedef struct {
#define MBC_SEND_LFA_COMMAND 0x7D /* Send Loop Fabric Address */
#define MBC_LUN_RESET 0x7E /* Send LUN reset */
/*
* all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
* should be defined with MBC_MR_*
*/
#define MBC_MR_DRV_SHUTDOWN 0x6A
/*
* ISP24xx mailbox commands
*/
@ -1058,6 +1192,30 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
struct init_cb_fx {
uint16_t version;
uint16_t reserved_1[13];
uint16_t request_q_outpointer;
uint16_t response_q_inpointer;
uint16_t reserved_2[2];
uint16_t response_q_length;
uint16_t request_q_length;
uint16_t reserved_3[2];
uint32_t request_q_address[2];
uint32_t response_q_address[2];
uint16_t reserved_4[4];
uint8_t response_q_msivec;
uint8_t reserved_5[19];
uint16_t interrupt_delay_timer;
uint16_t reserved_6;
uint32_t fwoptions1;
uint32_t fwoptions2;
uint32_t fwoptions3;
uint8_t reserved_7[24];
};
/*
* Get Link Status mailbox command return buffer.
*/
@ -1831,6 +1989,9 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
uint16_t tgt_id;
uint16_t old_tgt_id;
uint8_t fcp_prio;
uint8_t fabric_port_name[WWN_SIZE];
@ -1848,8 +2009,15 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t scan_state;
unsigned long last_queue_full;
unsigned long last_ramp_up;
uint16_t port_id;
} fc_port_t;
#include "qla_mr.h"
/*
* Fibre channel port/lun states.
*/
@ -2391,6 +2559,7 @@ struct isp_operations {
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
@ -2429,6 +2598,7 @@ enum qla_work_type {
QLA_EVT_ASYNC_ADISC,
QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
};
@ -2456,7 +2626,15 @@ struct qla_work_evt {
u32 code;
#define QLA_UEVENT_CODE_FW_DUMP 0
} uevent;
} u;
struct {
uint32_t evtcode;
uint32_t mbx[8];
uint32_t count;
} aenfx;
struct {
srb_t *sp;
} iosb;
} u;
};
struct qla_chip_state_84xx {
@ -2520,6 +2698,11 @@ struct rsp_que {
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
uint16_t length_fx00;
uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
};
/* Request queue data structure */
@ -2544,6 +2727,11 @@ struct req_que {
uint16_t num_outstanding_cmds;
#define MAX_Q_DEPTH 32
int max_q_depth;
dma_addr_t dma_fx00;
request_t *ring_fx00;
uint16_t length_fx00;
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
/* Place holder for FW buffer parameters */
@ -2633,7 +2821,10 @@ struct qla_hw_data {
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
uint32_t idc_compl_status:1;
/* 32 bits */
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
/* 34 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@ -2650,7 +2841,21 @@ struct qla_hw_data {
resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
/* Multi queue data structs */
dma_addr_t bar0_hdl;
void __iomem *cregbase;
dma_addr_t bar2_hdl;
#define BAR0_LEN_FX00 (1024 * 1024)
#define BAR2_LEN_FX00 (128 * 1024)
uint32_t rqstq_intr_code;
uint32_t mbx_intr_code;
uint32_t req_que_len;
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
/* Multi queue data structs */
device_reg_t __iomem *mqiobase;
device_reg_t __iomem *msixbase;
uint16_t msix_count;
@ -2729,7 +2934,8 @@ struct qla_hw_data {
#define DT_ISP8021 BIT_14
#define DT_ISP2031 BIT_15
#define DT_ISP8031 BIT_16
#define DT_ISP_LAST (DT_ISP8031 << 1)
#define DT_ISPFX00 BIT_17
#define DT_ISP_LAST (DT_ISPFX00 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@ -2757,6 +2963,7 @@ struct qla_hw_data {
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@ -2821,6 +3028,7 @@ struct qla_hw_data {
uint16_t r_a_tov;
int port_down_retry_count;
uint8_t mbx_count;
uint8_t aen_mbx_count;
uint32_t login_retry_count;
/* SNS command interfaces. */
@ -2868,9 +3076,13 @@ struct qla_hw_data {
void *swl;
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
mbx_cmd_t *mcp;
struct mbx_cmd_32 *mcp32;
unsigned long mbx_cmd_flags;
#define MBX_INTERRUPT 1
#define MBX_INTR_WAIT 2
@ -3014,6 +3226,7 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@ -3080,6 +3293,8 @@ struct qla_hw_data {
unsigned long host_last_rampup_time;
int cfg_lun_q_depth;
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
uint16_t thermal_support;
#define THERMAL_SUPPORT_I2C BIT_0
@ -3109,6 +3324,8 @@ typedef struct scsi_qla_host {
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
uint32_t delete_progress:1;
uint32_t fw_tgt_reported:1;
} flags;
atomic_t loop_state;
@ -3144,6 +3361,9 @@ typedef struct scsi_qla_host {
#define SCR_PENDING 21 /* SCR in target mode */
#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
#define HOST_RAMP_UP_QUEUE_DEPTH 23
#define PORT_UPDATE_NEEDED 24
#define FX00_RESET_RECOVERY 25
#define FX00_TARGET_SCAN 26
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@ -3234,6 +3454,10 @@ struct qla_tgt_vp_map {
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
#define STATE_TRANSITION(ha) \
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
atomic_inc(&__vha->vref_count); \
mb(); \


@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
@ -134,7 +135,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@ -158,6 +158,7 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
/*
* Global Functions in qla_mid.c source file.
@ -211,8 +212,6 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
@ -424,6 +423,12 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
extern srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
void *);
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
/*
* Global Function Prototypes in qla_sup.c source file.
@ -561,6 +566,42 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
extern void qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
extern int qlafx00_abort_command(srb_t *);
extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
extern int qlafx00_start_scsi(srb_t *);
extern int qlafx00_abort_isp(scsi_qla_host_t *);
extern int qlafx00_iospace_config(struct qla_hw_data *);
extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
struct device_attribute *, char *);
extern void qlafx00_get_host_speed(struct Scsi_Host *);
extern void qlafx00_init_response_q_entries(struct rsp_que *);
extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
/* qla82xx related functions */
/* PCI related functions */


@ -639,9 +639,14 @@ void
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
{
struct qla_hw_data *ha = vha->hw;
sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
if (IS_QLAFX00(ha))
sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
ha->mr.fw_version, qla2x00_version_str);
else
sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
}
/**
@ -923,7 +928,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sns_cmd->p.gpn_data[9] != 0x02) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
"GPN_ID failed, rejected request, gpn_rsp:\n");
ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
@ -1718,7 +1723,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
return QLA_FUNCTION_FAILED;
rval = qla2x00_mgmt_svr_login(vha);


@ -25,7 +25,6 @@
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
/* Firmware should use switch negotiated r_a_tov for timeout. */
tmo = ha->r_a_tov / 10 * 2;
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(ha)) {
tmo = FX00_DEF_RATOV * 2;
} else if (!IS_FWI2_CAPABLE(ha)) {
/*
* Except for earlier ISPs where the timeout is seeded from the
* initialization control block.
@ -1977,7 +1978,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
*
* Returns 0 on success.
*/
static int
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
int rval;
@ -2012,7 +2013,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (!rsp)
continue;
/* Initialize response queue entries */
qla2x00_init_response_q_entries(rsp);
if (IS_QLAFX00(ha))
qlafx00_init_response_q_entries(rsp);
else
qla2x00_init_response_q_entries(rsp);
}
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
@ -2024,11 +2028,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (IS_QLAFX00(ha)) {
rval = qlafx00_init_firmware(vha, ha->init_cb_size);
goto next_check;
}
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@ -2042,6 +2051,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
@ -2069,6 +2079,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
uint16_t state[5];
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
return qlafx00_fw_ready(vha);
rval = QLA_SUCCESS;
/* 20 seconds for loop down. */
@ -3134,6 +3147,12 @@ void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
fcport->vha = vha;
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_reg_remote_port(vha, fcport);
return;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@ -3894,15 +3913,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
vha->marker_needed = 0;
if (!IS_QLAFX00(vha->hw)) {
/*
* Issue a marker after FW becomes
* ready.
*/
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
vha->marker_needed = 0;
}
/* Remap devices on Loop. */
clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
qla2x00_configure_loop(vha);
if (IS_QLAFX00(vha->hw))
qlafx00_configure_devices(vha);
else
qla2x00_configure_loop(vha);
wait_time--;
} while (!atomic_read(&vha->loop_down_timer) &&
!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
@ -3968,9 +3996,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
if (fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
spin_lock_irqsave(&ha->vport_slock, flags);
}
}


@ -5,6 +5,28 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 1) {
iocbs += (dsds - 1) / 5;
if ((dsds - 1) % 5)
iocbs++;
}
return iocbs;
}
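
Editor's sketch, not part of the patch: the formula above assumes one data segment descriptor in the command IOCB and five in each Continuation Type 1 IOCB, so a single-segment command needs 1 entry, six segments need 2, and twelve segments need 4. A standalone C check of the same arithmetic:

#include <stdio.h>

/* Mirrors the qla24xx_calc_iocbs() formula above: one DSD in the command
 * IOCB, five DSDs in each Continuation Type 1 IOCB. */
static unsigned int calc_iocbs(unsigned int dsds)
{
	unsigned int iocbs = 1;

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

int main(void)
{
	/* Prints "1 2 4": 1, 6 and 12 data segments respectively. */
	printf("%u %u %u\n", calc_iocbs(1), calc_iocbs(6), calc_iocbs(12));
	return 0;
}
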
/*
* qla2x00_debounce_register
* Debounce register.
@ -57,6 +79,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
return fcp;
}
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
uint32_t *isrc = (uint32_t *) src;
uint32_t *odest = (uint32_t *) dst;
uint32_t iter = bsize >> 2;
for (; iter ; iter--)
*odest++ = cpu_to_le32(*isrc++);
}
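
Editor's sketch (hypothetical buffer, not part of the patch): host_to_adap() copies a host buffer into adapter-visible memory one 32-bit word at a time, converting each word with cpu_to_le32() so the adapter always sees little-endian data; because the loop count is bsize >> 2, callers are expected to pass a length that is a multiple of four, otherwise the trailing bytes are silently skipped.

	uint8_t pkt[64] = { 0 };	/* host-side IOCB image (hypothetical) */
	uint8_t slot[64];		/* stand-in for the adapter-visible ring slot */

	host_to_adap(pkt, slot, sizeof(pkt));	/* length is a multiple of 4 */
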
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
@ -213,12 +246,18 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
(sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
if (IS_QLAFX00(ha))
return sizeof(uint32_t) * 32;
else
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
static inline void


@ -135,7 +135,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
__constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
__constant_cpu_to_le32(CONTINUE_A64_TYPE);
return (cont_pkt);
@ -486,6 +487,10 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
if (ha->mqenable || IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@ -514,11 +519,12 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
struct mrk_entry_24xx *mrk24 = NULL;
struct mrk_entry_fx00 *mrkfx = NULL;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
@ -531,7 +537,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(ha)) {
mrkfx = (struct mrk_entry_fx00 *) mrk;
mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
mrkfx->handle_hi = 0;
mrkfx->tgt_id = cpu_to_le16(loop_id);
mrkfx->lun[1] = LSB(lun);
mrkfx->lun[2] = MSB(lun);
host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
} else if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
@ -589,28 +603,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
return QLA_SUCCESS;
}
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 1) {
iocbs += (dsds - 1) / 5;
if ((dsds - 1) % 5)
iocbs++;
}
return iocbs;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
@ -1583,7 +1575,6 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
/**
* qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
@ -1852,6 +1843,8 @@ skip_cmd_array:
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
@ -1869,8 +1862,13 @@ skip_cmd_array:
req->cnt -= req_cnt;
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt->entry_count = req_cnt;
pkt->handle = handle;
if (IS_QLAFX00(ha)) {
WRT_REG_BYTE(&pkt->entry_count, req_cnt);
WRT_REG_WORD(&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
}
queuing_error:
return pkt;
@ -2625,7 +2623,16 @@ qla2x00_start_sp(srb_t *sp)
qla2x00_adisc_iocb(sp, pkt);
break;
case SRB_TM_CMD:
qla24xx_tm_iocb(sp, pkt);
IS_QLAFX00(ha) ?
qlafx00_tm_iocb(sp, pkt) :
qla24xx_tm_iocb(sp, pkt);
break;
case SRB_FXIOCB_DCMD:
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_ABT_CMD:
qlafx00_abort_iocb(sp, pkt);
break;
default:
break;


@ -13,11 +13,7 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include "qla_target.h"
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
@ -1065,9 +1061,9 @@ skip_rio:
* @ha: SCSI driver HA context
* @index: SRB index
*/
static void
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct req_que *req, uint32_t index)
struct req_que *req, uint32_t index)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@ -1101,7 +1097,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
static srb_t *
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
struct req_que *req, void *iocb)
{
@ -1994,7 +1990,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
lscsi_status = scsi_status & STATUS_MASK;
lscsi_status = scsi_status & STATUS_MASK;
fcport = sp->fcport;
@ -2939,7 +2935,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@ -2972,7 +2968,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_QLA82XX(ha))
!IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
@ -2998,9 +2994,11 @@ skip_msi:
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
} else if (!ha->flags.msi_enabled)
} else if (!ha->flags.msi_enabled) {
ql_dbg(ql_dbg_init, vha, 0x0125,
"INTa mode: Enabled.\n");
ha->flags.mr_intr_valid = 1;
}
clear_risc_ints:


@ -4113,7 +4113,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
"Entered %s.\n", __func__);
@ -4139,8 +4138,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
/* Iteration count */
mcp->mb[18] = LSW(iter_cnt);
mcp->mb[19] = MSW(iter_cnt);
mcp->mb[18] = LSW(mreq->iteration_count);
mcp->mb[19] = MSW(mreq->iteration_count);
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
@ -4518,7 +4517,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
goto done;
ql_log(ql_log_warn, vha, 0x10c9,
"Thermal not supported by I2C.\n");
"Thermal not supported through I2C bus, trying alternate "
"method (ISP access).\n");
ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
@ -4528,7 +4528,7 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
goto done;
ql_log(ql_log_warn, vha, 0x1019,
"Thermal not supported by ISP.\n");
"Thermal not supported through ISP.\n");
ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}

The diff for this file is not shown because of its large size.


@ -0,0 +1,510 @@
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_MR_H
#define __QLA_MR_H
/*
* The PCI VendorID and DeviceID for our board.
*/
#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001
/* FX00 specific definitions */
#define FX00_COMMAND_TYPE_7 0x07 /* Command Type 7 entry for 7XXX */
struct cmd_type_7_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t handle_hi;
uint16_t tgt_idx; /* Target Idx. */
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint16_t scsi_rsp_dsd_len;
struct scsi_lun lun; /* LUN (LE). */
uint8_t cntrl_flags;
uint8_t task_mgmt_flags; /* Task management flags. */
uint8_t task;
uint8_t crn;
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
uint32_t dseg_0_address[2]; /* Data segment 0 address. */
uint32_t dseg_0_len; /* Data segment 0 length. */
};
/*
* ISP queue - marker entry structure definition.
*/
struct mrk_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t handle_count; /* Handle count. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t handle_hi; /* System handle. */
uint16_t tgt_id; /* Target ID. */
uint8_t modifier; /* Modifier (7-0). */
uint8_t reserved_1;
uint8_t reserved_2[5];
uint8_t lun[8]; /* FCP LUN (BE). */
uint8_t reserved_3[36];
};
#define STATUS_TYPE_FX00 0x01 /* Status entry. */
struct sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t handle_hi; /* System handle. */
uint16_t comp_status; /* Completion status. */
uint16_t reserved_0; /* OX_ID used by the firmware. */
uint32_t residual_len; /* FW calc residual transfer length. */
uint16_t reserved_1;
uint16_t state_flags; /* State flags. */
uint16_t reserved_2;
uint16_t scsi_status; /* SCSI status. */
uint32_t sense_len; /* FCP SENSE length. */
uint8_t data[32]; /* FCP response/sense information. */
};
#define MAX_HANDLE_COUNT 15
#define MULTI_STATUS_TYPE_FX00 0x0D
struct multi_sts_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t sys_define; /* System defined. */
uint8_t handle_count;
uint8_t entry_status;
uint32_t handles[MAX_HANDLE_COUNT];
};
#define TSK_MGMT_IOCB_TYPE_FX00 0x05
struct tsk_mgmt_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t handle_hi; /* System handle. */
uint16_t tgt_id; /* Target Idx. */
uint16_t reserved_1;
uint16_t delay; /* Activity delay in seconds. */
uint16_t timeout; /* Command timeout. */
struct scsi_lun lun; /* LUN (LE). */
uint32_t control_flags; /* Control Flags. */
uint8_t reserved_2[32];
};
#define ABORT_IOCB_TYPE_FX00 0x08 /* Abort IOCB status. */
struct abort_iocb_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t handle_hi; /* System handle. */
uint16_t tgt_id_sts; /* Completion status. */
uint16_t options;
uint32_t abort_handle; /* System handle. */
uint32_t abort_handle_hi; /* System handle. */
uint16_t req_que_no;
uint8_t reserved_1[38];
};
#define IOCTL_IOSB_TYPE_FX00 0x0C
struct ioctl_iocb_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t reserved_0; /* System handle. */
uint16_t comp_func_num;
uint16_t fw_iotcl_flags;
uint32_t dataword_r; /* Data word returned */
uint32_t adapid; /* Adapter ID */
uint32_t adapid_hi; /* Adapter ID high */
uint32_t reserved_1;
uint32_t seq_no;
uint8_t reserved_2[20];
uint32_t residuallen;
uint32_t status;
};
#define STATUS_CONT_TYPE_FX00 0x04
#define FX00_IOCB_TYPE 0x0B
struct fxdisc_entry_fx00 {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint32_t reserved_0; /* System handle. */
uint16_t func_num;
uint16_t req_xfrcnt;
uint16_t req_dsdcnt;
uint16_t rsp_xfrcnt;
uint16_t rsp_dsdcnt;
uint8_t flags;
uint8_t reserved_1;
uint32_t dseg_rq_address[2]; /* Data segment 0 address. */
uint32_t dseg_rq_len; /* Data segment 0 length. */
uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
uint32_t dseg_rsp_len; /* Data segment 1 length. */
uint32_t dataword;
uint32_t adapid;
uint32_t adapid_hi;
uint32_t dataword_extra;
};
struct qlafx00_tgt_node_info {
uint8_t tgt_node_wwpn[WWN_SIZE];
uint8_t tgt_node_wwnn[WWN_SIZE];
uint32_t tgt_node_state;
uint8_t reserved[128];
uint32_t reserved_1[8];
uint64_t reserved_2[4];
} __packed;
#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
#define QLAFX00_LINK_STATUS_DOWN 0x10
#define QLAFX00_LINK_STATUS_UP 0x11
#define QLAFX00_PORT_SPEED_2G 0x2
#define QLAFX00_PORT_SPEED_4G 0x4
#define QLAFX00_PORT_SPEED_8G 0x8
#define QLAFX00_PORT_SPEED_10G 0xa
struct port_info_data {
uint8_t port_state;
uint8_t port_type;
uint16_t port_identifier;
uint32_t up_port_state;
uint8_t fw_ver_num[32];
uint8_t portal_attrib;
uint16_t host_option;
uint8_t reset_delay;
uint8_t pdwn_retry_cnt;
uint16_t max_luns2tgt;
uint8_t risc_ver;
uint8_t pconn_option;
uint16_t risc_option;
uint16_t max_frame_len;
uint16_t max_iocb_alloc;
uint16_t exec_throttle;
uint8_t retry_cnt;
uint8_t retry_delay;
uint8_t port_name[8];
uint8_t port_id[3];
uint8_t link_status;
uint8_t plink_rate;
uint32_t link_config;
uint16_t adap_haddr;
uint8_t tgt_disc;
uint8_t log_tout;
uint8_t node_name[8];
uint16_t erisc_opt1;
uint8_t resp_acc_tmr;
uint8_t intr_del_tmr;
uint8_t erisc_opt2;
uint8_t alt_port_name[8];
uint8_t alt_node_name[8];
uint8_t link_down_tout;
uint8_t conn_type;
uint8_t fc_fw_mode;
uint32_t uiReserved[48];
} __packed;
/* OS Type Designations */
#define OS_TYPE_UNKNOWN 0
#define OS_TYPE_LINUX 2
/* Linux Info */
#define SYSNAME_LENGTH 128
#define NODENAME_LENGTH 64
#define RELEASE_LENGTH 64
#define VERSION_LENGTH 64
#define MACHINE_LENGTH 64
#define DOMNAME_LENGTH 64
struct host_system_info {
uint32_t os_type;
char sysname[SYSNAME_LENGTH];
char nodename[NODENAME_LENGTH];
char release[RELEASE_LENGTH];
char version[VERSION_LENGTH];
char machine[MACHINE_LENGTH];
char domainname[DOMNAME_LENGTH];
char hostdriver[VERSION_LENGTH];
uint32_t reserved[64];
} __packed;
struct register_host_info {
struct host_system_info hsi; /* host system info */
uint64_t utc; /* UTC (system time) */
uint32_t reserved[64]; /* future additions */
} __packed;
#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
uint8_t product_name[256];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
uint8_t hw_version[16];
uint8_t fw_version[16];
uint8_t uboot_version[16];
uint8_t fru_serial_num[32];
uint8_t fc_port_count;
uint8_t iscsi_port_count;
uint8_t reserved1[2];
uint8_t mode;
uint8_t log_level;
uint8_t reserved2[2];
uint32_t log_size;
uint8_t tgt_pres_mode;
uint8_t iqn_flags;
uint8_t lun_mapping;
uint64_t adapter_id;
uint32_t cluster_key_len;
uint8_t cluster_key[10];
uint64_t cluster_master_id;
uint64_t cluster_slave_id;
uint8_t cluster_flags;
} __packed;
#define FXDISC_GET_CONFIG_INFO 0x01
#define FXDISC_GET_PORT_INFO 0x02
#define FXDISC_GET_TGT_NODE_INFO 0x80
#define FXDISC_GET_TGT_NODE_LIST 0x81
#define FXDISC_REG_HOST_INFO 0x99
#define QLAFX00_HBA_ICNTRL_REG 0x21B08
#define QLAFX00_ICR_ENB_MASK 0x80000000
#define QLAFX00_ICR_DIS_MASK 0x7fffffff
#define QLAFX00_HST_RST_REG 0x18264
#define QLAFX00_HST_TO_HBA_REG 0x20A04
#define QLAFX00_HBA_TO_HOST_REG 0x21B70
#define QLAFX00_HST_INT_STS_BITS 0x7
#define QLAFX00_BAR1_BASE_ADDR_REG 0x40018
#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG 0x41824
#define QLAFX00_INTR_MB_CMPLT 0x1
#define QLAFX00_INTR_RSP_CMPLT 0x2
#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
#define QLAFX00_INTR_ASYNC_CMPLT 0x4
#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
#define QLAFX00_INTR_ALL_CMPLT 0x7
#define QLAFX00_MBA_SYSTEM_ERR 0x8002
#define QLAFX00_MBA_LINK_UP 0x8011
#define QLAFX00_MBA_LINK_DOWN 0x8012
#define QLAFX00_MBA_PORT_UPDATE 0x8014
#define QLAFX00_MBA_SHUTDOWN_RQSTD 0x8062
#define SOC_SW_RST_CONTROL_REG_CORE0 0x0020800
#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
#define SOC_FABRIC_CONTROL_REG 0x0020200
#define SOC_FABRIC_CONFIG_REG 0x0020204
#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
#define SOC_CORE_TIMER_REG 0x0021850
#define SOC_IRQ_ACK_REG 0x00218b4
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value)
#define QLAFX00_CLR_HST_INTR(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_RD_INTR_REG(ha) \
RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
WRT_REG_DWORD((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
RD_REG_DWORD((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \
RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \
RD_REG_DWORD((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \
WRT_REG_DWORD((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 {
uint32_t reserved_0;
uint16_t func_type;
uint8_t flags;
uint8_t reserved_1;
uint32_t dataword;
uint32_t adapid;
uint32_t adapid_hi;
uint32_t dataword_extra;
uint32_t req_len;
uint32_t rsp_len;
};
struct qla_mt_iocb_rsp_fx00 {
uint32_t reserved_1;
uint16_t func_type;
uint16_t ioctl_flags;
uint32_t ioctl_data;
uint32_t adapid;
uint32_t adapid_hi;
uint32_t reserved_2;
uint32_t seq_number;
uint8_t reserved_3[20];
int32_t res_count;
uint32_t status;
};
#define MAILBOX_REGISTER_COUNT_FX00 16
#define AEN_MAILBOX_REGISTER_COUNT_FX00 8
#define MAX_FIBRE_DEVICES_FX00 512
#define MAX_LUNS_FX00 0x1024
#define MAX_TARGETS_FX00 MAX_ISA_DEVICES
#define REQUEST_ENTRY_CNT_FX00 512 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
/*
* Firmware state codes for QLAFX00 adapters
*/
#define FSTATE_FX00_CONFIG_WAIT 0x0000 /* Waiting for driver to issue
* Initialize FW Mbox cmd
*/
#define FSTATE_FX00_INITIALIZED 0x1000 /* FW has been initialized by
* the driver
*/
#define FX00_DEF_RATOV 10
struct mr_data_fx00 {
uint8_t product_name[256];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
uint8_t hw_version[16];
uint8_t fw_version[16];
uint8_t uboot_version[16];
uint8_t fru_serial_num[32];
fc_port_t fcport; /* fcport used for requests
* that are not linked
* to a particular target
*/
uint8_t fw_hbt_en;
uint8_t fw_hbt_cnt;
uint8_t fw_hbt_miss_cnt;
uint32_t old_fw_hbt_cnt;
uint16_t fw_reset_timer_tick;
uint8_t fw_reset_timer_exp;
uint32_t old_aenmbx0_state;
};
#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
#endif


@ -47,6 +47,7 @@ MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
"beginning. Default is 0 - class 2 not supported.");
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@ -354,7 +355,12 @@ fail_req_map:
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
if (req && req->ring)
if (IS_QLAFX00(ha)) {
if (req && req->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(req->length_fx00 + 1) * sizeof(request_t),
req->ring_fx00, req->dma_fx00);
} else if (req && req->ring)
dma_free_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
@ -368,11 +374,16 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
if (rsp && rsp->ring)
if (IS_QLAFX00(ha)) {
if (rsp && rsp->ring)
dma_free_coherent(&ha->pdev->dev,
(rsp->length_fx00 + 1) * sizeof(request_t),
rsp->ring_fx00, rsp->dma_fx00);
} else if (rsp && rsp->ring) {
dma_free_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
}
kfree(rsp);
rsp = NULL;
}
@ -633,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
static void
void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@ -657,6 +668,9 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
/* If we are SP1 here, we need to still take and release the host_lock as SP1
* does not have the changes necessary to avoid taking host->host_lock.
*/
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
@ -1304,6 +1318,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
if (IS_QLAFX00(ha))
return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@ -1858,6 +1875,7 @@ static struct isp_operations qla2100_isp_ops = {
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla2300_isp_ops = {
@ -1895,6 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla24xx_isp_ops = {
@ -1932,6 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla25xx_isp_ops = {
@ -1969,6 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla81xx_isp_ops = {
@ -2006,6 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla82xx_isp_ops = {
@ -2043,6 +2065,7 @@ static struct isp_operations qla82xx_isp_ops = {
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla83xx_isp_ops = {
@ -2080,6 +2103,45 @@ static struct isp_operations qla83xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qlafx00_isp_ops = {
.pci_config = qlafx00_pci_config,
.reset_chip = qlafx00_soft_reset,
.chip_diag = qlafx00_chip_diag,
.config_rings = qlafx00_config_rings,
.reset_adapter = qlafx00_soft_reset,
.nvram_config = NULL,
.update_fw_options = NULL,
.load_risc = NULL,
.pci_info_str = qlafx00_pci_info_str,
.fw_version_str = qlafx00_fw_version_str,
.intr_handler = qlafx00_intr_handler,
.enable_intrs = qlafx00_enable_intrs,
.disable_intrs = qlafx00_disable_intrs,
.abort_command = qlafx00_abort_command,
.target_reset = qlafx00_abort_target,
.lun_reset = qlafx00_lun_reset,
.fabric_login = NULL,
.fabric_logout = NULL,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = NULL,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = NULL,
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qlafx00_start_scsi,
.abort_isp = qlafx00_abort_isp,
.iospace_config = qlafx00_iospace_config,
.initialize_adapter = qlafx00_initialize_adapter,
};
static inline void
@ -2192,6 +2254,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISPF001:
ha->device_type |= DT_ISPFX00;
break;
}
if (IS_QLA82XX(ha))
@ -2265,7 +2330,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@ -2436,6 +2502,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
} else if (IS_QLAFX00(ha)) {
ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
req_length = REQUEST_ENTRY_CNT_FX00;
rsp_length = RESPONSE_ENTRY_CNT_FX00;
ha->init_cb_size = sizeof(struct init_cb_fx);
ha->isp_ops = &qlafx00_isp_ops;
ha->port_down_retry_count = 30; /* default value */
ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
ha->mr.fw_hbt_en = 1;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@ -2500,13 +2578,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
host->can_queue = req->length + 128;
if (IS_QLAFX00(ha))
host->can_queue = 1024;
else
host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
/* Setup fcport template structure. */
ha->mr.fcport.vha = base_vha;
ha->mr.fcport.port_type = FCT_UNKNOWN;
ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
ha->mr.fcport.scan_state = 1;
/* Set the SG table size based on ISP type */
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLA2100(ha))
@ -2562,6 +2651,13 @@ que_init:
rsp->req = req;
req->rsp = rsp;
if (IS_QLAFX00(ha)) {
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
}
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@ -2574,6 +2670,13 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
if (IS_QLAFX00(ha)) {
req->req_q_in = &ha->iobase->ispfx00.req_q_in;
req->req_q_out = &ha->iobase->ispfx00.req_q_out;
rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
}
if (IS_QLA82XX(ha)) {
req->req_q_out = &ha->iobase->isp82.req_q_out[0];
rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
@ -2595,7 +2698,7 @@ que_init:
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
if (qla2x00_initialize_adapter(base_vha)) {
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
"Failed to initialize adapter - Adapter flags %x.\n",
base_vha->device_flags);
@ -2720,6 +2823,18 @@ skip_dpc:
qla2x00_alloc_sysfs_attr(base_vha);
if (IS_QLAFX00(ha)) {
ret = qlafx00_fx_disc(base_vha,
&base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
ret = qlafx00_fx_disc(base_vha,
&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
/* Register system information */
ret = qlafx00_fx_disc(base_vha,
&base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
}
qla2x00_init_host_attr(base_vha);
qla2x00_dfs_setup(base_vha);
@ -2777,6 +2892,8 @@ iospace_config_failed:
} else {
if (ha->iobase)
iounmap(ha->iobase);
if (ha->cregbase)
iounmap(ha->cregbase);
}
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@ -2960,6 +3077,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (ha->iobase)
iounmap(ha->iobase);
if (ha->cregbase)
iounmap(ha->cregbase);
if (ha->mqiobase)
iounmap(ha->mqiobase);
@ -3068,6 +3188,12 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
return;
}
if (atomic_read(&fcport->state) == FCS_ONLINE &&
vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
@ -3710,6 +3836,22 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
}
int
qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
uint32_t *data, int cnt)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
if (!e)
return QLA_FUNCTION_FAILED;
e->u.aenfx.evtcode = evtcode;
e->u.aenfx.count = cnt;
memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
return qla2x00_post_work(vha, e);
}
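
Editor's sketch of how a caller might queue an ISPFx00 AEN through this helper (illustrative only; vha is assumed to be in scope and the event code is just an example from qla_mr.h):

	uint32_t mbx[8] = { QLAFX00_MBA_PORT_UPDATE };	/* event code plus payload */

	/* qla2x00_do_work() later dispatches QLA_EVT_AENFX to qlafx00_process_aen().
	 * A QLA_FUNCTION_FAILED return means no work element could be allocated. */
	(void) qlafx00_post_aenfx_work(vha, QLAFX00_MBA_PORT_UPDATE, mbx, 8);
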
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@ -3758,6 +3900,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@ -4592,6 +4737,38 @@ qla2x00_do_dpc(void *data)
ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
"FCoE context reset end.\n");
}
} else if (IS_QLAFX00(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
&base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
"Firmware Reset Recovery\n");
if (qlafx00_reset_initialize(base_vha)) {
/* Failed. Abort isp later. */
if (!test_bit(UNLOADING,
&base_vha->dpc_flags))
set_bit(ISP_UNRECOVERABLE,
&base_vha->dpc_flags);
ql_dbg(ql_dbg_dpc, base_vha,
0x4021,
"Reset Recovery Failed\n");
}
}
if (test_and_clear_bit(FX00_TARGET_SCAN,
&base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
"ISPFx00 Target Scan scheduled\n");
if (qlafx00_rescan_isp(base_vha)) {
if (!test_bit(UNLOADING,
&base_vha->dpc_flags))
set_bit(ISP_UNRECOVERABLE,
&base_vha->dpc_flags);
ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
"ISPFx00 Target Scan Failed\n");
}
ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
"ISPFx00 Target Scan End\n");
}
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
@ -4630,6 +4807,9 @@ qla2x00_do_dpc(void *data)
clear_bit(SCR_PENDING, &base_vha->dpc_flags);
}
if (IS_QLAFX00(ha))
goto loop_resync_check;
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@ -4654,7 +4834,7 @@ qla2x00_do_dpc(void *data)
}
if (test_and_clear_bit(RESET_MARKER_NEEDED,
&base_vha->dpc_flags) &&
&base_vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
@ -4677,9 +4857,9 @@ qla2x00_do_dpc(void *data)
ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
"Relogin end.\n");
}
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
&base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
"Loop resync scheduled.\n");
@ -4697,6 +4877,9 @@ qla2x00_do_dpc(void *data)
"Loop resync end.\n");
}
if (IS_QLAFX00(ha))
goto intr_on_check;
if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) == LOOP_READY) {
clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
@ -4714,7 +4897,7 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
&base_vha->dpc_flags))
qla2x00_host_ramp_up_queuedepth(base_vha);
intr_on_check:
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@ -4722,7 +4905,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags))
ha->isp_ops->beacon_blink(base_vha);
qla2x00_do_dpc_all_vps(base_vha);
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
end_loop:
@ -4818,6 +5002,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
qla82xx_watchdog(vha);
}
if (!vha->vp_idx && IS_QLAFX00(ha))
qlafx00_timer_routine(vha);
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@ -5335,6 +5522,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);


@ -7,9 +7,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.04.00.13-k"
#define QLA2XXX_VERSION "8.05.00.03-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
#define QLA_DRIVER_MINOR_VER 5
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0


@ -1629,9 +1629,37 @@ static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}
/**
* qla4_83xx_eport_init - Initialize EPort.
* @ha: Pointer to host adapter structure.
*
* If the EPort hardware is in reset state when pause is disabled, serious
* hardware wedging issues can result. To prevent this, perform eport init
* every time before disabling pause frames.
**/
static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
{
/* Clear the 8 registers */
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
/* Write any value to Reset Control register */
qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
}
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
ha->isp_ops->idc_lock(ha);
/* Before disabling pause frames, ensure that eport is not in reset */
qla4_83xx_eport_init(ha);
qla4_83xx_dump_pause_control_regs(ha);
__qla4_83xx_disable_pause(ha);
ha->isp_ops->idc_unlock(ha);


@ -55,6 +55,16 @@
#define QLA83XX_SET_PAUSE_VAL 0x0
#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
#define QLA83XX_RESET_CONTROL 0x28084E50
#define QLA83XX_RESET_REG 0x28084E60
#define QLA83XX_RESET_PORT0 0x28084E70
#define QLA83XX_RESET_PORT1 0x28084E80
#define QLA83XX_RESET_PORT2 0x28084E90
#define QLA83XX_RESET_PORT3 0x28084EA0
#define QLA83XX_RESET_SRE_SHIM 0x28084EB0
#define QLA83XX_RESET_EPG_SHIM 0x28084EC0
#define QLA83XX_RESET_ETHER_PCS 0x28084ED0
/* qla_83xx_reg_tbl registers */
#define QLA83XX_PEG_HALT_STATUS1 0x34A8
#define QLA83XX_PEG_HALT_STATUS2 0x34AC


@ -12,6 +12,7 @@
/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
/* #define QL_DEBUG_LEVEL_4 */
/* #define QL_DEBUG_LEVEL_5 */
/* #define QL_DEBUG_LEVEL_7 */
/* #define QL_DEBUG_LEVEL_9 */
#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
@ -48,6 +49,12 @@
#define DEBUG5(x) do {} while (0);
#endif /* */
#if defined(QL_DEBUG_LEVEL_7)
#define DEBUG7(x) do {x; } while (0)
#else /* */
#define DEBUG7(x) do {} while (0)
#endif /* */
#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x) do {x;} while (0);
#else /* */


@ -159,6 +159,22 @@
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
#define DEV_DB_NON_PERSISTENT 0
#define DEV_DB_PERSISTENT 1
#define COPY_ISID(dst_isid, src_isid) { \
int i, j; \
for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
dst_isid[i++] = src_isid[j--]; \
}
#define SET_BITVAL(o, n, v) { \
if (o) \
n |= v; \
else \
n &= ~v; \
}
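
Editor's sketch of the two helpers above (the six-byte ISID length and the bit position are assumptions made for the example): COPY_ISID() writes the source ISID into the destination in reverse byte order, and SET_BITVAL() sets or clears the mask v in n according to the condition o.

	uint8_t fw_isid[6] = { 0x80, 0x00, 0x02, 0x3d, 0x00, 0x01 };	/* hypothetical */
	uint8_t isid[6];
	uint16_t opts = 0;

	COPY_ISID(isid, fw_isid);	/* isid = 01 00 3d 02 00 80 (reversed)  */
	SET_BITVAL(1, opts, 1 << 13);	/* condition true  -> bit 13 set        */
	SET_BITVAL(0, opts, 1 << 13);	/* condition false -> bit 13 cleared    */
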
/*
* Retry & Timeout Values
*/
@ -363,6 +379,8 @@ struct ql82xx_hw_data {
uint32_t flt_iscsi_param;
uint32_t flt_region_chap;
uint32_t flt_chap_size;
uint32_t flt_region_ddb;
uint32_t flt_ddb_size;
};
struct qla4_8xxx_legacy_intr_set {
@ -501,6 +519,7 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */


@ -288,6 +288,8 @@ union external_hw_config_reg {
#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
#define FA_FLASH_ISCSI_CHAP 0x540000
#define FA_FLASH_CHAP_SIZE 0xC0000
#define FA_FLASH_ISCSI_DDB 0x420000
#define FA_FLASH_DDB_SIZE 0x080000
/* Flash Description Table */
struct qla_fdt_layout {
@ -348,6 +350,7 @@ struct qla_flt_header {
#define FLT_REG_BOOT_CODE_82 0x78
#define FLT_REG_ISCSI_PARAM 0x65
#define FLT_REG_ISCSI_CHAP 0x63
#define FLT_REG_ISCSI_DDB 0x6A
struct qla_flt_region {
uint32_t code;
@ -490,12 +493,16 @@ struct qla_flt_region {
#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
#define MBOX_ASTS_IDC_COMPLETE 0x8100
#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@ -779,12 +786,41 @@ struct dev_db_entry {
#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
#define OPT_IS_FW_ASSIGNED_IPV6 11
#define OPT_IPV6_DEVICE 8
#define OPT_AUTO_SENDTGTS_DISABLE 6
#define OPT_DISC_SESSION 4
#define OPT_ENTRY_STATE 3
uint16_t exec_throttle; /* 02-03 */
uint16_t exec_count; /* 04-05 */
uint16_t res0; /* 06-07 */
uint16_t iscsi_options; /* 08-09 */
#define ISCSIOPT_HEADER_DIGEST_EN 13
#define ISCSIOPT_DATA_DIGEST_EN 12
#define ISCSIOPT_IMMEDIATE_DATA_EN 11
#define ISCSIOPT_INITIAL_R2T_EN 10
#define ISCSIOPT_DATA_SEQ_IN_ORDER 9
#define ISCSIOPT_DATA_PDU_IN_ORDER 8
#define ISCSIOPT_CHAP_AUTH_EN 7
#define ISCSIOPT_SNACK_REQ_EN 6
#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5
#define ISCSIOPT_BIDI_CHAP_EN 4
#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3
#define ISCSIOPT_ERL1 1
#define ISCSIOPT_ERL0 0
uint16_t tcp_options; /* 0A-0B */
#define TCPOPT_TIMESTAMP_STAT 6
#define TCPOPT_NAGLE_DISABLE 5
#define TCPOPT_WSF_DISABLE 4
#define TCPOPT_TIMER_SCALE3 3
#define TCPOPT_TIMER_SCALE2 2
#define TCPOPT_TIMER_SCALE1 1
#define TCPOPT_TIMESTAMP_EN 0
uint16_t ip_options; /* 0C-0D */
#define IPOPT_FRAGMENT_DISABLE 4
uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
#define BYTE_UNITS 512
uint32_t res1; /* 10-13 */
@ -816,6 +852,8 @@ struct dev_db_entry {
* much RAM */
uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
#define DDB_NO_LINK 0xFFFF
#define DDB_ISNS 0xFFFD
uint16_t ddb_link; /* 1C0-1C1 */
uint16_t chap_tbl_idx; /* 1C2-1C3 */
uint16_t tgt_portal_grp; /* 1C4-1C5 */


@ -191,6 +191,9 @@ int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data);
int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
@ -224,8 +227,6 @@ void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
@ -261,6 +262,10 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr);
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
char *password, uint16_t chap_index);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;


@ -396,7 +396,6 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
task_data = task->dd_data;
memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
ha->req_q_count += task_data->iocb_req_cnt;
ha->iocb_cnt -= task_data->iocb_req_cnt;
queue_work(ha->task_wq, &task_data->task_work);
}
@ -416,7 +415,6 @@ static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
return mrb;
/* update counters */
ha->req_q_count += mrb->iocb_cnt;
ha->iocb_cnt -= mrb->iocb_cnt;
return mrb;
@ -877,6 +875,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
break;
case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
ha->host_no, mbox_sts[0], mbox_sts[1],
mbox_sts[2], mbox_sts[3], mbox_sts[4],
mbox_sts[5]));
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
ha->host_no, mbox_sts[0]));
break;
case MBOX_ASTS_INITIALIZATION_FAILED:
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
ha->host_no, mbox_sts[0],
mbox_sts[3]));
break;
case MBOX_ASTS_SYSTEM_WARNING_EVENT:
DEBUG2(ql4_printk(KERN_WARNING, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
ha->host_no, mbox_sts[0], mbox_sts[1],
mbox_sts[2], mbox_sts[3], mbox_sts[4],
mbox_sts[5]));
break;
case MBOX_ASTS_DCBX_CONF_CHANGE:
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
ha->host_no, mbox_sts[0], mbox_sts[1],
mbox_sts[2], mbox_sts[3], mbox_sts[4],
mbox_sts[5]));
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
ha->host_no, mbox_sts[0]));
break;
default:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x UNKNOWN\n",
@ -1099,8 +1134,8 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s legacy Int not triggered\n", __func__));
DEBUG7(ql4_printk(KERN_INFO, ha,
"%s legacy Int not triggered\n", __func__));
return IRQ_NONE;
}
@ -1158,7 +1193,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
DEBUG2(ql4_printk(KERN_ERR, ha,
DEBUG7(ql4_printk(KERN_ERR, ha,
"%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
__func__));
return IRQ_NONE;
@ -1166,7 +1201,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
DEBUG2(ql4_printk(KERN_ERR, ha,
DEBUG7(ql4_printk(KERN_ERR, ha,
"%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
__func__, (leg_int_ptr & PF_BITS_MASK),
ha->pf_bit));


@ -1129,6 +1129,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
uint32_t scsi_lun[2];
int status = QLA_SUCCESS;
DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
@ -1140,10 +1141,16 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
mbox_cmd[0] = MBOX_CMD_LUN_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[2] = lun << 8;
/* FW expects LUN bytes 0-3 in Incoming Mailbox 2
* (LUN byte 0 is LSByte, byte 3 is MSByte) */
mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
/* FW expects LUN bytes 4-7 in Incoming Mailbox 3
* (LUN byte 4 is LSByte, byte 7 is MSByte) */
mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
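Not part of the patch, only an illustration of the comments above: the new packing follows the SAM LUN layout produced by int_to_scsilun(), so LUN bytes 0-3 land in mailbox 2 and bytes 4-7 in mailbox 3, least-significant byte first. A minimal userspace sketch of that byte layout (the LUN value and the little-endian host are illustrative assumptions, and the loop only approximates the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t lun = 5;		/* hypothetical flat LUN number */
	uint8_t scsi_lun[8];
	uint32_t words[2];
	int i;

	/* SAM-style packing: two bytes per addressing level, as int_to_scsilun() does */
	memset(scsi_lun, 0, sizeof(scsi_lun));
	for (i = 0; i < 8; i += 2) {
		scsi_lun[i]     = (lun >> 8) & 0xFF;
		scsi_lun[i + 1] = lun & 0xFF;
		lun >>= 16;
	}

	/* LUN bytes 0-3 form the first word, bytes 4-7 the second;
	 * on a little-endian host byte 0 becomes the LSByte, matching
	 * the mailbox comments (the driver adds cpu_to_le32 on top). */
	memcpy(words, scsi_lun, sizeof(words));

	printf("mailbox 2 candidate: 0x%08x\n", (unsigned int)words[0]);
	printf("mailbox 3 candidate: 0x%08x\n", (unsigned int)words[1]);
	return 0;
}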
@ -1281,8 +1288,8 @@ exit_about_fw:
return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr)
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@ -1410,6 +1417,55 @@ exit_bootdb_failed:
return status;
}
int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
uint32_t dev_db_start_offset;
uint32_t dev_db_end_offset;
int status = QLA_ERROR;
memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
if (is_qla40XX(ha)) {
dev_db_start_offset = FLASH_OFFSET_DB_INFO;
dev_db_end_offset = FLASH_OFFSET_DB_END;
} else {
dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
(ha->hw.flt_region_ddb << 2);
/* flt_ddb_size is DDB table size for both ports
* so divide it by 2 to calculate the offset for second port
*/
if (ha->port_num == 1)
dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
dev_db_end_offset = dev_db_start_offset +
(ha->hw.flt_ddb_size / 2);
}
dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
if (dev_db_start_offset > dev_db_end_offset) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s:Invalid DDB index %d", __func__,
ddb_index));
goto exit_fdb_failed;
}
if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
ha->host_no, __func__);
goto exit_fdb_failed;
}
if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
status = QLA_SUCCESS;
exit_fdb_failed:
return status;
}
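The offset arithmetic in the function above can be read in isolation: flt_region_ddb is a word address (hence the << 2), flt_ddb_size covers both ports (hence the / 2 step for port 1), and each DDB index adds one entry size. A small standalone sketch, with the raw-access base and the entry size as hypothetical placeholder values:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RAW_ACCESS_ADDR	0x8000000u	/* hypothetical FLASH_RAW_ACCESS_ADDR */
#define EXAMPLE_DDB_ENTRY_SIZE	0x300u		/* hypothetical sizeof(struct dev_db_entry) */

static uint32_t ddb_flash_offset(uint32_t flt_region_ddb, uint32_t flt_ddb_size,
				 int port_num, uint16_t ddb_index)
{
	/* word address from the FLT, converted to a byte offset */
	uint32_t start = EXAMPLE_RAW_ACCESS_ADDR + (flt_region_ddb << 2);

	/* the region holds both ports; the second port uses the upper half */
	if (port_num == 1)
		start += flt_ddb_size / 2;

	return start + ddb_index * EXAMPLE_DDB_ENTRY_SIZE;
}

int main(void)
{
	/* hypothetical FLT contents: DDB region at word 0x1000, 0x6000 bytes total */
	printf("port 0, index 3: 0x%x\n", ddb_flash_offset(0x1000, 0x6000, 0, 3));
	printf("port 1, index 3: 0x%x\n", ddb_flash_offset(0x1000, 0x6000, 1, 3));
	return 0;
}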
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx)
{
@ -1503,6 +1559,62 @@ exit_set_chap:
return ret;
}
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
char *password, uint16_t chap_index)
{
int rval = QLA_ERROR;
struct ql4_chap_table *chap_table = NULL;
int max_chap_entries;
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
rval = QLA_ERROR;
goto exit_uni_chap;
}
if (!username || !password) {
ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
rval = QLA_ERROR;
goto exit_uni_chap;
}
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (chap_index > max_chap_entries) {
ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
rval = QLA_ERROR;
goto exit_uni_chap;
}
mutex_lock(&ha->chap_sem);
chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
}
if (!(chap_table->flags & BIT_6)) {
ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
rval = QLA_ERROR;
goto exit_unlock_uni_chap;
}
strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
rval = QLA_SUCCESS;
exit_unlock_uni_chap:
mutex_unlock(&ha->chap_sem);
exit_uni_chap:
return rval;
}
/**
* qla4xxx_get_chap_index - Get chap index given username and secret
* @ha: pointer to adapter structure
@ -1524,7 +1636,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
if (is_qla8022(ha))
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else


@ -3154,6 +3154,10 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
hw->flt_region_chap = start;
hw->flt_chap_size = le32_to_cpu(region->size);
break;
case FLT_REG_ISCSI_DDB:
hw->flt_region_ddb = start;
hw->flt_ddb_size = le32_to_cpu(region->size);
break;
}
}
goto done;
@ -3166,14 +3170,19 @@ no_flash_data:
hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
hw->flt_region_chap = FA_FLASH_ISCSI_CHAP;
hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2;
hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2;
hw->flt_ddb_size = FA_FLASH_DDB_SIZE;
done:
DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
"boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload,
hw->flt_region_fw));
DEBUG2(ql4_printk(KERN_INFO, ha,
"FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
loc, hw->flt_region_flt, hw->flt_region_fdt,
hw->flt_region_boot, hw->flt_region_bootload,
hw->flt_region_fw, hw->flt_region_chap,
hw->flt_chap_size, hw->flt_region_ddb,
hw->flt_ddb_size));
}
static void

(Diff for this file not shown because it is too large.)


@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"

(Diff for this file not shown because it is too large.)


@ -63,6 +63,12 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_PING = UEVENT_BASE + 22,
ISCSI_UEVENT_GET_CHAP = UEVENT_BASE + 23,
ISCSI_UEVENT_DELETE_CHAP = UEVENT_BASE + 24,
ISCSI_UEVENT_SET_FLASHNODE_PARAMS = UEVENT_BASE + 25,
ISCSI_UEVENT_NEW_FLASHNODE = UEVENT_BASE + 26,
ISCSI_UEVENT_DEL_FLASHNODE = UEVENT_BASE + 27,
ISCSI_UEVENT_LOGIN_FLASHNODE = UEVENT_BASE + 28,
ISCSI_UEVENT_LOGOUT_FLASHNODE = UEVENT_BASE + 29,
ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@ -210,6 +216,31 @@ struct iscsi_uevent {
uint32_t host_no;
uint16_t chap_tbl_idx;
} delete_chap;
struct msg_set_flashnode_param {
uint32_t host_no;
uint32_t flashnode_idx;
uint32_t count;
} set_flashnode;
struct msg_new_flashnode {
uint32_t host_no;
uint32_t len;
} new_flashnode;
struct msg_del_flashnode {
uint32_t host_no;
uint32_t flashnode_idx;
} del_flashnode;
struct msg_login_flashnode {
uint32_t host_no;
uint32_t flashnode_idx;
} login_flashnode;
struct msg_logout_flashnode {
uint32_t host_no;
uint32_t flashnode_idx;
} logout_flashnode;
struct msg_logout_flashnode_sid {
uint32_t host_no;
uint32_t sid;
} logout_flashnode_sid;
} u;
union {
/* messages k -> u */
@ -267,6 +298,9 @@ struct iscsi_uevent {
with each ping request */
uint32_t data_size;
} ping_comp;
struct msg_new_flashnode_ret {
uint32_t flashnode_idx;
} new_flashnode_ret;
} r;
} __attribute__ ((aligned (sizeof(uint64_t))));
@ -274,6 +308,7 @@ enum iscsi_param_type {
ISCSI_PARAM, /* iscsi_param (session, conn, target, LU) */
ISCSI_HOST_PARAM, /* iscsi_host_param */
ISCSI_NET_PARAM, /* iscsi_net_param */
ISCSI_FLASHNODE_PARAM, /* iscsi_flashnode_param */
};
struct iscsi_iface_param_info {
@ -469,6 +504,88 @@ enum iscsi_host_param {
ISCSI_HOST_PARAM_MAX,
};
/* portal type */
#define PORTAL_TYPE_IPV4 "ipv4"
#define PORTAL_TYPE_IPV6 "ipv6"
/* iSCSI Flash Target params */
enum iscsi_flashnode_param {
ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6,
ISCSI_FLASHNODE_PORTAL_TYPE,
ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE,
ISCSI_FLASHNODE_DISCOVERY_SESS,
ISCSI_FLASHNODE_ENTRY_EN,
ISCSI_FLASHNODE_HDR_DGST_EN,
ISCSI_FLASHNODE_DATA_DGST_EN,
ISCSI_FLASHNODE_IMM_DATA_EN,
ISCSI_FLASHNODE_INITIAL_R2T_EN,
ISCSI_FLASHNODE_DATASEQ_INORDER,
ISCSI_FLASHNODE_PDU_INORDER,
ISCSI_FLASHNODE_CHAP_AUTH_EN,
ISCSI_FLASHNODE_SNACK_REQ_EN,
ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN,
ISCSI_FLASHNODE_BIDI_CHAP_EN,
/* make authentication for discovery sessions optional */
ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL,
ISCSI_FLASHNODE_ERL,
ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT,
ISCSI_FLASHNODE_TCP_NAGLE_DISABLE,
ISCSI_FLASHNODE_TCP_WSF_DISABLE,
ISCSI_FLASHNODE_TCP_TIMER_SCALE,
ISCSI_FLASHNODE_TCP_TIMESTAMP_EN,
ISCSI_FLASHNODE_IP_FRAG_DISABLE,
ISCSI_FLASHNODE_MAX_RECV_DLENGTH,
ISCSI_FLASHNODE_MAX_XMIT_DLENGTH,
ISCSI_FLASHNODE_FIRST_BURST,
ISCSI_FLASHNODE_DEF_TIME2WAIT,
ISCSI_FLASHNODE_DEF_TIME2RETAIN,
ISCSI_FLASHNODE_MAX_R2T,
ISCSI_FLASHNODE_KEEPALIVE_TMO,
ISCSI_FLASHNODE_ISID,
ISCSI_FLASHNODE_TSID,
ISCSI_FLASHNODE_PORT,
ISCSI_FLASHNODE_MAX_BURST,
ISCSI_FLASHNODE_DEF_TASKMGMT_TMO,
ISCSI_FLASHNODE_IPADDR,
ISCSI_FLASHNODE_ALIAS,
ISCSI_FLASHNODE_REDIRECT_IPADDR,
ISCSI_FLASHNODE_MAX_SEGMENT_SIZE,
ISCSI_FLASHNODE_LOCAL_PORT,
ISCSI_FLASHNODE_IPV4_TOS,
ISCSI_FLASHNODE_IPV6_TC,
ISCSI_FLASHNODE_IPV6_FLOW_LABEL,
ISCSI_FLASHNODE_NAME,
ISCSI_FLASHNODE_TPGT,
ISCSI_FLASHNODE_LINK_LOCAL_IPV6,
ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX,
ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE,
ISCSI_FLASHNODE_TCP_XMIT_WSF,
ISCSI_FLASHNODE_TCP_RECV_WSF,
ISCSI_FLASHNODE_CHAP_IN_IDX,
ISCSI_FLASHNODE_CHAP_OUT_IDX,
ISCSI_FLASHNODE_USERNAME,
ISCSI_FLASHNODE_USERNAME_IN,
ISCSI_FLASHNODE_PASSWORD,
ISCSI_FLASHNODE_PASSWORD_IN,
ISCSI_FLASHNODE_STATSN,
ISCSI_FLASHNODE_EXP_STATSN,
ISCSI_FLASHNODE_IS_BOOT_TGT,
ISCSI_FLASHNODE_MAX,
};
struct iscsi_flashnode_param_info {
uint32_t len; /* Actual length of the param */
uint16_t param; /* iscsi param value */
uint8_t value[0]; /* length sized value follows */
} __packed;
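A sketch of how an LLD's ->set_flashnode_param() callback might consume these records, assuming the buffer is packed as netlink attributes and that numeric values are carried in native binary form in value[]; the example_ name and the two handled parameters are illustrative, not the actual qla4xxx code:

#include <linux/string.h>
#include <net/netlink.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

static int example_set_flashnode_param(struct iscsi_bus_flash_session *fnode_sess,
				       struct iscsi_bus_flash_conn *fnode_conn,
				       void *data, int len)
{
	struct iscsi_flashnode_param_info *fnode_param;
	struct nlattr *attr;
	uint16_t port;
	int rem;

	/* walk each attribute; its payload is one iscsi_flashnode_param_info */
	nla_for_each_attr(attr, data, len, rem) {
		fnode_param = nla_data(attr);

		switch (fnode_param->param) {
		case ISCSI_FLASHNODE_PORT:
			/* value[] is unaligned, so copy rather than cast */
			memcpy(&port, fnode_param->value, sizeof(port));
			fnode_conn->port = port;
			break;
		case ISCSI_FLASHNODE_MAX_BURST:
			memcpy(&fnode_sess->max_burst, fnode_param->value,
			       sizeof(fnode_sess->max_burst));
			break;
		default:
			/* skip parameters this sketch does not handle */
			break;
		}
	}

	/* a real LLD would now write the updated DDB entry back to flash */
	return 0;
}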
enum iscsi_discovery_parent_type {
ISCSI_DISC_PARENT_UNKNOWN = 0x1,
ISCSI_DISC_PARENT_SENDTGT = 0x2,
ISCSI_DISC_PARENT_ISNS = 0x3,
};
/* iSCSI port Speed */
enum iscsi_port_speed {
ISCSI_PORT_SPEED_UNKNOWN = 0x1,


@ -427,6 +427,7 @@ extern void iscsi_complete_scsi_task(struct iscsi_task *task,
*/
extern void iscsi_pool_free(struct iscsi_pool *);
extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
extern int iscsi_switch_str_param(char **, char *);
/*
* inline functions to deal with padding.


@ -39,6 +39,8 @@ struct iscsi_task;
struct sockaddr;
struct iscsi_iface;
struct bsg_job;
struct iscsi_bus_flash_session;
struct iscsi_bus_flash_conn;
/**
* struct iscsi_transport - iSCSI Transport template
@ -150,6 +152,19 @@ struct iscsi_transport {
int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf);
int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
int param, char *buf);
int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_bus_flash_conn *fnode_conn,
void *data, int len);
int (*new_flashnode) (struct Scsi_Host *shost, const char *buf,
int len);
int (*del_flashnode) (struct iscsi_bus_flash_session *fnode_sess);
int (*login_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_bus_flash_conn *fnode_conn);
int (*logout_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_bus_flash_conn *fnode_conn);
int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess);
};
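A sketch of wiring two of the new flash-node hooks into an LLD's iscsi_transport template; the example_ names and the stub bodies are hypothetical, only the callback signatures come from the structure above:

#include <linux/errno.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_iscsi.h>

static int example_new_flashnode(struct Scsi_Host *shost, const char *buf,
				 int len)
{
	/* a real LLD would pick a free flash DDB index and return it */
	return -ENOSYS;
}

static int example_del_flashnode(struct iscsi_bus_flash_session *fnode_sess)
{
	/* a real LLD would free the flash DDB entry backing fnode_sess */
	return -ENOSYS;
}

static struct iscsi_transport example_iscsi_transport = {
	.name		= "example",
	/* ... the usual caps and session/connection callbacks go here ... */
	.new_flashnode	= example_new_flashnode,
	.del_flashnode	= example_del_flashnode,
};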
/*
@ -286,6 +301,112 @@ struct iscsi_iface {
#define iscsi_iface_to_shost(_iface) \
dev_to_shost(_iface->dev.parent)
struct iscsi_bus_flash_conn {
struct list_head conn_list; /* item in connlist */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
struct device dev; /* sysfs transport/container device */
/* iscsi connection parameters */
uint32_t exp_statsn;
uint32_t statsn;
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
unsigned max_xmit_dlength; /* target_max_recv_dsl */
unsigned max_segment_size;
unsigned tcp_xmit_wsf;
unsigned tcp_recv_wsf;
int hdrdgst_en;
int datadgst_en;
int port;
char *ipaddress;
char *link_local_ipv6_addr;
char *redirect_ipaddr;
uint16_t keepalive_timeout;
uint16_t local_port;
uint8_t snack_req_en;
/* tcp timestamp negotiation status */
uint8_t tcp_timestamp_stat;
uint8_t tcp_nagle_disable;
/* tcp window scale factor */
uint8_t tcp_wsf_disable;
uint8_t tcp_timer_scale;
uint8_t tcp_timestamp_en;
uint8_t ipv4_tos;
uint8_t ipv6_traffic_class;
uint8_t ipv6_flow_label;
uint8_t fragment_disable;
/* Link local IPv6 address is assigned by firmware or driver */
uint8_t is_fw_assigned_ipv6;
};
#define iscsi_dev_to_flash_conn(_dev) \
container_of(_dev, struct iscsi_bus_flash_conn, dev)
#define iscsi_flash_conn_to_flash_session(_conn) \
iscsi_dev_to_flash_session(_conn->dev.parent)
#define ISID_SIZE 6
struct iscsi_bus_flash_session {
struct list_head sess_list; /* item in session_list */
struct iscsi_transport *transport;
unsigned int target_id;
int flash_state; /* persistent or non-persistent */
void *dd_data; /* LLD private data */
struct device dev; /* sysfs transport/container device */
/* iscsi session parameters */
unsigned first_burst;
unsigned max_burst;
unsigned short max_r2t;
int default_taskmgmt_timeout;
int initial_r2t_en;
int imm_data_en;
int time2wait;
int time2retain;
int pdu_inorder_en;
int dataseq_inorder_en;
int erl;
int tpgt;
char *username;
char *username_in;
char *password;
char *password_in;
char *targetname;
char *targetalias;
char *portal_type;
uint16_t tsid;
uint16_t chap_in_idx;
uint16_t chap_out_idx;
/* index of iSCSI discovery session if the entry is
* discovered by iSCSI discovery session
*/
uint16_t discovery_parent_idx;
/* indicates if discovery was done through iSNS discovery service
* or through sendTarget */
uint16_t discovery_parent_type;
/* Firmware auto sendtarget discovery disable */
uint8_t auto_snd_tgt_disable;
uint8_t discovery_sess;
/* indicates if this flashnode entry is enabled or disabled */
uint8_t entry_state;
uint8_t chap_auth_en;
/* enables firmware to auto logout the discovery session on discovery
* completion
*/
uint8_t discovery_logout_en;
uint8_t bidi_chap_en;
/* makes authentication for discovery session optional */
uint8_t discovery_auth_optional;
uint8_t isid[ISID_SIZE];
uint8_t is_boot_target;
};
#define iscsi_dev_to_flash_session(_dev) \
container_of(_dev, struct iscsi_bus_flash_session, dev)
#define iscsi_flash_session_to_shost(_session) \
dev_to_shost(_session->dev.parent)
/*
* session and connection functions that can be used by HW iSCSI LLDs
*/
@ -330,4 +451,34 @@ extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
extern char *iscsi_get_discovery_parent_name(int parent_type);
extern struct device *
iscsi_find_flashnode(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data));
extern struct iscsi_bus_flash_session *
iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
struct iscsi_transport *transport, int dd_size);
extern struct iscsi_bus_flash_conn *
iscsi_create_flashnode_conn(struct Scsi_Host *shost,
struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_transport *transport, int dd_size);
extern void
iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
extern int iscsi_flashnode_bus_match(struct device *dev,
struct device_driver *drv);
extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
extern struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
void *data,
int (*fn)(struct device *dev, void *data));
#endif
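A sketch of how an LLD might export one flash DDB entry through the helpers declared above, creating a flash session object and a connection object hanging off it; the example_ helper, the transport variable, and the zero dd_size are illustrative assumptions:

#include <linux/errno.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_iscsi.h>

static struct iscsi_transport example_iscsi_transport;	/* filled in elsewhere */

static int example_export_flashnode(struct Scsi_Host *shost, int ddb_index)
{
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;

	/* one sysfs session object per flash DDB index, no LLD private data */
	fnode_sess = iscsi_create_flashnode_sess(shost, ddb_index,
						 &example_iscsi_transport, 0);
	if (!fnode_sess)
		return -ENOMEM;

	/* and one connection object parented to that session */
	fnode_conn = iscsi_create_flashnode_conn(shost, fnode_sess,
						 &example_iscsi_transport, 0);
	if (!fnode_conn) {
		iscsi_destroy_flashnode_sess(fnode_sess);
		return -ENOMEM;
	}

	/* the LLD would now copy the flash DDB contents into the two objects */
	return 0;
}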