Merge SCSI misc branch into isci-for-3.6 tag

James Bottomley 2012-10-02 08:55:12 +01:00
Parents 1c4cf1d584 0644f5393e
Commit fe709ed827
176 changed files: 13507 additions and 4475 deletions

View file

@ -1350,6 +1350,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
* nohrst, nosrst, norst: suppress hard, soft
and both resets.
* rstonce: only attempt one reset during
hot-unplug link recovery
* dump_id: dump IDENTIFY data.
If there are multiple matching configurations changing
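The new rstonce flag is also reachable through the libata.force boot parameter, whose parser gains a matching table entry later in this merge (see ata_parse_force_one below); for example, libata.force=1:rstonce (port 1; the ID is illustrative) restricts that port's hot-unplug link recovery to a single reset attempt.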

View file

@ -1,3 +1,13 @@
Release Date : Tue. Jun 17, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford/Kashyap Desai
Current Version : 00.00.06.18-rc1
Old Version : 00.00.06.15-rc1
1. Fix Copyright dates.
2. Add throttlequeuedepth module parameter.
3. Add resetwaittime module parameter.
4. Move poll_aen_lock initializer.
-------------------------------------------------------------------------------
Release Date : Mon. Mar 19, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford

View file

@ -1,4 +1,4 @@
Copyright (c) 2003-2011 QLogic Corporation
Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.

View file

@ -1,4 +1,4 @@
Copyright (c) 2003-2011 QLogic Corporation
Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux iSCSI Driver
This program includes a device driver for Linux 3.x.

View file

@ -112,10 +112,8 @@ attempted).
MINOR NUMBERS
The tape driver currently supports 128 drives by default. This number
can be increased by editing st.h and recompiling the driver if
necessary. The upper limit is 2^17 drives if 4 modes for each drive
are used.
The tape driver currently supports up to 2^17 drives if 4 modes for
each drive are used.
The minor numbers consist of the following bit fields:
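The 2^17 figure follows from the 20-bit char-device minor space: 4 modes take 2 bits and the rewind/no-rewind pairing takes 1, leaving 17 bits for the drive index (2^17 x 4 x 2 = 2^20). A sketch of that decode, assuming the conventional st layout with the low drive bits below the mode field and the remaining drive bits above the no-rewind flag (illustrative names, not the actual st.h macros):

    /* 20-bit minor (sketch only): bits 0-4 low drive, 5-6 mode,
     * 7 no-rewind, 8-19 high drive => 17 drive bits in total.
     */
    static unsigned int st_example_drive(unsigned int minor)
    {
            return (minor & 0x1f) | ((minor & ~0xffu) >> 3);
    }

    static unsigned int st_example_mode(unsigned int minor)
    {
            return (minor >> 5) & 3;
    }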

View file

@ -1645,7 +1645,6 @@ F: drivers/bcma/
F: include/linux/bcma/
BROCADE BFA FC SCSI DRIVER
M: Jing Huang <huangj@brocade.com>
M: Krishna C Gudipati <kgudipat@brocade.com>
L: linux-scsi@vger.kernel.org
S: Supported
@ -3431,6 +3430,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
IBM Power Virtual SCSI/FC Device Drivers
M: Robert Jennings <rcj@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/
X: drivers/scsi/ibmvscsi/ibmvstgt.c
IBM ServeRAID RAID DRIVER
P: Jack Hammer
M: Dave Jeffery <ipslinux@adaptec.com>

View file

@ -5253,16 +5253,20 @@ bool ata_link_offline(struct ata_link *link)
#ifdef CONFIG_PM
static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
int *async)
{
struct ata_link *link;
unsigned long flags;
int rc;
int rc = 0;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
if (async) {
*async = -EAGAIN;
return 0;
}
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@ -5271,10 +5275,10 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_lock_irqsave(ap->lock, flags);
ap->pm_mesg = mesg;
if (wait) {
rc = 0;
if (async)
ap->pm_result = async;
else
ap->pm_result = &rc;
}
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@ -5287,7 +5291,7 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
/* wait and check result */
if (wait) {
if (!async) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
@ -5295,9 +5299,8 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
return rc;
}
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
{
struct ata_port *ap = to_ata_port(dev);
unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
@ -5312,10 +5315,17 @@ static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
if (mesg.event == PM_EVENT_SUSPEND)
ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
return rc;
}
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
{
struct ata_port *ap = to_ata_port(dev);
return __ata_port_suspend_common(ap, mesg, NULL);
}
static int ata_port_suspend(struct device *dev)
{
if (pm_runtime_suspended(dev))
@ -5340,16 +5350,22 @@ static int ata_port_poweroff(struct device *dev)
return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
static int ata_port_resume_common(struct device *dev)
static int __ata_port_resume_common(struct ata_port *ap, int *async)
{
struct ata_port *ap = to_ata_port(dev);
int rc;
rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
return rc;
}
static int ata_port_resume_common(struct device *dev)
{
struct ata_port *ap = to_ata_port(dev);
return __ata_port_resume_common(ap, NULL);
}
static int ata_port_resume(struct device *dev)
{
int rc;
@ -5382,6 +5398,24 @@ static const struct dev_pm_ops ata_port_pm_ops = {
.runtime_idle = ata_port_runtime_idle,
};
/* sas ports don't participate in pm runtime management of ata_ports,
* and need to resume ata devices at the domain level, not the per-port
* level. sas suspend/resume is async to allow parallel port recovery
* since sas has multiple ata_port instances per Scsi_Host.
*/
int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
{
return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
return __ata_port_resume_common(ap, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
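The async pointer added here gives SAS drivers a fire-and-forget variant of port PM: when it is non-NULL the request is queued to EH and the eventual result (or -EAGAIN, if a previous operation is still pending) is written through the pointer instead of being waited for inline. A minimal caller sketch, with hypothetical host bookkeeping (only the two exported helpers come from this patch):

    /* Sketch: suspend every ata_port of a SAS host in parallel.
     * host->ports[] and host->pm_result[] are illustrative; each
     * pm_result slot must stay valid until EH has run the request.
     */
    static int example_sas_host_suspend(struct example_sas_host *host)
    {
            int i, rc = 0;

            for (i = 0; i < host->nr_ports; i++)
                    ata_sas_port_async_suspend(host->ports[i],
                                               &host->pm_result[i]);

            /* ... drive or flush libata EH here ... */

            for (i = 0; i < host->nr_ports; i++)
                    if (host->pm_result[i])
                            rc = host->pm_result[i];
            return rc;
    }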
/**
* ata_host_suspend - suspend host
* @host: host to suspend
@ -5927,24 +5961,18 @@ int ata_host_start(struct ata_host *host)
}
/**
* ata_sas_host_init - Initialize a host struct
* ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
* @host: host to initialize
* @dev: device host is attached to
* @flags: host flags
* @ops: port_ops
*
* LOCKING:
* PCI/etc. bus probe sem.
*
*/
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
unsigned long flags, struct ata_port_operations *ops)
struct ata_port_operations *ops)
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->dev = dev;
host->flags = flags;
host->ops = ops;
}
@ -6388,6 +6416,7 @@ static int __init ata_parse_force_one(char **cur,
{ "nohrst", .lflags = ATA_LFLAG_NO_HRST },
{ "nosrst", .lflags = ATA_LFLAG_NO_SRST },
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;

View file

@ -2623,6 +2623,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
*/
while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
max_tries++;
if (link->flags & ATA_LFLAG_RST_ONCE)
max_tries = 1;
if (link->flags & ATA_LFLAG_NO_HRST)
hardreset = NULL;
if (link->flags & ATA_LFLAG_NO_SRST)

View file

@ -1666,7 +1666,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
"MEM failed\n", ioc->name);
return r;
goto out_pci_disable_device;
}
if (sizeof(dma_addr_t) > 4) {
@ -1690,8 +1690,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
pci_release_selected_regions(pdev, ioc->bars);
return r;
goto out_pci_release_region;
}
} else {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
@ -1704,8 +1703,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
} else {
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
ioc->name, pci_name(pdev));
pci_release_selected_regions(pdev, ioc->bars);
return r;
goto out_pci_release_region;
}
}
@ -1735,8 +1733,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
" memory!\n", ioc->name);
pci_release_selected_regions(pdev, ioc->bars);
return -EINVAL;
r = -EINVAL;
goto out_pci_release_region;
}
ioc->memmap = mem;
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
@ -1750,6 +1748,12 @@ mpt_mapresources(MPT_ADAPTER *ioc)
ioc->pio_chip = (SYSIF_REGS __iomem *)port;
return 0;
out_pci_release_region:
pci_release_selected_regions(pdev, ioc->bars);
out_pci_disable_device:
pci_disable_device(pdev);
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
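The conversion above replaces copy-pasted cleanup at each failure site with a single unwind tail, so every error path now releases the selected regions and also disables the PCI device, in reverse order of acquisition. The same shape in isolation, as a sketch (example_setup is hypothetical; the pci_* calls are the standard kernel APIs):

    static int example_probe(struct pci_dev *pdev)
    {
            int r = pci_enable_device(pdev);

            if (r)
                    return r;
            r = pci_request_regions(pdev, "example");
            if (r)
                    goto out_disable;
            r = example_setup(pdev);        /* hypothetical step */
            if (r)
                    goto out_release;
            return 0;

    out_release:
            pci_release_regions(pdev);
    out_disable:
            pci_disable_device(pdev);
            return r;
    }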

View file

@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
atomic_set(&port->units, 0);
INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);

View file

@ -39,19 +39,25 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}
static int zfcp_ccw_activate(struct ccw_device *cdev)
/**
* zfcp_ccw_activate - activate adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
* @clear: Status flags to clear.
* @tag: s390dbf trace record tag
*/
static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_clear_adapter_status(adapter, clear);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccresu2");
tag);
zfcp_erp_wait(adapter);
flush_work(&adapter->scan_work);
flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
@ -164,7 +170,36 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_ccw_activate(cdev);
zfcp_ccw_activate(cdev, 0, "ccsonl1");
/* scan for remote ports
either at the end of any successful adapter recovery
or only after the adapter recovery for setting a device online */
zfcp_fc_inverse_conditional_port_scan(adapter);
flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
zfcp_ccw_adapter_put(adapter);
return 0;
}
/**
* zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
* @set: Status flags to set.
* @tag: s390dbf trace record tag
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_set_adapter_status(adapter, set);
zfcp_erp_adapter_shutdown(adapter, 0, tag);
zfcp_erp_wait(adapter);
zfcp_ccw_adapter_put(adapter);
return 0;
}
@ -178,16 +213,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
*/
static int zfcp_ccw_set_offline(struct ccw_device *cdev)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
if (!adapter)
return 0;
zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
zfcp_erp_wait(adapter);
zfcp_ccw_adapter_put(adapter);
return 0;
return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
}
/**
@ -207,6 +233,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
switch (event) {
case CIO_GONE:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
zfcp_dbf_hba_basic("ccnigo1", adapter);
break;
}
dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
@ -216,6 +247,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
if (atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
zfcp_dbf_hba_basic("ccniop1", adapter);
break;
}
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
@ -251,6 +287,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter);
}
static int zfcp_ccw_suspend(struct ccw_device *cdev)
{
zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
return 0;
}
static int zfcp_ccw_thaw(struct ccw_device *cdev)
{
/* trace records for thaw and final shutdown during suspend
can only be found in system dump until the end of suspend
but not after resume because it's based on the memory image
right after the very first suspend (freeze) callback */
zfcp_ccw_activate(cdev, 0, "ccthaw1");
return 0;
}
static int zfcp_ccw_resume(struct ccw_device *cdev)
{
zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
return 0;
}
struct ccw_driver zfcp_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@ -263,7 +321,7 @@ struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
.freeze = zfcp_ccw_set_offline,
.thaw = zfcp_ccw_activate,
.restore = zfcp_ccw_activate,
.freeze = zfcp_ccw_suspend,
.thaw = zfcp_ccw_thaw,
.restore = zfcp_ccw_resume,
};
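Net effect of the rewiring: freeze now goes through zfcp_ccw_suspend, an offline-sync that additionally stamps ZFCP_STATUS_ADAPTER_SUSPENDED; restore goes through zfcp_ccw_resume, an activate that clears the flag again; and thaw activates without touching it. That flag is what lets the notify handler above ignore the CIO_GONE/CIO_OPER events that are expected while the system image is frozen.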

View file

@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
shost_for_each_device(sdev, port->adapter->scsi_host) {
shost_for_each_device(sdev, adapter->scsi_host) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||

View file

@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
length = min((u16)sizeof(struct qdio_buffer),
(u16)ZFCP_DBF_PAY_MAX_REC);
while ((char *)pl[payload->counter] && payload->counter < scount) {
while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
payload->counter++;
@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
spin_unlock_irqrestore(&dbf->pay_lock, flags);
}
/**
* zfcp_dbf_hba_basic - trace event for basic adapter events
* @adapter: pointer to struct zfcp_adapter
*/
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_hba *rec = &dbf->hba_buf;
unsigned long flags;
spin_lock_irqsave(&dbf->hba_lock, flags);
memset(rec, 0, sizeof(*rec));
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
rec->id = ZFCP_DBF_HBA_BASIC;
debug_event(dbf->hba, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,

View file

@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
ZFCP_DBF_HBA_BASIC = 4,
};
/**

View file

@ -77,6 +77,7 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
@ -204,6 +205,7 @@ struct zfcp_port {
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list; /* head of logical unit list */
rwlock_t unit_list_lock; /* unit list lock */
atomic_t units; /* zfcp_unit count */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */

View file

@ -1230,7 +1230,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
queue_work(adapter->work_queue, &adapter->scan_work);
zfcp_fc_conditional_port_scan(adapter);
queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);

View file

@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@ -98,6 +99,8 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
extern void zfcp_fc_sym_name_update(struct work_struct *);
extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
/* zfcp_fsf.c */
extern struct kmem_cache *zfcp_fsf_qtcb_cache;
@ -158,6 +161,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
extern struct attribute_group zfcp_sysfs_unit_attrs;
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern struct attribute_group zfcp_sysfs_port_attrs;
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];

View file

@ -26,6 +26,27 @@ static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_FAB] = 0x000000,
};
static bool no_auto_port_rescan;
module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (no_auto_port_rescan)
return;
queue_work(adapter->work_queue, &adapter->scan_work);
}
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
if (!no_auto_port_rescan)
return;
queue_work(adapter->work_queue, &adapter->scan_work);
}
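Both helpers queue the same scan_work; the parameter only decides which trigger is honored. Since it is registered writable (mode 0600), the behavior can be chosen at load time (modprobe zfcp no_auto_port_rescan=1) or flipped later through /sys/module/zfcp/parameters/no_auto_port_rescan.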
/**
* zfcp_fc_post_event - post event to userspace via fc_transport
* @work: work struct with enqueued events
@ -206,7 +227,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
*(u32 *)page);
}
queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)

View file

@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
return;
}
zfcp_dbf_hba_fsf_uss("fssrh_2", req);
zfcp_dbf_hba_fsf_uss("fssrh_4", req);
switch (sr_buf->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
@ -257,7 +257,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
zfcp_cfdc_adapter_access_changed(adapter);
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
queue_work(adapter->work_queue, &adapter->scan_work);
zfcp_fc_conditional_port_scan(adapter);
break;
case FSF_STATUS_READ_CFDC_UPDATED:
zfcp_cfdc_adapter_access_changed(adapter);
@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
}
}
#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
fdmi_speed |= FC_PORTSPEED_1GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
fdmi_speed |= FC_PORTSPEED_2GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
fdmi_speed |= FC_PORTSPEED_4GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
fdmi_speed |= FC_PORTSPEED_10GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
fdmi_speed |= FC_PORTSPEED_8GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
fdmi_speed |= FC_PORTSPEED_16GBIT;
if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
return fdmi_speed;
}
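The translation table is needed because the FSF and fc_host speed encodings mostly line up but disagree on some bits: for instance, 4 Gbit is bit 2 (0x04) in the FSF encoding above, while in the FC transport encoding that bit value means 10 Gbit. Passing bottom->fc_link_speed through raw, as the code below used to, therefore mislabels such links.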
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_name(shost) = nsp->fl_wwpn;
fc_host_node_name(shost) = nsp->fl_wwnn;
fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) = bottom->fc_link_speed;
fc_host_speed(shost) =
zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
adapter->hydra_version = bottom->adapter_type;
@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) = bottom->supported_speed;
fc_host_supported_speeds(shost) =
zfcp_fsf_convert_portspeed(bottom->supported_speed);
memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@ -771,12 +801,14 @@ out:
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_scsi_dev *zfcp_sdev;
union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_res("fsscth1", req);
zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED |
ZFCP_STATUS_LUN_SHARED |
@ -1856,11 +1890,13 @@ out:
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_scsi_dev *zfcp_sdev;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
struct latency_cont *lat = NULL;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;
@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
!(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
zfcp_sdev = sdev_to_zfcp(scsi->device);
blktrc.flags |= ZFCP_BLK_LAT_VALID;
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
struct scsi_cmnd *scmnd = req->data;
struct scsi_device *sdev = scmnd->device;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
return;
zfcp_sdev = sdev_to_zfcp(sdev);
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:

View file

@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
struct qdio_buffer_element *sbale;
int sbal_no, sbal_idx;
void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
u64 req_id;
u8 scount;
if (unlikely(qdio_err)) {
memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
if (zfcp_adapter_multi_buffer_active(adapter)) {
void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
struct qdio_buffer_element *sbale;
u64 req_id;
u8 scount;
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
req_id = (u64) sbale->addr;
scount = sbale->scount + 1; /* incl. signaling SBAL */
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %

View file

@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (atomic_read(&port->units) > 0) {
retval = -EBUSY;
mutex_unlock(&zfcp_sysfs_port_units_mutex);
goto out;
}
/* port is about to be removed, so no more unit_add */
atomic_set(&port->units, -1);
mutex_unlock(&zfcp_sysfs_port_units_mutex);
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
int retval;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_add(port, fcp_lun))
return -EINVAL;
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
return count;
}

View file

@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
put_device(&unit->port->dev);
atomic_dec(&unit->port->units);
kfree(unit);
}
@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
int retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
if (atomic_read(&port->units) == -1) {
/* port is already gone */
retval = -ENODEV;
goto out;
}
unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
return -EEXIST;
retval = -EEXIST;
goto out;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
return -ENOMEM;
if (!unit) {
retval = -ENOMEM;
goto out;
}
unit->port = port;
unit->fcp_lun = fcp_lun;
@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
return -ENOMEM;
retval = -ENOMEM;
goto out;
}
get_device(&port->dev);
if (device_register(&unit->dev)) {
put_device(&unit->dev);
return -ENOMEM;
retval = -ENOMEM;
goto out;
}
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->dev);
return -EINVAL;
retval = -EINVAL;
goto out;
}
atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
zfcp_unit_scsi_scan(unit);
return 0;
out:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
return retval;
}
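Taken together with the sysfs hunk above, the counter implements a small handshake: zfcp_unit_add only increments port->units while holding zfcp_sysfs_port_units_mutex, port_remove refuses with -EBUSY while the count is positive, and the -1 sentinel it stores afterwards makes any later unit_add fail with -ENODEV instead of attaching a unit to a half-removed port. The matching decrement sits in zfcp_unit_release, once the unit's device is finally released.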
/**

View file

@ -48,7 +48,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
}
if (sreset & BE2_SET_RESET) {
printk(KERN_ERR "Soft Reset did not deassert\n");
printk(KERN_ERR DRV_NAME
" Soft Reset did not deassert\n");
return -EIO;
}
pconline1 = BE2_MPU_IRAM_ONLINE;
@ -67,7 +68,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
i++;
}
if (sreset & BE2_SET_RESET) {
printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
printk(KERN_ERR DRV_NAME
" MPU Online Soft Reset did not deassert\n");
return -EIO;
}
return 0;
@ -93,8 +95,9 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
}
if ((status & 0x80000000) || (!num_loop)) {
printk(KERN_ERR "Failed in be_chk_reset_complete"
"status = 0x%x\n", status);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : Failed in be_chk_reset_complete"
"status = 0x%x\n", status);
return -EIO;
}
@ -169,6 +172,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
be_dws_le_to_cpu(compl, 4);
@ -177,9 +181,12 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
if (compl_status != MCC_STATUS_SUCCESS) {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
dev_err(&ctrl->pdev->dev,
"error in cmd completion: status(compl/extd)=%d/%d\n",
compl_status, extd_status);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
compl_status, extd_status);
return -EBUSY;
}
return 0;
@ -233,22 +240,29 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
{
switch (evt->port_link_status) {
case ASYNC_EVENT_LINK_DOWN:
SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
evt->physical_port);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Link Down on Physical Port %d\n",
evt->physical_port);
phba->state |= BE_ADAPTER_LINK_DOWN;
iscsi_host_for_each_session(phba->shost,
be2iscsi_fail_session);
break;
case ASYNC_EVENT_LINK_UP:
phba->state = BE_ADAPTER_UP;
SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
evt->physical_port);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Link UP on Physical Port %d\n",
evt->physical_port);
break;
default:
SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
"Physical Port %d\n",
evt->port_link_status,
evt->physical_port);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Unexpected Async Notification %d on"
"Physical Port %d\n",
evt->port_link_status,
evt->physical_port);
}
}
@ -279,9 +293,11 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
beiscsi_async_link_state_process(phba,
(struct be_async_event_link_state *) compl);
else
SE_DEBUG(DBG_LVL_1,
" Unsupported Async Event, flags"
" = 0x%08x\n", compl->flags);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG |
BEISCSI_LOG_MBOX,
"BC_%d : Unsupported Async Event, flags"
" = 0x%08x\n", compl->flags);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(ctrl, compl);
@ -312,7 +328,10 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
udelay(100);
}
if (i == mcc_timeout) {
dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : mccq poll timed out\n");
return -EBUSY;
}
return 0;
@ -338,7 +357,11 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
break;
if (cnt > 12000000) {
dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : mbox_db poll timed out\n");
return -EBUSY;
}
@ -360,6 +383,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val |= MPU_MAILBOX_DB_HI_MASK;
@ -368,7 +392,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : be_mbox_db_ready_wait failed\n");
return status;
}
val = 0;
@ -379,18 +406,27 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
status = be_mbox_db_ready_wait(ctrl);
if (status != 0) {
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : be_mbox_db_ready_wait failed\n");
return status;
}
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
if (status) {
SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : After be_mcc_compl_process\n");
return status;
}
} else {
dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : Invalid Mailbox Completion\n");
return -EBUSY;
}
return 0;
@ -436,7 +472,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
if (status)
return status;
} else {
dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : invalid mailbox completion\n");
return -EBUSY;
}
return 0;
@ -528,7 +567,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@ -563,10 +601,10 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
u8 *endian_check;
SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@ -583,7 +621,8 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
status = be_mbox_notify(ctrl);
if (status)
SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_initialize Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
@ -596,11 +635,11 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_cq_create *req = embedded_payload(wrb);
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
struct be_dma_mem *q_mem = &cq->dma_mem;
void *ctxt = &req->context;
int status;
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@ -608,8 +647,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
if (!q_mem->va)
SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
@ -633,8 +670,10 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
} else
SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
status);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : In be_cmd_cq_create, status=ox%08x\n",
status);
spin_unlock(&ctrl->mbox_lock);
return status;
@ -700,10 +739,14 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
u8 subsys = 0, opcode = 0;
int status;
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BC_%d : In beiscsi_cmd_q_destroy "
"queue_type : %d\n", queue_type);
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@ -759,7 +802,6 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@ -830,6 +872,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_post_sgl_pages_req *req = embedded_payload(wrb);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
unsigned int curr_pages;
u32 internal_page_offset = 0;
@ -860,8 +903,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (status) {
SE_DEBUG(DBG_LVL_1,
"FW CMD to map iscsi frags failed.\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : FW CMD to map iscsi frags failed.\n");
goto error;
}
} while (num_pages > 0);
@ -890,3 +934,45 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
spin_unlock(&ctrl->mbox_lock);
return status;
}
/**
* be_cmd_set_vlan()- Configure VLAN parameters on the adapter
* @phba: device priv structure instance
* @vlan_tag: TAG to be set
*
* Set the VLAN_TAG for the adapter or Disable VLAN on adapter
*
* returns
* TAG for the MBX Cmd
* **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
unsigned int tag = 0;
struct be_mcc_wrb *wrb;
struct be_cmd_set_vlan_req *req;
struct be_ctrl_info *ctrl = &phba->ctrl;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
sizeof(*req));
req->interface_hndl = phba->interface_handle;
req->vlan_priority = vlan_tag;
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
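be_cmd_set_vlan() hands back an MCC tag rather than a completion status, so the caller is expected to sleep on the tag and decode the status words, the same pattern the tag-based commands in the iSCSI hunks below use (compare beiscsi_get_initname()). A sketch of that consumption; the mcc_wait/mcc_numtag handling mirrors the driver's existing usage and should be treated as illustrative:

    static int example_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
    {
            unsigned int tag, status, extd_status;

            tag = be_cmd_set_vlan(phba, vlan_tag);
            if (!tag)
                    return -EBUSY;  /* no free MCC tag */

            wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                     phba->ctrl.mcc_numtag[tag] & 0x80000000);
            extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
            status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
            free_mcc_tag(&phba->ctrl, tag);

            return (status || extd_status) ? -EAGAIN : 0;
    }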

View file

@ -348,6 +348,23 @@ struct be_cmd_get_boot_target_resp {
int boot_session_handle;
};
struct be_cmd_reopen_session_req {
struct be_cmd_req_hdr hdr;
#define BE_REOPEN_ALL_SESSIONS 0x00
#define BE_REOPEN_BOOT_SESSIONS 0x01
#define BE_REOPEN_A_SESSION 0x02
u16 reopen_type;
u16 rsvd;
u32 session_handle;
} __packed;
struct be_cmd_reopen_session_resp {
struct be_cmd_resp_hdr hdr;
u32 rsvd;
u32 session_handle;
} __packed;
struct be_cmd_mac_query_req {
struct be_cmd_req_hdr hdr;
u8 type;
@ -432,6 +449,12 @@ struct be_cmd_get_def_gateway_resp {
struct ip_addr_format ip_addr;
} __packed;
#define BEISCSI_VLAN_DISABLE 0xFFFF
struct be_cmd_set_vlan_req {
struct be_cmd_req_hdr hdr;
u32 interface_hndl;
u32 vlan_priority;
} __packed;
/******************** Create CQ ***************************/
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@ -671,6 +694,9 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
bool is_link_state_evt(u32 trailer);
/* Configuration Functions */
int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
struct be_default_pdu_context {
u32 dw[4];
} __packed;
@ -911,6 +937,7 @@ struct be_cmd_get_all_if_id_req {
#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52

View file

@ -50,21 +50,27 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
struct beiscsi_session *beiscsi_sess;
struct beiscsi_io_task *io_task;
SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
if (!ep) {
SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
printk(KERN_ERR
"beiscsi_session_create: invalid ep\n");
return NULL;
}
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
shost = phba->shost;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_session_create\n");
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
"Max cmds per session supported is %d. Using %d. "
"\n", cmds_max,
beiscsi_ep->phba->params.wrbs_per_cxn,
beiscsi_ep->phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Cannot handle %d cmds."
"Max cmds per session supported is %d. Using %d."
"\n", cmds_max,
beiscsi_ep->phba->params.wrbs_per_cxn,
beiscsi_ep->phba->params.wrbs_per_cxn);
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
}
@ -102,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
struct iscsi_session *sess = cls_session->dd_data;
struct beiscsi_session *beiscsi_sess = sess->dd_data;
SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
printk(KERN_INFO "In beiscsi_session_destroy\n");
pci_pool_destroy(beiscsi_sess->bhs_pool);
iscsi_session_teardown(cls_session);
}
@ -123,11 +129,13 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
struct iscsi_session *sess;
struct beiscsi_session *beiscsi_sess;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
"from iscsi layer=%d\n", cid);
shost = iscsi_session_to_shost(cls_session);
phba = iscsi_host_priv(shost);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_create ,cid"
"from iscsi layer=%d\n", cid);
cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
if (!cls_conn)
return NULL;
@ -154,12 +162,15 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
unsigned int cid)
{
if (phba->conn_table[cid]) {
SE_DEBUG(DBG_LVL_1,
"Connection table already occupied. Detected clash\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Connection table already occupied. Detected clash\n");
return -EINVAL;
} else {
SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n",
cid, beiscsi_conn);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
cid, beiscsi_conn);
phba->conn_table[cid] = beiscsi_conn;
}
return 0;
@ -184,7 +195,6 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
return -EINVAL;
@ -195,17 +205,21 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EINVAL;
if (beiscsi_ep->phba != phba) {
SE_DEBUG(DBG_LVL_8,
"beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
return -EEXIST;
}
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n",
beiscsi_conn, conn, beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
beiscsi_conn, conn, beiscsi_ep->ep_cid);
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
}
@ -219,8 +233,9 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
ISCSI_IFACE_TYPE_IPV4,
0, 0);
if (!phba->ipv4_iface) {
shost_printk(KERN_ERR, phba->shost, "Could not "
"create default IPv4 address.\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Could not "
"create default IPv4 address.\n");
return -ENODEV;
}
@ -237,8 +252,9 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
ISCSI_IFACE_TYPE_IPV6,
0, 0);
if (!phba->ipv6_iface) {
shost_printk(KERN_ERR, phba->shost, "Could not "
"create default IPv6 address.\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Could not "
"create default IPv6 address.\n");
return -ENODEV;
}
@ -299,12 +315,14 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
iface_ip = nla_data(nla);
break;
default:
shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
iface_param->param);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Unsupported param %d\n",
iface_param->param);
}
if (!iface_ip || !iface_subnet) {
shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : IP and Subnet Mask required\n");
return -EINVAL;
}
@ -314,6 +332,51 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
return ret;
}
/**
* beiscsi_set_vlan_tag()- Set the VLAN TAG
* @shost: Scsi Host for the driver instance
* @iface_param: Interface paramters
*
* Set the VLAN TAG for the adapter or disable
* the VLAN config
*
* returns
* Success: 0
* Failure: Non-Zero Value
**/
static int
beiscsi_set_vlan_tag(struct Scsi_Host *shost,
struct iscsi_iface_param_info *iface_param)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int ret = 0;
/* Get the Interface Handle */
if (mgmt_get_all_if_id(phba)) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Getting Interface Handle Failed\n");
return -EIO;
}
switch (iface_param->param) {
case ISCSI_NET_PARAM_VLAN_ENABLED:
if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
break;
case ISCSI_NET_PARAM_VLAN_TAG:
ret = mgmt_set_vlan(phba,
*((uint16_t *)iface_param->value));
break;
default:
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : Unkown Param Type : %d\n",
iface_param->param);
return -ENOSYS;
}
return ret;
}
static int
beiscsi_set_ipv4(struct Scsi_Host *shost,
struct iscsi_iface_param_info *iface_param,
@ -335,8 +398,9 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
ret = beiscsi_set_static_ip(shost, iface_param,
data, dt_len);
else
shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
iface_param->value[0]);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Invalid BOOTPROTO: %d\n",
iface_param->value[0]);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
@ -349,9 +413,14 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
ret = beiscsi_set_static_ip(shost, iface_param,
data, dt_len);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
case ISCSI_NET_PARAM_VLAN_TAG:
ret = beiscsi_set_vlan_tag(shost, iface_param);
break;
default:
shost_printk(KERN_ERR, shost, "Param %d not supported\n",
iface_param->param);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Param %d not supported\n",
iface_param->param);
}
return ret;
@ -379,8 +448,9 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
ISCSI_BOOTPROTO_STATIC);
break;
default:
shost_printk(KERN_ERR, shost, "Param %d not supported\n",
iface_param->param);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Param %d not supported\n",
iface_param->param);
}
return ret;
@ -390,6 +460,7 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
void *data, uint32_t dt_len)
{
struct iscsi_iface_param_info *iface_param = NULL;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct nlattr *attrib;
uint32_t rm_len = dt_len;
int ret = 0 ;
@ -404,9 +475,11 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
* BE2ISCSI only supports 1 interface
*/
if (iface_param->iface_num) {
shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
"Only iface_num 0 is supported.\n",
iface_param->iface_num);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Invalid iface_num %d."
"Only iface_num 0 is supported.\n",
iface_param->iface_num);
return -EINVAL;
}
@ -420,9 +493,9 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
data, dt_len);
break;
default:
shost_printk(KERN_ERR, shost,
"Invalid iface type :%d passed\n",
iface_param->iface_type);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Invalid iface type :%d passed\n",
iface_param->iface_type);
break;
}
@ -465,6 +538,27 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
(if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
? "Disabled" : "Enabled");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
(if_info.vlan_priority &
ISCSI_MAX_VLAN_ID));
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL;
else
len = sprintf(buf, "%d\n",
((if_info.vlan_priority >> 13) &
ISCSI_MAX_VLAN_PRIORITY));
break;
default:
WARN_ON(1);
}
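A worked decode, assuming the usual transport masks (ISCSI_MAX_VLAN_ID = 0xFFF, ISCSI_MAX_VLAN_PRIORITY = 0x7): a stored vlan_priority of 0x600A reports ID 0x00A (10) through ISCSI_NET_PARAM_VLAN_ID and priority (0x600A >> 13) & 7 = 3 through ISCSI_NET_PARAM_VLAN_PRIORITY, matching the 802.1Q TCI layout of a 3-bit priority field above the 12-bit VLAN ID, while the reserved value 0xFFFF (BEISCSI_VLAN_DISABLE) makes both queries return -EINVAL.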
@ -486,6 +580,9 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
case ISCSI_NET_PARAM_IPV4_SUBNET:
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
case ISCSI_NET_PARAM_IPV6_ADDR:
case ISCSI_NET_PARAM_VLAN_ENABLED:
case ISCSI_NET_PARAM_VLAN_ID:
case ISCSI_NET_PARAM_VLAN_PRIORITY:
len = be2iscsi_get_if_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
@ -518,7 +615,10 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
beiscsi_log(beiscsi_ep->phba, KERN_INFO,
BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_ep_get_param,"
" param= %d\n", param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@ -541,9 +641,14 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct beiscsi_hba *phba = NULL;
int ret;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_set_param,"
" param= %d\n", param);
ret = iscsi_set_param(cls_conn, param, buf, buflen);
if (ret)
return ret;
@ -593,7 +698,9 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
tag = be_cmd_get_initname(phba);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Getting Initiator Name Failed\n");
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@ -604,9 +711,12 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
"status = %d extd_status = %d\n",
status, extd_status);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BS_%d : MailBox Command Failed with "
"status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
}
@ -650,7 +760,9 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
tag = be_cmd_get_port_speed(phba);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Getting Port Speed Failed\n");
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@ -661,9 +773,12 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
"status = %d extd_status = %d\n",
status, extd_status);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BS_%d : MailBox Command Failed with "
"status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
}
@ -704,20 +819,24 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_get_host_param,"
" param= %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
status = beiscsi_get_macaddr(buf, phba);
if (status < 0) {
SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_get_macaddr Failed\n");
return status;
}
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
status = beiscsi_get_initname(buf, phba);
if (status < 0) {
SE_DEBUG(DBG_LVL_1,
"Retreiving Initiator Name Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Retreiving Initiator Name Failed\n");
return status;
}
break;
@ -728,8 +847,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
case ISCSI_HOST_PARAM_PORT_SPEED:
status = beiscsi_get_port_speed(shost);
if (status) {
SE_DEBUG(DBG_LVL_1,
"Retreiving Port Speed Failed\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Retreiving Port Speed Failed\n");
return status;
}
status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
@ -746,7 +865,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
int rc;
if (strlen(phba->mac_address))
return strlcpy(buf, phba->mac_address, PAGE_SIZE);
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
memset(&resp, 0, sizeof(resp));
rc = mgmt_get_nic_conf(phba, &resp);
@ -768,8 +887,12 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct beiscsi_hba *phba = NULL;
phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_get_stats\n");
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->dataout_pdus = conn->dataout_pdus_cnt;
@ -829,11 +952,16 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
beiscsi_log(beiscsi_conn->phba, KERN_INFO,
BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_start\n");
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep)
SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
beiscsi_log(beiscsi_conn->phba, KERN_ERR,
BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_start , no beiscsi_ep\n");
beiscsi_conn->login_in_progress = 0;
beiscsi_set_params_for_offld(beiscsi_conn, &params);
@ -907,19 +1035,27 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
unsigned int tag, wrb_num;
int ret = -ENOMEM;
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_open_conn\n");
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
if (beiscsi_ep->ep_cid == 0xFFFF) {
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : No free cid available\n");
return ret;
}
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n",
beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
beiscsi_ep->ep_cid);
phba->ep_array[beiscsi_ep->ep_cid -
phba->fw_config.iscsi_cid_start] = ep;
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
phba->params.cxns_per_ctrl * 2)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Failed in allocate iscsi cid\n");
goto free_ep;
}
@ -928,9 +1064,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
sizeof(struct tcp_connect_and_offload_in),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for mgmt_open_connection"
"\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Failed to allocate memory for"
" mgmt_open_connection\n");
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
return -ENOMEM;
}
@ -938,9 +1076,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@ -953,9 +1092,12 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
" status = %d extd_status = %d\n",
status, extd_status);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BS_%d : mgmt_open_connection Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@ -968,7 +1110,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
beiscsi_ep->cid_vld = 1;
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Success\n");
}
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
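The tag-based MCC wait pattern above recurs throughout this driver; as a condensed sketch restating the calls in this function (error-path cleanup elided):

tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
if (!tag)
	return -EAGAIN;			/* no free MCC tag/WRB */
/* the MCC completion fills mcc_numtag[tag] and wakes the waiter */
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
			 phba->ctrl.mcc_numtag[tag]);
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status      =  phba->ctrl.mcc_numtag[tag] & 0x000000FF;
free_mcc_tag(&phba->ctrl, tag);		/* recycle the tag */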
@ -996,18 +1139,19 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct iscsi_endpoint *ep;
int ret;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
if (shost)
phba = iscsi_host_priv(shost);
else {
ret = -ENXIO;
SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
printk(KERN_ERR
"beiscsi_ep_connect shost is NULL\n");
return ERR_PTR(ret);
}
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : The Adapter state is Not UP\n");
return ERR_PTR(ret);
}
@ -1022,7 +1166,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
beiscsi_ep->openiscsi_ep = ep;
ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
if (ret) {
SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Failed in beiscsi_open_conn\n");
goto free_ep;
}
@ -1044,7 +1189,9 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n");
beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_ep_poll\n");
if (beiscsi_ep->cid_vld == 1)
return 1;
else
@ -1064,8 +1211,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
if (!tag) {
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n",
beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : upload failed for cid 0x%x\n",
beiscsi_ep->ep_cid);
ret = -EAGAIN;
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@ -1086,7 +1235,8 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
if (phba->conn_table[cid])
phba->conn_table[cid] = NULL;
else {
SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n");
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : Connection table Not occupied.\n");
return -EINVAL;
}
return 0;
@ -1104,38 +1254,40 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_hba *phba;
unsigned int tag;
uint8_t mgmt_invalidate_flag, tcp_upload_flag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
beiscsi_ep->ep_cid);
if (!beiscsi_ep->conn) {
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no "
"beiscsi_ep\n");
return;
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
} else {
mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
beiscsi_ep->ep_cid);
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid, 1,
savecfg_flag);
beiscsi_ep->ep_cid,
mgmt_invalidate_flag,
savecfg_flag);
if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
free_mcc_tag(&phba->ctrl, tag);
}
beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
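Restating the branch at the top of beiscsi_ep_disconnect() as a table (no new behavior):

/*
 * beiscsi_ep->conn set  -> connection fully up: suspend the queue,
 *                          invalidate with TCP RST (~BEISCSI_NO_RST_ISSUE),
 *                          CONNECTION_UPLOAD_GRACEFUL
 * beiscsi_ep->conn NULL -> login never completed:
 *                          BEISCSI_NO_RST_ISSUE, CONNECTION_UPLOAD_ABORT
 */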
@ -1152,6 +1304,9 @@ umode_t be2iscsi_attr_is_visible(int param_type, int param)
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
case ISCSI_NET_PARAM_IPV4_GW:
case ISCSI_NET_PARAM_IPV6_ADDR:
case ISCSI_NET_PARAM_VLAN_ID:
case ISCSI_NET_PARAM_VLAN_PRIORITY:
case ISCSI_NET_PARAM_VLAN_ENABLED:
return S_IRUGO;
default:
return 0;

Diff for this file not shown because of its size.

View file

@ -24,6 +24,8 @@
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@ -34,7 +36,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
#define BUILD_STR "4.2.162.0"
#define BUILD_STR "4.4.58.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@ -84,23 +86,7 @@
#define MAX_CMD_SZ 65536
#define IIOC_SCSI_DATA 0x05 /* Write Operation */
#define DBG_LVL 0x00000001
#define DBG_LVL_1 0x00000001
#define DBG_LVL_2 0x00000002
#define DBG_LVL_3 0x00000004
#define DBG_LVL_4 0x00000008
#define DBG_LVL_5 0x00000010
#define DBG_LVL_6 0x00000020
#define DBG_LVL_7 0x00000040
#define DBG_LVL_8 0x00000080
#define SE_DEBUG(debug_mask, fmt, args...) \
do { \
if (debug_mask & DBG_LVL) { \
printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
printk(fmt, ##args); \
} \
} while (0);
#define INVALID_SESS_HANDLE 0xFFFFFFFF
#define BE_ADAPTER_UP 0x00000000
#define BE_ADAPTER_LINK_DOWN 0x00000001
@ -351,6 +337,8 @@ struct beiscsi_hba {
struct mgmt_session_info boot_sess;
struct invalidate_command_table inv_tbl[128];
unsigned int attr_log_enable;
};
struct beiscsi_session {
@ -860,4 +848,20 @@ struct hwi_context_memory {
struct hwi_async_pdu_context *pasync_ctx;
};
/* Logging related definitions */
#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
#define BEISCSI_LOG_MISC 0x0004 /* Miscellaneous Events */
#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
#define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \
uint32_t log_value = phba->attr_log_enable; \
if (((mask) & log_value) || (level[1] <= '3')) \
shost_printk(level, phba->shost, \
fmt, __LINE__, ##arg); \
} while (0)
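A usage sketch for the new macro (io_task and the messages are hypothetical): printk level strings carry the severity digit at index 1, so level[1] <= '3' lets KERN_ERR and anything more severe through unconditionally, while KERN_INFO output is gated by attr_log_enable. Note the leading %d in every format consumes the __LINE__ the macro appends.

/* Gated: prints only when BEISCSI_LOG_IO is set in attr_log_enable
 * (KERN_INFO has level[1] == '6', which is > '3'). */
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
	    "BS_%d : queued io_task %p\n", io_task);

/* Unconditional: KERN_ERR has level[1] == '3'. */
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
	    "BS_%d : io_task alloc failed\n");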
#endif

View file

@ -23,6 +23,53 @@
#include "be_mgmt.h"
#include "be_iscsi.h"
/**
* mgmt_reopen_session()- Reopen a session based on reopen_type
* @phba: Device priv structure instance
* @reopen_type: Type of reopen_session FW should do.
* @sess_handle: Session Handle of the session to be re-opened
*
* return
* the TAG used for MBOX Command
*
**/
unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
unsigned int reopen_type,
unsigned int sess_handle)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_reopen_session_req *req;
unsigned int tag = 0;
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
sizeof(struct be_cmd_reopen_session_resp));
/* set the reopen_type, sess_handle */
req->reopen_type = reopen_type;
req->session_handle = sess_handle;
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
@ -30,7 +77,10 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
struct be_cmd_get_boot_target_req *req;
unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
@ -62,7 +112,10 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
struct be_cmd_get_session_resp *resp;
struct be_sge *sge;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In beiscsi_get_session_info\n");
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
@ -121,16 +174,16 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
SE_DEBUG(DBG_LVL_8,
"FW reported MAX CXNS as %d\t"
"Max Supported = %d.\n",
phba->fw_config.iscsi_cid_count,
BE2_MAX_SESSIONS);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BG_%d : FW reported MAX CXNS as %d\t"
"Max Supported = %d.\n",
phba->fw_config.iscsi_cid_count,
BE2_MAX_SESSIONS);
phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
}
} else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_fw_config\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_get_fw_config\n");
}
spin_unlock(&ctrl->mbox_lock);
@ -150,9 +203,9 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
sizeof(struct be_mgmt_controller_attributes),
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for mgmt_check_supported_fw"
"\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed to allocate memory for "
"mgmt_check_supported_fw\n");
return -ENOMEM;
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
@ -169,18 +222,23 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl);
if (!status) {
struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
resp->params.hba_attribs.flashrom_version_string);
SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
resp->params.hba_attribs.firmware_version_string);
SE_DEBUG(DBG_LVL_8,
"Developer Build, not performing version check...\n");
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BG_%d : Firmware Version of CMD : %s\n"
"Firmware Version is : %s\n"
"Developer Build, not performing version check...\n",
resp->params.hba_attribs
.flashrom_version_string,
resp->params.hba_attribs.
firmware_version_string);
phba->fw_config.iscsi_features =
resp->params.hba_attribs.iscsi_features;
SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
phba->fw_config.iscsi_features);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : phba->fw_config.iscsi_features = %d\n",
phba->fw_config.iscsi_features);
} else
SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_check_supported_fw\n");
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
@ -229,9 +287,10 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
OPCODE_COMMON_READ_FLASH, sizeof(*req));
break;
default:
shost_printk(KERN_WARNING, phba->shost,
"Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.
h_vendor.vendor_cmd[0]);
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Unsupported cmd = 0x%x\n\n",
bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
spin_unlock(&ctrl->mbox_lock);
return -ENOSYS;
}
@ -275,8 +334,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
status = be_mcc_notify_wait(phba);
if (status)
shost_printk(KERN_WARNING, phba->shost,
" mgmt_epfw_cleanup , FAILED\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
"BG_%d : mgmt_epfw_cleanup , FAILED\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
@ -459,8 +518,9 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
} else {
shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
dst_addr->sa_family);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : unknown addr family %d\n",
dst_addr->sa_family);
spin_unlock(&ctrl->mbox_lock);
free_mcc_tag(&phba->ctrl, tag);
return -EINVAL;
@ -471,7 +531,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
if (phba->nxt_cqid == phba->num_cpus)
phba->nxt_cqid = 0;
req->cq_id = phwi_context->be_cq[i].id;
SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
req->defq_id = def_hdr_id;
req->hdr_ring_id = def_hdr_id;
req->data_ring_id = def_data_id;
@ -506,8 +567,8 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
if (!status)
phba->interface_handle = pbe_allid->if_hndl_list[0];
else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_all_if_id\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed in mgmt_get_all_if_id\n");
}
spin_unlock(&ctrl->mbox_lock);
@ -550,9 +611,10 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1,
"mgmt_exec_nonemb_cmd Failed status = %d"
"extd_status = %d\n", status, extd_status);
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
"extd_status = %d\n", status, extd_status);
rc = -EIO;
goto free_tag;
}
@ -573,7 +635,8 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
{
cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
return -ENOMEM;
}
memset(cmd->va, 0, size);
@ -629,8 +692,8 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
if (rc < 0)
shost_printk(KERN_WARNING, phba->shost,
"Failed to Modify existing IP Address\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Modify existing IP Address\n");
return rc;
}
@ -684,8 +747,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
if (if_info.dhcp_state) {
shost_printk(KERN_WARNING, phba->shost,
"DHCP Already Enabled\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : DHCP Already Enabled\n");
return 0;
}
/* The ip_param->len is 1 in DHCP case. Setting
@ -712,8 +775,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
if (rc < 0) {
shost_printk(KERN_WARNING, phba->shost,
"Failed to Delete existing dhcp\n");
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Delete existing dhcp\n");
return rc;
}
}
@ -732,8 +796,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
if (rc) {
shost_printk(KERN_WARNING, phba->shost,
"Failed to Get Gateway Addr\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Get Gateway Addr\n");
return rc;
}
@ -743,8 +807,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
IP_ACTION_DEL, IP_V4_LEN);
if (rc) {
shost_printk(KERN_WARNING, phba->shost,
"Failed to clear Gateway Addr Set\n");
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG,
"BG_%d : Failed to clear Gateway Addr Set\n");
return rc;
}
}
@ -783,8 +848,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
if (rt_val) {
shost_printk(KERN_WARNING, phba->shost,
"Failed to Get Gateway Addr\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Get Gateway Addr\n");
return rt_val;
}
@ -793,8 +858,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
gateway_param->len);
if (rt_val) {
shost_printk(KERN_WARNING, phba->shost,
"Failed to clear Gateway Addr Set\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to clear Gateway Addr Set\n");
return rt_val;
}
}
@ -804,8 +869,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
gateway_param->len);
if (rt_val)
shost_printk(KERN_WARNING, phba->shost,
"Failed to Set Gateway Addr\n");
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Set Gateway Addr\n");
return rt_val;
}
@ -924,3 +989,150 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
spin_unlock(&ctrl->mbox_lock);
return tag;
}
/**
* be_mgmt_get_boot_shandle()- Get the session handle
* @phba: device priv structure instance
* @s_handle: session handle returned for boot session.
*
* Get the boot target session handle. In crashdump mode the
* driver has to issue an MBX Cmd for the FW to log in to the
* boot target.
*
* return
* Success: 0
* Failure: Non-Zero value
*
**/
int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
unsigned int *s_handle)
{
struct be_cmd_get_boot_target_resp *boot_resp;
struct be_mcc_wrb *wrb;
unsigned int tag, wrb_num;
uint8_t boot_retry = 3;
unsigned short status, extd_status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
do {
/* Get the Boot Target Session Handle and Count */
tag = mgmt_get_boot_target(phba);
if (!tag) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BG_%d : Getting Boot Target Info Failed\n");
return -EAGAIN;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_get_boot_target Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EBUSY;
}
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
boot_resp = embedded_payload(wrb);
/* Check if there are any boot targets configured */
if (!boot_resp->boot_session_count) {
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d ;No boot targets configured\n");
return -ENXIO;
}
/* FW returns the session handle of the boot session */
if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
*s_handle = boot_resp->boot_session_handle;
return 0;
}
/* Issue MBX Cmd to FW to login to the boot target */
tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
INVALID_SESS_HANDLE);
if (!tag) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_reopen_session Failed\n");
return -EAGAIN;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_reopen_session Failed"
" status = %d extd_status = %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EBUSY;
}
free_mcc_tag(&phba->ctrl, tag);
} while (--boot_retry);
/* Couldn't log into the boot target */
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : Login to Boot Target Failed\n");
return -ENXIO;
}
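A hypothetical caller sketch (the boot-discovery path itself is outside this hunk; nonemb_cmd setup is elided):

unsigned int s_handle;

if (!be_mgmt_get_boot_shandle(phba, &s_handle)) {
	/* s_handle is valid: fetch the target details with the
	 * existing helper */
	tag = mgmt_get_session_info(phba, s_handle, &nonemb_cmd);
}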
/**
* mgmt_set_vlan()- Issue and wait for CMD completion
* @phba: device private structure instance
* @vlan_tag: VLAN tag
*
* Issue the MBX Cmd and wait for the completion of the
* command.
*
* returns
* Success: 0
* Failure: Non-Zero Value
**/
int mgmt_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
unsigned int tag, wrb_num;
unsigned short status, extd_status;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
"BG_%d : VLAN Setting Failed\n");
return -EBUSY;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
"BS_%d : status : %d extd_status : %d\n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -EAGAIN;
}
free_mcc_tag(&phba->ctrl, tag);
return 0;
}
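A hypothetical caller sketch, e.g. from an iface-parameter write path:

if (mgmt_set_vlan(phba, vlan_tag) != 0)
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
		    "BS_%d : Failed to set the VLAN\n");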

View file

@ -108,6 +108,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct bsg_job *job,
struct be_dma_mem *nonemb_cmd);
#define BEISCSI_NO_RST_ISSUE 0
struct iscsi_invalidate_connection_params_in {
struct be_cmd_req_hdr hdr;
unsigned int session_handle;
@ -274,6 +275,10 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
unsigned int reopen_type,
unsigned int sess_handle);
unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
u32 boot_session_handle,
struct be_dma_mem *nonemb_cmd);
@ -290,4 +295,10 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
int mgmt_set_gateway(struct beiscsi_hba *phba,
struct iscsi_iface_param_info *gateway_param);
int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
unsigned int *s_handle);
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
#endif

View file

@ -775,7 +775,8 @@ bfa_intx(struct bfa_s *bfa)
if (!intr)
return BFA_TRUE;
bfa_msix_lpu_err(bfa, intr);
if (bfa->intr_enabled)
bfa_msix_lpu_err(bfa, intr);
return BFA_TRUE;
}
@ -803,11 +804,17 @@ bfa_isr_enable(struct bfa_s *bfa)
writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
bfa->iocfc.intr_mask = ~umsk;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
/*
* Set the flag indicating successful enabling of interrupts
*/
bfa->intr_enabled = BFA_TRUE;
}
void
bfa_isr_disable(struct bfa_s *bfa)
{
bfa->intr_enabled = BFA_FALSE;
bfa_isr_mode_set(bfa, BFA_FALSE);
writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
bfa_msix_uninstall(bfa);
@ -1022,7 +1029,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
u8 *dm_kva = NULL;
u64 dm_pa = 0;
int i, per_reqq_sz, per_rspq_sz, dbgsz;
int i, per_reqq_sz, per_rspq_sz;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
@ -1083,11 +1090,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
BFA_CACHELINE_SZ);
/* Claim IOCFC kva memory */
dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
if (dbgsz > 0) {
bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
bfa_mem_kva_curp(iocfc) += dbgsz;
}
bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}
/*
@ -1429,8 +1433,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
/* kva memory setup for IOCFC */
bfa_mem_kva_setup(meminfo, iocfc_kva,
((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}
/*

View file

@ -168,7 +168,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
/*
* bfa_q_deq - dequeue an element from head of the queue
*/
#define bfa_q_deq(_q, _qe) { \
#define bfa_q_deq(_q, _qe) do { \
if (!list_empty(_q)) { \
(*((struct list_head **) (_qe))) = bfa_q_next(_q); \
bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
@ -177,7 +177,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
} else { \
*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
} \
}
} while (0)
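The switch to do { } while (0) is the standard guard for multi-statement macros; a sketch of the if/else that the bare-block form breaks:

if (!list_empty(&free_q))
	bfa_q_deq(&free_q, &qe);	/* now expands as one statement */
else
	qe = NULL;
/* With the old bare { } block, the ';' after the macro call closed
 * the if, leaving a dangling else -- a compile error. */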
/*
* bfa_q_deq_tail - dequeue an element from tail of the queue

View file

@ -93,6 +93,7 @@ struct bfa_lport_cfg_s {
wwn_t pwwn; /* port wwn */
wwn_t nwwn; /* node wwn */
struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
enum bfa_lport_role roles; /* FCS port roles */
u32 rsvd;
bfa_boolean_t preboot_vp; /* vport created from PBC */
@ -192,6 +193,18 @@ struct bfa_lport_stats_s {
u32 ns_gidft_unknown_rsp;
u32 ns_gidft_alloc_wait;
u32 ns_rnnid_sent;
u32 ns_rnnid_accepts;
u32 ns_rnnid_rsp_err;
u32 ns_rnnid_rejects;
u32 ns_rnnid_alloc_wait;
u32 ns_rsnn_nn_sent;
u32 ns_rsnn_nn_accepts;
u32 ns_rsnn_nn_rsp_err;
u32 ns_rsnn_nn_rejects;
u32 ns_rsnn_nn_alloc_wait;
/*
* Mgmt Server stats
*/
@ -410,6 +423,11 @@ struct bfa_rport_remote_link_stats_s {
u32 icc; /* Invalid CRC Count */
};
struct bfa_rport_qualifier_s {
wwn_t pwwn; /* Port WWN */
u32 pid; /* port ID */
u32 rsvd;
};
#define BFA_MAX_IO_INDEX 7
#define BFA_NO_IO_INDEX 9

View file

@ -1279,6 +1279,7 @@ enum {
GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
GS_RFT_ID = 0x0217, /* Register fc4type on ID */
GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
GS_RSNN_NN = 0x0239, /* Register symbolic NN on NN */
GS_RPN_ID = 0x0212, /* Register port name */
GS_RNN_ID = 0x0213, /* Register node name */
GS_RCS_ID = 0x0214, /* Register class of service */
@ -1356,6 +1357,15 @@ struct fcgs_rspnid_req_s {
u8 spn[256]; /* symbolic port name */
};
/*
* RSNN_NN
*/
struct fcgs_rsnn_nn_req_s {
wwn_t node_name; /* Node name */
u8 snn_len; /* symbolic node name length */
u8 snn[256]; /* symbolic node name */
};
/*
* RPN_ID
*/

View file

@ -1251,6 +1251,27 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
u16
fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
wwn_t node_name, u8 *name)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rsnn_nn_req_s *rsnn_nn =
(struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
u32 d_id = bfa_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
rsnn_nn->snn_len = (u8) strlen((char *)name);
strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
u16
fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
{

View file

@ -166,6 +166,8 @@ enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
wwn_t node_name, u8 *name);
u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, enum bfa_lport_role role);

View file

@ -1466,7 +1466,13 @@ bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
struct bfa_itnim_ioprofile_s *ioprofile)
{
struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
struct bfa_fcpim_s *fcpim;
if (!itnim)
return BFA_STATUS_NO_FCPIM_NEXUS;
fcpim = BFA_FCPIM(itnim->bfa);
if (!fcpim->io_profile)
return BFA_STATUS_IOPROFILE_OFF;
@ -1484,6 +1490,10 @@ void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
int j;
if (!itnim)
return;
memset(&itnim->stats, 0, sizeof(itnim->stats));
memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
for (j = 0; j < BFA_IOBUCKET_MAX; j++)

View file

@ -76,6 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
fcs->bfa = bfa;
fcs->bfad = bfad;
fcs->min_cfg = min_cfg;
fcs->num_rport_logins = 0;
bfa->fcs = BFA_TRUE;
fcbuild_init();
@ -118,6 +119,18 @@ bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
port_cfg->pwwn = ioc->attr->pwwn;
}
/*
* Stop FCS operations.
*/
void
bfa_fcs_stop(struct bfa_fcs_s *fcs)
{
bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
bfa_wc_up(&fcs->wc);
bfa_fcs_fabric_modstop(fcs);
bfa_wc_wait(&fcs->wc);
}
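The stop path leans on the bfa_wc wait-counter; a sketch of its assumed semantics, read off the usage here rather than quoted from the API: bfa_wc_init() stores the resume callback and takes an initial reference, each bfa_wc_up() adds a pending operation, each bfa_wc_down() retires one, and bfa_wc_wait() drops the initial reference so the callback fires once the count reaches zero.

bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); /* count = 1 (init ref) */
bfa_wc_up(&fcs->wc);           /* +1: fabric stop in flight */
bfa_fcs_fabric_modstop(fcs);   /* ends with bfa_wc_down() in the
                                * fabric cleanup state (see below) */
bfa_wc_wait(&fcs->wc);         /* -1: drop init ref; at zero,
                                * bfa_fcs_exit_comp(fcs) runs */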
/*
* fcs pbc vport initialization
*/
@ -153,6 +166,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
fcs->driver_info = *driver_info;
bfa_fcs_fabric_psymb_init(&fcs->fabric);
bfa_fcs_fabric_nsymb_init(&fcs->fabric);
}
/*
@ -213,6 +227,8 @@ static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delay(void *cbarg);
static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_delete_comp(void *cbarg);
static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_stop_comp(void *cbarg);
static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
@ -250,6 +266,10 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
/*
* Beginning state before fabric creation.
*/
@ -334,6 +354,11 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
bfa_fcs_fabric_delete(fabric);
break;
case BFA_FCS_FABRIC_SM_STOP:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
bfa_fcs_fabric_stop(fabric);
break;
default:
bfa_sm_fault(fabric->fcs, event);
}
@ -585,6 +610,11 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
bfa_fcs_fabric_delete(fabric);
break;
case BFA_FCS_FABRIC_SM_STOP:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
bfa_fcs_fabric_stop(fabric);
break;
case BFA_FCS_FABRIC_SM_AUTH_FAILED:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
@ -682,7 +712,62 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
}
}
/*
* Fabric is being stopped, awaiting vport stop completions.
*/
static void
bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event)
{
bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
bfa_trc(fabric->fcs, event);
switch (event) {
case BFA_FCS_FABRIC_SM_STOPCOMP:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
break;
case BFA_FCS_FABRIC_SM_LINK_UP:
break;
case BFA_FCS_FABRIC_SM_LINK_DOWN:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
break;
default:
bfa_sm_fault(fabric->fcs, event);
}
}
/*
* Fabric is being stopped, cleanup without FLOGO
*/
static void
bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event)
{
bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
bfa_trc(fabric->fcs, event);
switch (event) {
case BFA_FCS_FABRIC_SM_STOPCOMP:
case BFA_FCS_FABRIC_SM_LOGOCOMP:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
bfa_wc_down(&(fabric->fcs)->wc);
break;
case BFA_FCS_FABRIC_SM_LINK_DOWN:
/*
* Ignore - can get this event if we get notified about IOC down
* before the fabric completion callback is done.
*/
break;
default:
bfa_sm_fault(fabric->fcs, event);
}
}
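For orientation, the transitions these two handlers add (a sketch assembled from the cases above, not new behavior):

/*
 * online   --SM_STOP-->              stopping  (FLOGO path possible)
 * linkdown --SM_STOP-->              cleanup   (no fabric to log out of)
 * stopping --SM_STOPCOMP-->          cleanup + LPS logout (FLOGO)
 * stopping --SM_LINK_DOWN-->         cleanup
 * cleanup  --SM_STOPCOMP/LOGOCOMP--> created + bfa_wc_down(&fcs->wc)
 */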
/*
* fcs_fabric_private fabric private functions
@ -759,6 +844,44 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
/*
* Node Symbolic Name Creation for base port and all vports
*/
void
bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
{
struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
strncpy((char *)&port_cfg->node_sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->node_sym_name,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
/* Driver Version */
strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
strncat((char *)&port_cfg->node_sym_name,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
/* Host machine name */
strncat((char *)&port_cfg->node_sym_name,
(char *)driver_info->host_machine_name,
BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
strncat((char *)&port_cfg->node_sym_name,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
}
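For illustration only (the values are hypothetical and the separator is assumed to be " | "), the assembled node symbolic name is model, driver version, and host name joined by the separator:

/* e.g. "Brocade-825 | 3.1.2.1 | fcs-host-01" */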
/*
* bfa lps login completion callback
*/
@ -918,6 +1041,28 @@ bfa_fcs_fabric_delay(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
}
/*
* Stop all vports and wait for vport stop completions.
*/
static void
bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
{
struct bfa_fcs_vport_s *vport;
struct list_head *qe, *qen;
bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
list_for_each_safe(qe, qen, &fabric->vport_q) {
vport = (struct bfa_fcs_vport_s *) qe;
bfa_wc_up(&fabric->stop_wc);
bfa_fcs_vport_fcs_stop(vport);
}
bfa_wc_up(&fabric->stop_wc);
bfa_fcs_lport_stop(&fabric->bport);
bfa_wc_wait(&fabric->stop_wc);
}
/*
* Computes operating BB_SCN value
*/
@ -978,6 +1123,14 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
}
static void
bfa_fcs_fabric_stop_comp(void *cbarg)
{
struct bfa_fcs_fabric_s *fabric = cbarg;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
}
/*
* fcs_fabric_public fabric public functions
*/
@ -1038,6 +1191,19 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
}
/*
* Fabric module stop -- stop FCS actions
*/
void
bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
{
struct bfa_fcs_fabric_s *fabric;
bfa_trc(fcs, 0);
fabric = &fcs->fabric;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
}
/*
* Fabric module start -- kick starts FCS actions
*/
@ -1219,8 +1385,11 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
return;
}
}
bfa_trc(fabric->fcs, els_cmd->els_code);
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
if (!bfa_fcs_fabric_is_switched(fabric))
bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
bfa_trc(fabric->fcs, fchs->type);
}
/*
@ -1294,7 +1463,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
u16 reqlen;
struct fchs_s fchs;
fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
/*
* Do not expect this failure -- expect remote node to retry
*/
@ -1387,6 +1556,13 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
}
}
void
bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
{
struct bfa_fcs_fabric_s *fabric = uarg;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
}
/*
* Returns FCS vf structure for a given vf_id.
*

View file

@ -62,9 +62,9 @@ struct bfa_fcs_s;
#define N2N_LOCAL_PID 0x010000
#define N2N_REMOTE_PID 0x020000
#define BFA_FCS_RETRY_TIMEOUT 2000
#define BFA_FCS_MAX_NS_RETRIES 5
#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
#define BFA_FCS_MAX_RPORT_LOGINS 1024
struct bfa_fcs_lport_ns_s {
bfa_sm_t sm; /* state machine */
@ -72,6 +72,8 @@ struct bfa_fcs_lport_ns_s {
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
u8 num_rnnid_retries;
u8 num_rsnn_nn_retries;
};
@ -205,6 +207,7 @@ struct bfa_fcs_fabric_s {
struct bfa_lps_s *lps; /* lport login services */
u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
/* attached fabric's ip addr */
struct bfa_wc_s stop_wc; /* wait counter for stop */
};
#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
@ -264,6 +267,7 @@ struct bfa_fcs_fabric_s;
#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
#define bfa_fcs_lport_is_initiator(_lport) \
((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
#define bfa_fcs_lport_get_nrports(_lport) \
@ -286,9 +290,8 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
wwn_t rport_wwns[], int *nrports);
void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
struct bfa_rport_qualifier_s rport[], int *nrports);
wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
int index, int nrports, bfa_boolean_t bwwn);
@ -324,12 +327,17 @@ void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
struct bfa_fcs_lport_s *port, u32 pid);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
struct bfa_fcs_lport_s *port, u32 pid);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
struct bfa_fcs_lport_s *port, wwn_t pwwn);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t nwwn);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport);
void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
@ -338,6 +346,8 @@ void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
struct bfa_fcxp_s *fcxp_alloced);
void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
@ -382,6 +392,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
@ -419,6 +430,7 @@ struct bfa_fcs_rport_s {
struct bfa_fcs_s *fcs; /* fcs instance */
struct bfad_rport_s *rp_drv; /* driver peer instance */
u32 pid; /* port ID of rport */
u32 old_pid; /* PID before rport goes offline */
u16 maxfrsize; /* maximum frame size */
__be16 reply_oxid; /* OX_ID of inbound requests */
enum fc_cos fc_cos; /* FC classes of service supp */
@ -459,7 +471,7 @@ struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t rnwwn);
void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
void bfa_fcs_rport_set_max_logins(u32 max_logins);
void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
struct fchs_s *fchs, u16 len);
void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
@ -505,12 +517,13 @@ struct bfa_fcs_itnim_s {
struct bfa_fcxp_s *fcxp; /* FCXP in use */
struct bfa_itnim_stats_s stats; /* itn statistics */
};
#define bfa_fcs_fcxp_alloc(__fcs) \
bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
NULL, 0, 0, NULL, NULL, NULL, NULL)
#define bfa_fcs_fcxp_alloc(__fcs, __req) \
bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0, \
NULL, NULL, NULL, NULL, __req)
#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
__alloc_cbarg, __req) \
bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
__alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
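The new trailing boolean appears to tell bfa_fcxp_req_rsp_alloc() whether the FCXP backs a driver-originated request (BFA_TRUE, as the PRLI/FDMI/MS senders below pass) or a response to a received frame (BFA_FALSE, as the LS_RJT/ECHO/RNID paths pass), so the right buffer pool is drawn from; this reading is inferred from the call sites, not from a header comment. A sketch:

/* originating a request (may queue via the wait-queue variant) */
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);

/* answering a frame we just received */
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);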
static inline struct bfad_port_s *
bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
@ -592,7 +605,7 @@ bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
@ -676,6 +689,7 @@ struct bfa_fcs_s {
struct bfa_fcs_stats_s stats; /* FCS statistics */
struct bfa_wc_s wc; /* waiting counter */
int fcs_aen_seq;
u32 num_rport_logins;
};
/*
@ -702,6 +716,9 @@ enum bfa_fcs_fabric_event {
BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
BFA_FCS_FABRIC_SM_START = 16, /* from driver */
BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
};
/*
@ -727,6 +744,26 @@ enum rport_event {
RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
RPSM_EVENT_FC4_FCS_ONLINE = 19, /* FC-4 FCS online complete */
};
/*
* fcs_itnim_sm FCS itnim state machine events
*/
enum bfa_fcs_itnim_event {
BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA offline callback */
BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA online callback */
BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
};
/*
@ -741,6 +778,7 @@ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
struct bfa_fcs_driver_info_s *driver_info);
void bfa_fcs_exit(struct bfa_fcs_s *fcs);
void bfa_fcs_stop(struct bfa_fcs_s *fcs);
/*
* bfa fcs vf public functions
@ -766,11 +804,13 @@ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
wwn_t fabric_name);
u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,

View file

@ -40,25 +40,6 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
enum bfa_itnim_aen_event event);
/*
* fcs_itnim_sm FCS itnim state machine events
*/
enum bfa_fcs_itnim_event {
BFA_FCS_ITNIM_SM_ONLINE = 1, /* rport online event */
BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
};
static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
@ -69,6 +50,8 @@ static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
@ -99,7 +82,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
bfa_trc(itnim->fcs, event);
switch (event) {
case BFA_FCS_ITNIM_SM_ONLINE:
case BFA_FCS_ITNIM_SM_FCS_ONLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
itnim->prli_retries = 0;
bfa_fcs_itnim_send_prli(itnim, NULL);
@ -138,6 +121,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
@ -166,12 +150,13 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
switch (event) {
case BFA_FCS_ITNIM_SM_RSP_OK:
if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
} else {
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
}
else
bfa_sm_set_state(itnim,
bfa_fcs_itnim_sm_hal_rport_online);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_RSP_ERROR:
@ -194,6 +179,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_fcxp_discard(itnim->fcxp);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@ -207,6 +193,44 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
}
}
static void
bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
{
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
switch (event) {
case BFA_FCS_ITNIM_SM_HAL_ONLINE:
if (!itnim->bfa_itnim)
itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
itnim->rport->bfa_rport, itnim);
if (itnim->bfa_itnim) {
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
} else {
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
}
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_fcs_itnim_free(itnim);
break;
default:
bfa_sm_fault(itnim->fcs, event);
}
}
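A transition sketch for the new intermediate state, read off the handlers above:

/*
 * prli --RSP_OK (target)--> hal_rport_online
 *   --HAL_ONLINE--> bfa_itnim_create() succeeds:
 *                       hcb_online + bfa_itnim_online()
 *                   bfa_itnim_create() fails:
 *                       offline + RPSM_EVENT_DELETE to the rport
 *   --OFFLINE----> offline + RPSM_EVENT_FC4_OFFLINE
 *   --DELETE-----> offline + bfa_fcs_itnim_free()
 */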
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
@ -238,6 +262,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_INITIATOR:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
bfa_timer_stop(&itnim->timer);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@ -275,9 +300,8 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
bfa_itnim_offline(itnim->bfa_itnim);
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@ -372,8 +396,14 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
break;
/*
* fcs_online is expected here for well known initiator ports
*/
case BFA_FCS_ITNIM_SM_FCS_ONLINE:
bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
break;
case BFA_FCS_ITNIM_SM_RSP_ERROR:
case BFA_FCS_ITNIM_SM_ONLINE:
case BFA_FCS_ITNIM_SM_INITIATOR:
break;
@ -426,11 +456,12 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(itnim->fcs, itnim->rport->pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
itnim->stats.fcxp_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
bfa_fcs_itnim_send_prli, itnim);
bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
return;
}
itnim->fcxp = fcxp;
@ -483,7 +514,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
if (prli_resp->parampage.servparams.initiator) {
bfa_trc(itnim->fcs, prli_resp->parampage.type);
itnim->rport->scsi_function =
BFA_RPORT_INITIATOR;
BFA_RPORT_INITIATOR;
itnim->stats.prli_rsp_acc++;
itnim->stats.initiator++;
bfa_sm_send_event(itnim,
@ -531,7 +562,11 @@ bfa_fcs_itnim_timeout(void *arg)
static void
bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
{
bfa_itnim_delete(itnim->bfa_itnim);
if (itnim->bfa_itnim) {
bfa_itnim_delete(itnim->bfa_itnim);
itnim->bfa_itnim = NULL;
}
bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
}
@ -552,7 +587,6 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
struct bfa_fcs_lport_s *port = rport->port;
struct bfa_fcs_itnim_s *itnim;
struct bfad_itnim_s *itnim_drv;
struct bfa_itnim_s *bfa_itnim;
/*
* call bfad to allocate the itnim
@ -570,20 +604,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
itnim->fcs = rport->fcs;
itnim->itnim_drv = itnim_drv;
/*
* call BFA to create the itnim
*/
bfa_itnim =
bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
if (bfa_itnim == NULL) {
bfa_trc(port->fcs, rport->pwwn);
bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
WARN_ON(1);
return NULL;
}
itnim->bfa_itnim = bfa_itnim;
itnim->bfa_itnim = NULL;
itnim->seq_rec = BFA_FALSE;
itnim->rec_support = BFA_FALSE;
itnim->conf_comp = BFA_FALSE;
@ -613,20 +634,12 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
* Notification from rport that PLOGI is complete to initiate FC-4 session.
*/
void
bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
{
itnim->stats.onlines++;
if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) {
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
} else {
/*
* For well known addresses, we set the itnim to initiator
* state
*/
itnim->stats.initiator++;
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
}
/*

View file

@ -131,6 +131,8 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
else
bfa_wc_down(&(port->fabric->stop_wc));
break;
case BFA_FCS_PORT_SM_OFFLINE:
@ -166,6 +168,8 @@ bfa_fcs_lport_sm_online(
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
else
bfa_wc_down(&(port->fabric->stop_wc));
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
@ -222,6 +226,8 @@ bfa_fcs_lport_sm_offline(
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
else
bfa_wc_down(&(port->fabric->stop_wc));
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
@ -267,6 +273,8 @@ bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
else
bfa_wc_down(&(port->fabric->stop_wc));
}
break;
@ -340,7 +348,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -370,7 +378,7 @@ bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -507,7 +515,7 @@ bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->s_id);
bfa_trc(port->fcs, rx_fchs->d_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -552,7 +560,7 @@ bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_len);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -684,7 +692,7 @@ bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -853,6 +861,25 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
return NULL;
}
/*
* OLD_PID based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe;
list_for_each(qe, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
if (rport->old_pid == pid)
return rport;
}
bfa_trc(port->fcs, pid);
return NULL;
}
/*
* PWWN based Lookup for a R-Port in the Port R-Port Queue
*/
@ -891,6 +918,26 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
return NULL;
}
/*
* PWWN & PID based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
wwn_t pwwn, u32 pid)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe;
list_for_each(qe, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
return rport;
}
bfa_trc(port->fcs, pwwn);
return NULL;
}
/*
* Called by rport module when new rports are discovered.
*/
@ -938,6 +985,16 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
}
/*
* Called by fabric for base port and by vport for virtual ports
* when target mode driver is unloaded.
*/
void
bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
{
bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
}
/*
* Called by fabric to delete base lport and associated resources.
*
@ -1657,10 +1714,11 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rhba, fdmi);
bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@ -1931,10 +1989,11 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rprt, fdmi);
bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@ -2146,10 +2205,11 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rpa, fdmi);
bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
return;
}
fdmi->fcxp = fcxp;
@ -2736,10 +2796,11 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_gmal, ms);
bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@ -2936,10 +2997,11 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_gfn, ms);
bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@ -3012,11 +3074,12 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ms_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_plogi, ms);
bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
return;
}
ms->fcxp = fcxp;
@ -3166,6 +3229,10 @@ static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_timeout(void *arg);
static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
@ -3202,6 +3269,20 @@ static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_process_gidft_pids(
struct bfa_fcs_lport_s *port,
u32 *pid_buf, u32 n_pids);
@ -3226,6 +3307,8 @@ enum vport_ns_event {
NSSM_EVENT_RFTID_SENT = 9,
NSSM_EVENT_RFFID_SENT = 10,
NSSM_EVENT_GIDFT_SENT = 11,
NSSM_EVENT_RNNID_SENT = 12,
NSSM_EVENT_RSNN_NN_SENT = 13,
};
static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
@ -3266,6 +3349,21 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_rnn_id(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_rsnn_nn(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rsnn_nn_retry(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
/*
* Start in offline state - awaiting linkup
*/
@ -3333,8 +3431,9 @@ bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
break;
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
ns->num_rnnid_retries = 0;
bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
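/* NS registration now proceeds PLOGI -> RNN_ID -> RSNN_NN ->
 * RSPN_ID -> RFT_ID -> RFF_ID -> GID_FT; if RNN_ID or RSNN_NN
 * exhausts its retries, the SM below falls back to RSPN_ID. */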
break;
case NSSM_EVENT_PORT_OFFLINE:
@ -3373,6 +3472,176 @@ bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
}
}
static void
bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RNNID_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
ns->num_rnnid_retries = 0;
ns->num_rsnn_nn_retries = 0;
bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
break;
case NSSM_EVENT_RSP_ERROR:
if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
ns->port->stats.ns_retries++;
ns->num_rnnid_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_rspn_id);
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
}
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_fcxp_discard(ns->fcxp);
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSNN_NN_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
ns->num_rsnn_nn_retries = 0;
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
break;
case NSSM_EVENT_RSP_ERROR:
if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
ns->port->stats.ns_retries++;
ns->num_rsnn_nn_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout,
ns, BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_rspn_id);
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
}
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_discard(ns->fcxp);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
@ -3770,11 +4039,12 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_plogi, ns);
bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@ -3852,6 +4122,162 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
}
}
/*
* Register node name for port_id
*/
static void
bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rnnid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port),
bfa_fcs_lport_get_fcid(port),
bfa_fcs_lport_get_nwwn(port));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
FC_MAX_PDUSZ, FC_FCCT_TOV);
port->stats.ns_rnnid_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
}
static void
bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_rnnid_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rnnid_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
return;
}
port->stats.ns_rnnid_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
/*
* Register the symbolic node name for a given node name.
*/
static void
bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
u8 *nsymbl;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rsnn_nn_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
bfa_fcs_get_base_port(port->fcs)));
len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port),
bfa_fcs_lport_get_nwwn(port), nsymbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
FC_MAX_PDUSZ, FC_FCCT_TOV);
port->stats.ns_rsnn_nn_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
}
static void
bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_rsnn_nn_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rsnn_nn_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
return;
}
port->stats.ns_rsnn_nn_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
/*
* Register the symbolic port name.
*/
@ -3870,11 +4296,12 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rspnid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rspn_id, ns);
bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@ -3971,11 +4398,12 @@ bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rftid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rft_id, ns);
bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@ -4044,11 +4472,12 @@ bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_rffid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rff_id, ns);
bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@ -4127,11 +4556,12 @@ bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
port->stats.ns_gidft_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_gid_ft, ns);
bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
return;
}
ns->fcxp = fcxp;
@ -4261,6 +4691,10 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
struct fcgs_gidft_resp_s *gidft_entry;
struct bfa_fcs_rport_s *rport;
u32 ii;
struct bfa_fcs_fabric_s *fabric = port->fabric;
struct bfa_fcs_vport_s *vport;
struct list_head *qe;
u8 found = 0;
for (ii = 0; ii < n_pids; ii++) {
gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
@ -4268,6 +4702,29 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
if (gidft_entry->pid == port->pid)
continue;
/*
* Ignore PID if it is of base port
* (Avoid vports discovering base port as remote port)
*/
if (gidft_entry->pid == fabric->bport.pid)
continue;
/*
* Ignore PID if it is of vport created on the same base port
* (Avoid vport discovering every other vport created on the
* same port as remote port)
*/
list_for_each(qe, &fabric->vport_q) {
vport = (struct bfa_fcs_vport_s *) qe;
if (vport->lport.pid == gidft_entry->pid)
found = 1;
}
if (found) {
found = 0;
continue;
}
/*
* Check if this rport already exists
*/
@ -4335,7 +4792,8 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
bfa_trc(port->fcs, port->pid);
bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
}
static void
@ -4355,6 +4813,70 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
}
}
void
bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
u8 *psymbl = &symbl[0];
int len;
if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
return;
/* Avoid sending RSPN in the following states. */
if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
return;
memset(symbl, 0, sizeof(symbl));
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp) {
port->stats.ns_rspnid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
return;
}
ns->fcxp = fcxp;
if (port->vport) {
/*
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs))));
/* Ensure we have a null-terminated string. */
((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs)))] = 0;
strncat((char *)psymbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0, psymbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
port->stats.ns_rspnid_sent++;
}
/*
* FCS SCN
*/
@ -4529,10 +5051,11 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
bfa_fcs_lport_scn_send_scr, scn);
bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
return;
}
scn->fcxp = fcxp;
@ -4614,7 +5137,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -4688,14 +5211,33 @@ static void
bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
{
struct bfa_fcs_rport_s *rport;
struct bfa_fcs_fabric_s *fabric = port->fabric;
struct bfa_fcs_vport_s *vport;
struct list_head *qe;
bfa_trc(port->fcs, rpid);
/*
* Ignore PID if it is of base port or of vports created on the
* same base port. It is to avoid vports discovering base port or
* other vports created on same base port as remote port
*/
if (rpid == fabric->bport.pid)
return;
list_for_each(qe, &fabric->vport_q) {
vport = (struct bfa_fcs_vport_s *) qe;
if (vport->lport.pid == rpid)
return;
}
/*
* If this is an unknown device, then it just came online.
* Otherwise let rport handle the RSCN event.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
if (!rport)
rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
if (rport == NULL) {
/*
* If min cfg mode is enabled, we do not need to
@ -4888,15 +5430,15 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
}
void
bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
wwn_t rport_wwns[], int *nrports)
bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
struct bfa_rport_qualifier_s rports[], int *nrports)
{
struct list_head *qh, *qe;
struct bfa_fcs_rport_s *rport = NULL;
int i;
struct bfa_fcs_s *fcs;
if (port == NULL || rport_wwns == NULL || *nrports == 0)
if (port == NULL || rports == NULL || *nrports == 0)
return;
fcs = port->fcs;
@ -4916,7 +5458,13 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
continue;
}
rport_wwns[i] = rport->pwwn;
if (!rport->pwwn && !rport->pid) {
qe = bfa_q_next(qe);
continue;
}
rports[i].pwwn = rport->pwwn;
rports[i].pid = rport->pid;
i++;
qe = bfa_q_next(qe);
@ -5760,6 +6308,16 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_cleanup++;
}
/*
* Stop notification from fabric SM. To be invoked from within FCS.
*/
void
bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
}
/*
* delete notification from fabric SM. To be invoked from within FCS.
*/

View file

@ -29,6 +29,12 @@ BFA_TRC_FILE(FCS, RPORT);
static u32
bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
/* In millisecs */
/*
* bfa_fcs_rport_max_logins is max count of bfa_fcs_rports
* whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports
*/
static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
/*
* forward declarations
*/
@ -36,8 +42,10 @@ static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
struct fc_logi_s *plogi);
static void bfa_fcs_rport_timeout(void *arg);
@ -76,6 +84,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
enum rport_event event);
@ -87,6 +96,8 @@ static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
@ -123,6 +134,10 @@ static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@ -130,6 +145,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
{BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
@ -167,8 +183,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
bfa_fcs_rport_send_plogiacc(rport, NULL);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@ -252,8 +268,8 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FCXP_SENT:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_fcs_rport_hal_online(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_DELETE:
@ -348,9 +364,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@ -370,9 +386,9 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
switch (event) {
case RPSM_EVENT_ACCEPTED:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
rport->plogi_retries = 0;
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
@ -397,6 +413,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_stats(rport->port, rport_del_max_plogi_retry);
rport->old_pid = rport->pid;
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
@ -443,9 +460,9 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@ -453,6 +470,70 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
}
}
/*
* PLOGI is done. Await bfa_fcs_itnim to ascertain the SCSI function
*/
static void
bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FC4_FCS_ONLINE:
if (rport->scsi_function == BFA_RPORT_INITIATOR) {
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
break;
}
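/* Non-initiator rport: create the HAL rport lazily here, now that
 * the SCSI function is known (creation was removed from rport alloc
 * later in this diff). */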
if (!rport->bfa_rport)
rport->bfa_rport =
bfa_rport_create(rport->fcs->bfa, rport);
if (rport->bfa_rport) {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_fcs_rport_hal_online(rport);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcs_rport_fcs_offline_action(rport);
}
break;
case RPSM_EVENT_PLOGI_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
rport->plogi_pending = BFA_TRUE;
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_SCN:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcs_rport_fcs_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
break;
}
}
/*
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
* are offline.
@ -468,41 +549,34 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_HCB_ONLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
bfa_fcs_rport_online_action(rport);
bfa_fcs_rport_hal_online_action(rport);
break;
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_PLOGI_COMP:
break;
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_LOGO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_RCVD:
rport->plogi_pending = BFA_TRUE;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_fcs_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
break;
case RPSM_EVENT_SCN:
/*
* @todo
* Ignore SCN - PLOGI just completed, FC-4 login should detect
* device failures.
*/
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcs_rport_fcs_offline_action(rport);
break;
default:
@ -537,18 +611,18 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@ -579,7 +653,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@ -592,24 +666,16 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@ -642,14 +708,14 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
}
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@ -659,7 +725,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
@ -668,7 +734,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@ -696,21 +762,21 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@ -719,7 +785,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_PLOGI_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@ -756,13 +822,13 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_SCN:
@ -774,14 +840,14 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
bfa_fcs_rport_hal_offline_action(rport);
break;
default:
@ -803,13 +869,19 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_fcs_rport_hal_offline(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
if (rport->pid && (rport->prlo == BFA_TRUE))
bfa_fcs_rport_send_prlo_acc(rport);
if (rport->pid && (rport->prlo == BFA_FALSE))
bfa_fcs_rport_send_logo_acc(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
break;
case RPSM_EVENT_HCB_ONLINE:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
@ -835,7 +907,20 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_fcs_rport_hal_offline(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
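/* fall through */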
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
break;
case RPSM_EVENT_HCB_ONLINE:
case RPSM_EVENT_DELETE:
/* Rport is being deleted */
break;
default:
@ -857,13 +942,23 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
bfa_fcs_rport_hal_offline(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
/*
* Rport is going offline. Just ack the logo
*/
bfa_fcs_rport_send_logo_acc(rport);
break;
case RPSM_EVENT_PRLO_RCVD:
bfa_fcs_rport_send_prlo_acc(rport);
break;
case RPSM_EVENT_HCB_ONLINE:
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
/*
* rport is already going offline.
@ -907,24 +1002,23 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
*/
case RPSM_EVENT_ADDRESS_CHANGE:
if (bfa_fcs_lport_is_online(rport->port)) {
if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
bfa_sm_set_state(rport,
bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport,
bfa_fcs_rport_sm_plogi_sending);
rport->plogi_retries = 0;
bfa_fcs_rport_send_plogi(rport, NULL);
}
} else {
if (!bfa_fcs_lport_is_online(rport->port)) {
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
}
if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
bfa_sm_set_state(rport,
bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
rport->plogi_retries = 0;
bfa_fcs_rport_send_plogi(rport, NULL);
}
break;
@ -1001,7 +1095,11 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
if (rport->pid && (rport->prlo == BFA_TRUE))
bfa_fcs_rport_send_prlo_acc(rport);
if (rport->pid && (rport->prlo == BFA_FALSE))
bfa_fcs_rport_send_logo_acc(rport);
break;
case RPSM_EVENT_LOGO_IMP:
@ -1040,7 +1138,14 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
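/* fall through */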
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
break;
case RPSM_EVENT_ADDRESS_CHANGE:
break;
@ -1072,7 +1177,11 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
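/* fall through */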
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_free(rport);
@ -1126,9 +1235,9 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
case RPSM_EVENT_PLOGI_SEND:
@ -1190,9 +1299,9 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@ -1254,9 +1363,9 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@ -1296,6 +1405,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_fcs_rport_sm_nsdisc_sending);
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
rport->old_pid = rport->pid;
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
@ -1343,9 +1453,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_hal_online(rport);
bfa_fcs_rport_fcs_online_action(rport);
break;
default:
@ -1353,7 +1463,63 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
}
}
/*
* Rport needs to be deleted;
* waiting for ITNIM cleanup to finish
*/
static void
bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FC4_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
bfa_fcs_rport_hal_offline(rport);
break;
case RPSM_EVENT_DELETE:
case RPSM_EVENT_PLOGI_RCVD:
/* Ignore these events */
break;
default:
bfa_sm_fault(rport->fcs, event);
break;
}
}
/*
* RPort needs to be deleted;
* waiting for BFA/FW to finish current processing
*/
static void
bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_HCB_OFFLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcs_rport_free(rport);
break;
case RPSM_EVENT_DELETE:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_PLOGI_RCVD:
/* Ignore these events */
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* fcs_rport_private FCS RPORT private functions
@ -1370,10 +1536,11 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
bfa_fcs_rport_send_plogi, rport);
bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@ -1490,10 +1657,11 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->reply_oxid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
bfa_fcs_rport_send_plogiacc, rport);
bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
return;
}
rport->fcxp = fcxp;
@ -1522,10 +1690,11 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
bfa_fcs_rport_send_adisc, rport);
bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@ -1585,10 +1754,11 @@ bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
bfa_fcs_rport_send_nsdisc, rport);
bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
return;
}
rport->fcxp = fcxp;
@ -1741,10 +1911,11 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
port = rport->port;
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
bfa_fcs_rport_send_logo, rport);
bfa_fcs_rport_send_logo, rport, BFA_FALSE);
return;
}
rport->fcxp = fcxp;
@ -1778,7 +1949,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
port = rport->port;
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -1849,7 +2020,7 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
bfa_fcs_itnim_is_initiator(rport->itnim);
}
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -1886,7 +2057,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -1920,7 +2091,7 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
*/
if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -1957,6 +2128,15 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
bfa_rport_online(rport->bfa_rport, &rport_info);
}
static void
bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
{
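/* With lazy HAL rport creation there may be no bfa_rport yet; if so,
 * complete the offline immediately via the offline callback. */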
if (rport->bfa_rport)
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
else
bfa_cb_rport_offline(rport);
}
static struct bfa_fcs_rport_s *
bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
{
@ -1967,6 +2147,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
/*
* allocate rport
*/
if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
bfa_trc(fcs, rpid);
return NULL;
}
if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
!= BFA_STATUS_OK) {
bfa_trc(fcs, rpid);
@ -1981,16 +2166,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->rp_drv = rport_drv;
rport->pid = rpid;
rport->pwwn = pwwn;
rport->old_pid = 0;
/*
* allocate BFA rport
*/
rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
if (!rport->bfa_rport) {
bfa_trc(fcs, rpid);
kfree(rport_drv);
return NULL;
}
rport->bfa_rport = NULL;
/*
* allocate FC-4s
@ -2001,14 +2179,13 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
rport->itnim = bfa_fcs_itnim_create(rport);
if (!rport->itnim) {
bfa_trc(fcs, rpid);
bfa_sm_send_event(rport->bfa_rport,
BFA_RPORT_SM_DELETE);
kfree(rport_drv);
return NULL;
}
}
bfa_fcs_lport_add_rport(port, rport);
fcs->num_rport_logins++;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
@ -2024,20 +2201,28 @@ static void
bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct bfa_fcs_s *fcs = port->fcs;
/*
* - delete FC-4s
* - delete BFA rport
* - remove from queue of rports
*/
rport->plogi_pending = BFA_FALSE;
if (bfa_fcs_lport_is_initiator(port)) {
bfa_fcs_itnim_delete(rport->itnim);
if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_offline(rport);
}
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
if (rport->bfa_rport) {
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
rport->bfa_rport = NULL;
}
bfa_fcs_lport_del_rport(port, rport);
fcs->num_rport_logins--;
kfree(rport->rp_drv);
}
@ -2071,7 +2256,18 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
}
static void
bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
{
if ((!rport->pid) || (!rport->pwwn)) {
bfa_trc(rport->fcs, rport->pid);
bfa_sm_fault(rport->fcs, rport->pid);
}
bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
}
static void
bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
@ -2086,7 +2282,7 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
}
if (bfa_fcs_lport_is_initiator(port)) {
bfa_fcs_itnim_rport_online(rport->itnim);
bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
};
@ -2102,15 +2298,28 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
}
static void
bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
{
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_offline(rport);
bfa_fcs_itnim_rport_offline(rport->itnim);
}
static void
bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
char rpwwn_buf[BFA_STRING_32];
if (!rport->bfa_rport) {
bfa_fcs_rport_fcs_offline_action(rport);
return;
}
rport->stats.offlines++;
rport->plogi_pending = BFA_FALSE;
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
@ -2340,7 +2549,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
/*
* This routine is the BFA callback for the bfa_rport_online() call.
@ -2508,7 +2716,7 @@ bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
bfa_trc(rport->fcs, rport->pid);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
if (!fcxp)
return;
len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
@ -2534,7 +2742,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
bfa_trc(rport->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
if (!fcxp)
return;
@ -2582,6 +2790,17 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
}
/*
* Called by BFAD to set the max limit on the number of bfa_fcs_rport
* allocations, which bounds the number of concurrent logins to remote ports
*/
void
bfa_fcs_rport_set_max_logins(u32 max_logins)
{
if (max_logins > 0)
bfa_fcs_rport_max_logins = max_logins;
}
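/*
 * Illustrative sketch (not part of this commit): one way a driver-load
 * path could feed this setter from a module parameter.  The parameter
 * name and the helper below are hypothetical; assumes <linux/module.h>.
 */
static u32 max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
module_param(max_rport_logins, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_rport_logins,
	"Max concurrent logins to remote ports");

static void example_apply_rport_login_cap(void)
{
	if (max_rport_logins > 0)
		bfa_fcs_rport_set_max_logins(max_rport_logins);
}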
void
bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
struct bfa_rport_attr_s *rport_attr)
@ -2605,9 +2824,11 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
rport_attr->curr_speed = rport->rpf.rpsc_speed;
rport_attr->assigned_speed = rport->rpf.assigned_speed;
qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
qos_attr.qos_flow_id =
cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
if (rport->bfa_rport) {
qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
qos_attr.qos_flow_id =
cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
}
rport_attr->qos_attr = qos_attr;
rport_attr->trl_enforced = BFA_FALSE;
@ -2940,10 +3161,11 @@ bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(rport->fcs, rport->pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
fcxp = fcxp_alloced ? fcxp_alloced :
bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
bfa_fcs_rpf_send_rpsc2, rpf);
bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
return;
}
rpf->fcxp = fcxp;

View file

@ -92,7 +92,6 @@ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
@ -599,8 +598,9 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
break;
case IOC_E_HWERROR:
case IOC_E_HWFAILED:
/*
* HB failure notification, ignore.
* HB failure / HW error notification, ignore.
*/
break;
default:
@ -632,6 +632,10 @@ bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
case IOC_E_HWERROR:
/* Ignore - already in hwfail state */
break;
default:
bfa_sm_fault(ioc, event);
}
@ -1455,7 +1459,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
bfa_trc(ioc, i);
bfa_trc(ioc, fwhdr->md5sum[i]);
bfa_trc(ioc, drv_fwhdr->md5sum[i]);
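/*
 * Standalone illustration of the byte-order rule these hunks apply:
 * words resident on the adapter are little-endian, words from the
 * driver's firmware image are CPU order, so convert the CPU-order side
 * (cpu_to_le32 in the kernel, htole32 in this userspace sketch) before
 * comparing or writing.  Values below are hypothetical.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t img_word = 0xBFA01234u;	/* CPU-order image word */
	uint32_t dev_word = htole32(img_word);	/* as the adapter stores it */

	printf("match: %d\n", dev_word == htole32(img_word));	/* prints 1 */
	return 0;
}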
@ -1480,7 +1484,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature) {
if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
bfa_trc(ioc, fwhdr.signature);
bfa_trc(ioc, drv_fwhdr->signature);
return BFA_FALSE;
@ -1704,7 +1708,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
loff += sizeof(u32);
@ -2260,6 +2264,12 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
void
bfa_ioc_suspend(struct bfa_ioc_s *ioc)
{
ioc->dbg_fwsave_once = BFA_TRUE;
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
* Initialize memory for saving firmware trace. Driver must initialize
@ -2269,7 +2279,7 @@ void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
ioc->dbg_fwsave = dbg_fwsave;
ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
}
/*
@ -2856,7 +2866,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
/*
* Save firmware trace if configured.
*/
static void
void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
int tlen;

View file

@ -820,6 +820,7 @@ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
void bfa_ioc_detach(struct bfa_ioc_s *ioc);
void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_pcifn_class clscode);
void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
@ -866,6 +867,7 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
/*
* asic block configuration related APIs

View file

@ -121,6 +121,7 @@ struct bfa_s {
bfa_boolean_t fcs; /* FCS is attached to BFA */
struct bfa_msix_s msix;
int bfa_aen_seq;
bfa_boolean_t intr_enabled; /* Status of interrupts */
};
extern bfa_boolean_t bfa_auto_recover;

View file

@ -440,9 +440,11 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
INIT_LIST_HEAD(&mod->fcxp_free_q);
INIT_LIST_HEAD(&mod->fcxp_req_free_q);
INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
INIT_LIST_HEAD(&mod->fcxp_unused_q);
INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
mod->fcxp_list = fcxp;
@ -450,7 +452,14 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
fcxp->fcxp_mod = mod;
fcxp->fcxp_tag = i;
list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
if (i < (mod->num_fcxps / 2)) {
list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
fcxp->req_rsp = BFA_TRUE;
} else {
list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
fcxp->req_rsp = BFA_FALSE;
}
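/* e.g. with num_fcxps == 64, fcxps 0..31 populate the request free
 * list and 32..63 the response free list */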
bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
fcxp->reqq_waiting = BFA_FALSE;
@ -514,7 +523,8 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (!cfg->drvcfg.min_cfg)
mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
INIT_LIST_HEAD(&mod->wait_q);
INIT_LIST_HEAD(&mod->req_wait_q);
INIT_LIST_HEAD(&mod->rsp_wait_q);
claim_fcxps_mem(mod);
}
@ -542,7 +552,8 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
struct list_head *qe, *qen;
/* Enqueue unused fcxp resources to free_q */
list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
fcxp = (struct bfa_fcxp_s *) qe;
@ -559,11 +570,14 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
}
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp;
bfa_q_deq(&fm->fcxp_free_q, &fcxp);
if (req)
bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
else
bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
if (fcxp)
list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
@ -642,7 +656,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
struct bfa_fcxp_wqe_s *wqe;
bfa_q_deq(&mod->wait_q, &wqe);
if (fcxp->req_rsp)
bfa_q_deq(&mod->req_wait_q, &wqe);
else
bfa_q_deq(&mod->rsp_wait_q, &wqe);
if (wqe) {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
@ -657,7 +675,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
list_del(&fcxp->qe);
list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
if (fcxp->req_rsp)
list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
else
list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
static void
@ -900,21 +922,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
* Address (given the sge index).
* @param[in] get_rsp_sglen function ptr to be called to get a response SG
* len (given the sge index).
* @param[in] req Whether the allocated FCXP is used to send a
* request (BFA_TRUE) or a response (BFA_FALSE)
*
* @return FCXP instance. NULL on failure.
*/
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_s *fcxp = NULL;
WARN_ON(bfa == NULL);
fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
if (fcxp == NULL)
return NULL;
@ -1071,17 +1095,20 @@ bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
}
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
void *caller, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
WARN_ON(!list_empty(&mod->fcxp_free_q));
if (req)
WARN_ON(!list_empty(&mod->fcxp_req_free_q));
else
WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
wqe->alloc_cbfn = alloc_cbfn;
wqe->alloc_cbarg = alloc_cbarg;
@ -1094,7 +1121,10 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
wqe->rsp_sga_cbfn = rsp_sga_cbfn;
wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
list_add_tail(&wqe->qe, &mod->wait_q);
if (req)
list_add_tail(&wqe->qe, &mod->req_wait_q);
else
list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
void
@ -1102,7 +1132,8 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
list_del(&wqe->qe);
}
@ -1153,8 +1184,13 @@ bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
int i;
for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
list_add_tail(qe, &mod->fcxp_unused_q);
if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
list_add_tail(qe, &mod->fcxp_req_unused_q);
} else {
bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
list_add_tail(qe, &mod->fcxp_rsp_unused_q);
}
}
}
@ -1404,11 +1440,11 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_FWRSP:
case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_lps_logout_comp(lps);
break;
case BFA_LPS_SM_OFFLINE:
case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@ -1786,6 +1822,8 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
if (lps->fdisc)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
else
bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/*
@ -4237,6 +4275,10 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
case BFA_RPORT_SM_OFFLINE:
bfa_rport_offline_cb(rp);
break;
default:
bfa_stats(rp, sm_off_unexp);
bfa_sm_fault(rp->bfa, event);
@ -4353,6 +4395,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_offp_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
bfa_rport_offline_cb(rp);
break;
default:
@ -4731,8 +4774,10 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
WARN_ON(speed == 0);
WARN_ON(speed == BFA_PORT_SPEED_AUTO);
rport->rport_info.speed = speed;
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
if (rport) {
rport->rport_info.speed = speed;
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
}
/* Set Rport LUN Mask */


@ -97,10 +97,13 @@ struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
struct list_head fcxp_free_q; /* free FCXPs */
struct list_head fcxp_active_q; /* active FCXPs */
struct list_head wait_q; /* wait queue for free fcxp */
struct list_head fcxp_unused_q; /* unused fcxps */
struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */
struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending rsp */
struct list_head fcxp_active_q; /* active FCXPs */
struct list_head req_wait_q; /* wait queue for free req_fcxp */
struct list_head rsp_wait_q; /* wait queue for free rsp_fcxp */
struct list_head fcxp_req_unused_q; /* unused req_fcxps */
struct list_head fcxp_rsp_unused_q; /* unused rsp_fcxps */
u32 req_pld_sz;
u32 rsp_pld_sz;
struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
@ -197,6 +200,7 @@ struct bfa_fcxp_s {
struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
struct bfa_reqq_wait_s reqq_wqe;
bfa_boolean_t reqq_waiting;
bfa_boolean_t req_rsp; /* Used to track req/rsp fcxp */
};
struct bfa_fcxp_wqe_s {
@ -586,20 +590,22 @@ void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
/*
* bfa fcxp API functions
*/
struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
bfa_fcxp_get_sglen_t get_rsp_sglen);
void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_get_sglen_t get_rsp_sglen,
bfa_boolean_t req);
void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn,
void *cbarg, void *bfad_fcxp,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
bfa_fcxp_get_sglen_t get_rsp_sglen);
bfa_fcxp_get_sglen_t get_rsp_sglen,
bfa_boolean_t req);
void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
struct bfa_fcxp_wqe_s *wqe);
void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
@ -658,6 +664,7 @@ u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);


@ -57,6 +57,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware related */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
@ -148,6 +149,8 @@ MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
" Range[64k|128k|256k|512k|1024k|2048k]");
module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@ -736,6 +739,9 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
}
}
/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
pci_enable_pcie_error_reporting(pdev);
bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
@ -806,6 +812,8 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
}
}
pci_save_state(pdev);
return 0;
out_release_region:
@ -822,6 +830,8 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
pci_iounmap(pdev, bfad->pci_bar0_kva);
pci_iounmap(pdev, bfad->pci_bar2_kva);
pci_release_regions(pdev);
/* Disable PCIE Advanced Error Recovery (AER) */
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@ -1258,6 +1268,16 @@ bfad_setup_intr(struct bfad_s *bfad)
error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
if (error) {
/* In CT1 & CT2, try to allocate just one vector */
if (bfa_asic_id_ctc(pdev->device)) {
printk(KERN_WARNING "bfa %s: trying one msix "
"vector failed to allocate %d[%d]\n",
bfad->pci_name, bfad->nvec, error);
bfad->nvec = 1;
error = pci_enable_msix(bfad->pcidev,
msix_entries, bfad->nvec);
}
/*
* Only error number of vector is available.
* We don't have a mechanism to map multiple
@ -1267,12 +1287,13 @@ bfad_setup_intr(struct bfad_s *bfad)
* vectors. Linux doesn't duplicate vectors
* in the MSIX table for this case.
*/
printk(KERN_WARNING "bfad%d: "
"pci_enable_msix failed (%d),"
" use line based.\n", bfad->inst_no, error);
goto line_based;
if (error) {
printk(KERN_WARNING "bfad%d: "
"pci_enable_msix failed (%d), "
"use line based.\n",
bfad->inst_no, error);
goto line_based;
}
}
/* Disable INTX in MSI-X mode */
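The fallback ladder added here: ask for the full MSI-X vector set; on CT1/CT2 ASICs retry with a single vector; only then drop to legacy INTx. A condensed, hedged sketch of that ladder (names other than pci_enable_msix() are illustrative):

static int setup_intr_sketch(struct pci_dev *pdev,
			     struct msix_entry *entries, int nvec)
{
	/* first ask for the full vector set */
	int error = pci_enable_msix(pdev, entries, nvec);

	/* on failure, retry with one shared vector (the CT1/CT2 path) */
	if (error && nvec > 1)
		error = pci_enable_msix(pdev, entries, 1);

	/* still failing: caller falls back to line-based INTx */
	return error ? -1 : 0;
}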
@ -1470,6 +1491,197 @@ bfad_pci_remove(struct pci_dev *pdev)
kfree(bfad);
}
/*
* PCI Error Recovery entry, error detected.
*/
static pci_ers_result_t
bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct bfad_s *bfad = pci_get_drvdata(pdev);
unsigned long flags;
pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
dev_printk(KERN_ERR, &pdev->dev,
"error detected state: %d - flags: 0x%x\n",
state, bfad->bfad_flags);
switch (state) {
case pci_channel_io_normal: /* non-fatal error */
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfad->bfad_flags &= ~BFAD_EEH_BUSY;
/* Suspend/fail all bfa operations */
bfa_ioc_suspend(&bfad->bfa.ioc);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
del_timer_sync(&bfad->hal_tmo);
ret = PCI_ERS_RESULT_CAN_RECOVER;
break;
case pci_channel_io_frozen: /* fatal error */
init_completion(&bfad->comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfad->bfad_flags |= BFAD_EEH_BUSY;
/* Suspend/fail all bfa operations */
bfa_ioc_suspend(&bfad->bfa.ioc);
bfa_fcs_stop(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_for_completion(&bfad->comp);
bfad_remove_intr(bfad);
del_timer_sync(&bfad->hal_tmo);
pci_disable_device(pdev);
ret = PCI_ERS_RESULT_NEED_RESET;
break;
case pci_channel_io_perm_failure: /* PCI Card is DEAD */
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfad->bfad_flags |= BFAD_EEH_BUSY |
BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* If the error_detected handler is called with the reason
* pci_channel_io_perm_failure - it will subsequently call
* pci_remove() entry point to remove the pci device from the
* system - So defer the cleanup to pci_remove(); cleaning up
* here causes inconsistent state during pci_remove().
*/
ret = PCI_ERS_RESULT_DISCONNECT;
break;
default:
WARN_ON(1);
}
return ret;
}
int
restart_bfa(struct bfad_s *bfad)
{
unsigned long flags;
struct pci_dev *pdev = bfad->pcidev;
bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
&bfad->meminfo, &bfad->hal_pcidev);
/* Enable interrupts and wait for bfa_init completion */
if (bfad_setup_intr(bfad)) {
dev_printk(KERN_WARNING, &pdev->dev,
"%s: bfad_setup_intr failed\n", bfad->pci_name);
bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
return -1;
}
init_completion(&bfad->comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_iocfc_init(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* Set up interrupt handler for each vector */
if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
bfad_install_msix_handler(bfad))
dev_printk(KERN_WARNING, &pdev->dev,
"%s: install_msix failed.\n", bfad->pci_name);
bfad_init_timer(bfad);
wait_for_completion(&bfad->comp);
bfad_drv_start(bfad);
return 0;
}
/*
* PCI Error Recovery entry, re-initialize the chip.
*/
static pci_ers_result_t
bfad_pci_slot_reset(struct pci_dev *pdev)
{
struct bfad_s *bfad = pci_get_drvdata(pdev);
u8 byte;
dev_printk(KERN_ERR, &pdev->dev,
"bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
if (pci_enable_device(pdev)) {
dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
"PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
/*
* Read some byte (e.g. DMA max. payload size which can't
* be 0xff any time) to make sure - we did not hit another PCI error
* in the middle of recovery. If we did, then declare permanent failure.
*/
pci_read_config_byte(pdev, 0x68, &byte);
if (byte == 0xff) {
dev_printk(KERN_ERR, &pdev->dev,
"slot_reset failed ... got another PCI error !\n");
goto out_disable_device;
}
pci_save_state(pdev);
pci_set_master(pdev);
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
goto out_disable_device;
pci_cleanup_aer_uncorrect_error_status(pdev);
if (restart_bfa(bfad) == -1)
goto out_disable_device;
pci_enable_pcie_error_reporting(pdev);
dev_printk(KERN_WARNING, &pdev->dev,
"slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
return PCI_ERS_RESULT_RECOVERED;
out_disable_device:
pci_disable_device(pdev);
return PCI_ERS_RESULT_DISCONNECT;
}
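The config-space probe in bfad_pci_slot_reset() relies on a PCI property: a device that has fallen off the bus answers every config read with all-ones, so any register that can never legitimately hold 0xff (offset 0x68 in this driver) tells a recovered device apart from one that hit a second error mid-recovery. A hedged helper capturing the idiom:

static bool device_survived_reset(struct pci_dev *pdev)
{
	u8 byte;

	/* reads back as 0xff if the device is inaccessible */
	pci_read_config_byte(pdev, 0x68, &byte);
	return byte != 0xff;
}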
static pci_ers_result_t
bfad_pci_mmio_enabled(struct pci_dev *pdev)
{
unsigned long flags;
struct bfad_s *bfad = pci_get_drvdata(pdev);
dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
/* Fetch FW diagnostic information */
bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
/* Cancel all pending IOs */
spin_lock_irqsave(&bfad->bfad_lock, flags);
init_completion(&bfad->comp);
bfa_fcs_stop(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_for_completion(&bfad->comp);
bfad_remove_intr(bfad);
del_timer_sync(&bfad->hal_tmo);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
static void
bfad_pci_resume(struct pci_dev *pdev)
{
unsigned long flags;
struct bfad_s *bfad = pci_get_drvdata(pdev);
dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
/* wait until the link is online */
bfad_rport_online_wait(bfad);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfad->bfad_flags &= ~BFAD_EEH_BUSY;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
struct pci_device_id bfad_id_table[] = {
{
.vendor = BFA_PCI_VENDOR_ID_BROCADE,
@ -1513,11 +1725,22 @@ struct pci_device_id bfad_id_table[] = {
MODULE_DEVICE_TABLE(pci, bfad_id_table);
/*
* PCI error recovery handlers.
*/
static struct pci_error_handlers bfad_err_handler = {
.error_detected = bfad_pci_error_detected,
.slot_reset = bfad_pci_slot_reset,
.mmio_enabled = bfad_pci_mmio_enabled,
.resume = bfad_pci_resume,
};
static struct pci_driver bfad_pci_driver = {
.name = BFAD_DRIVER_NAME,
.id_table = bfad_id_table,
.probe = bfad_pci_probe,
.remove = __devexit_p(bfad_pci_remove),
.err_handler = &bfad_err_handler,
};
/*
@ -1546,6 +1769,7 @@ bfad_init(void)
bfa_auto_recover = ioc_auto_recover;
bfa_fcs_rport_set_del_timeout(rport_del_timeout);
bfa_fcs_rport_set_max_logins(max_rport_logins);
error = pci_register_driver(&bfad_pci_driver);
if (error) {


@ -587,6 +587,37 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *)vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
char *sym_name = fc_vport->symbolic_name;
struct bfa_fcs_vport_s *fcs_vport;
wwn_t pwwn;
unsigned long flags;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (fcs_vport == NULL)
return;
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (strlen(sym_name) > 0) {
strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
bfa_fcs_lport_ns_util_send_rspn_id(
BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
@ -640,6 +671,7 @@ struct fc_function_template bfad_im_fc_function_template = {
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
.set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
.bsg_request = bfad_im_bsg_request,
.bsg_timeout = bfad_im_bsg_timeout,
};
@ -792,6 +824,13 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 16Gbps PCIe dual port FC HBA");
} else if (!strcmp(model, "Brocade-1867")) {
if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 16Gbps PCIe single port FC HBA for IBM");
else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 16Gbps PCIe dual port FC HBA for IBM");
} else
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
@ -909,15 +948,16 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
struct bfad_port_s *port = im_port->port;
struct bfad_s *bfad = im_port->bfad;
int nrports = 2048;
wwn_t *rports = NULL;
struct bfa_rport_qualifier_s *rports = NULL;
unsigned long flags;
rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
GFP_ATOMIC);
if (rports == NULL)
return snprintf(buf, PAGE_SIZE, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);


@ -535,7 +535,8 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_lport_get_rports_s),
sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
!= BFA_STATUS_OK) {
iocmd->status = BFA_STATUS_VERSION_FAIL;
return 0;
}
@ -552,8 +553,9 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
goto out;
}
bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
&iocmd->nrports);
bfa_fcs_lport_get_rport_quals(fcs_port,
(struct bfa_rport_qualifier_s *)iocmd_bufptr,
&iocmd->nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@ -578,7 +580,11 @@ bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
goto out;
}
fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
if (iocmd->pid)
fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
iocmd->rpwwn, iocmd->pid);
else
fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
if (fcs_rport == NULL) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@ -671,9 +677,11 @@ bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
sizeof(struct bfa_rport_stats_s));
memcpy((void *)&iocmd->stats.hal_stats,
(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
sizeof(struct bfa_rport_hal_stats_s));
if (bfa_fcs_rport_get_halrport(fcs_rport)) {
memcpy((void *)&iocmd->stats.hal_stats,
(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
sizeof(struct bfa_rport_hal_stats_s));
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
@ -709,7 +717,8 @@ bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
rport = bfa_fcs_rport_get_halrport(fcs_rport);
memset(&rport->stats, 0, sizeof(rport->stats));
if (rport)
memset(&rport->stats, 0, sizeof(rport->stats));
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@ -744,7 +753,8 @@ bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
fcs_rport->rpf.assigned_speed = iocmd->speed;
/* Set this speed in f/w only if the RPSC speed is not available */
if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
if (fcs_rport->bfa_rport)
bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
out:
@ -1030,9 +1040,10 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
else {
iocmd->status = BFA_STATUS_OK;
memcpy((void *)&iocmd->iostats, (void *)
&(bfa_fcs_itnim_get_halitn(itnim)->stats),
sizeof(struct bfa_itnim_iostats_s));
if (bfa_fcs_itnim_get_halitn(itnim))
memcpy((void *)&iocmd->iostats, (void *)
&(bfa_fcs_itnim_get_halitn(itnim)->stats),
sizeof(struct bfa_itnim_iostats_s));
}
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@ -2949,13 +2960,13 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
spin_lock_irqsave(&bfad->bfad_lock, flags);
/* Allocate bfa_fcxp structure */
hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
drv_fcxp->num_req_sgles,
drv_fcxp->num_rsp_sgles,
bfad_fcxp_get_req_sgaddr_cb,
bfad_fcxp_get_req_sglen_cb,
bfad_fcxp_get_rsp_sgaddr_cb,
bfad_fcxp_get_rsp_sglen_cb);
bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
if (!hal_fcxp) {
bfa_trc(bfad, 0);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);


@ -319,6 +319,8 @@ struct bfa_bsg_rport_attr_s {
u16 vf_id;
wwn_t pwwn;
wwn_t rpwwn;
u32 pid;
u32 rsvd;
struct bfa_rport_attr_s attr;
};


@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/aer.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
@ -56,7 +57,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
#define BFAD_DRIVER_VERSION "3.0.23.0"
#define BFAD_DRIVER_VERSION "3.1.2.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
@ -81,6 +82,8 @@
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
#define BFAD_INTX_ON 0x00000400
#define BFAD_EEH_BUSY 0x00000800
#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE 0x00001000
/*
* BFAD related definition
*/


@ -1216,6 +1216,15 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
return 0;
}
if (bfad->bfad_flags & BFAD_EEH_BUSY) {
if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
cmnd->result = DID_NO_CONNECT << 16;
else
cmnd->result = DID_REQUEUE << 16;
done(cmnd);
return 0;
}
sg_cnt = scsi_dma_map(cmnd);
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;


@ -1807,7 +1807,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
}
memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
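The one-line fix above corrects a classic sizeof-on-pointer bug: sense_buffer is declared unsigned char * in struct scsi_cmnd, so sizeof(sc_cmd->sense_buffer) yields the pointer size (8 bytes on 64-bit), not the 96-byte SCSI_SENSE_BUFFERSIZE buffer it points at, and most of the previous command's sense data was left uncleared. A standalone illustration:

#include <stdio.h>

struct cmd {
	unsigned char *sense_buffer;	/* a pointer, as in struct scsi_cmnd */
};

int main(void)
{
	struct cmd c;

	/* prints the pointer size (typically 8), not the buffer size (96) */
	printf("sizeof = %zu\n", sizeof(c.sense_buffer));
	return 0;
}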


@ -1422,7 +1422,8 @@ static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
"DID_NEXUS_FAILURE" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
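The two new strings bring the table up to DID_NEXUS_FAILURE (0x11), eighteen entries in all, and it must stay in lockstep with the DID_* host-byte codes. A compile-time guard along these lines (not part of this patch; BUILD_BUG_ON must live inside a function) would catch future drift:

static inline void hostbyte_table_check(void)
{
	/* covers 0x00 (DID_OK) through 0x11 (DID_NEXUS_FAILURE) inclusive */
	BUILD_BUG_ON(NUM_HOSTBYTE_STRS != DID_NEXUS_FAILURE + 1);
}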
static const char * const driverbyte_table[]={


@ -641,8 +641,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
h->state = TPGS_STATE_STANDBY;
break;
case TPGS_STATE_OFFLINE:
case TPGS_STATE_UNAVAILABLE:
/* Path unusable for unavailable/offline */
/* Path unusable */
err = SCSI_DH_DEV_OFFLINED;
break;
default:


@ -790,29 +790,19 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1815"},
{"IBM", "1818"},
{"IBM", "3526"},
{"SGI", "TP9400"},
{"SGI", "TP9500"},
{"SGI", "TP9700"},
{"SGI", "TP9"},
{"SGI", "IS"},
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_I"},
{"SUN", "LCSM100_S"},
{"SUN", "LCSM100_E"},
{"SUN", "LCSM100_F"},
{"DELL", "MD3000"},
{"DELL", "MD3000i"},
{"DELL", "MD32xx"},
{"DELL", "MD32xxi"},
{"DELL", "MD36xxi"},
{"DELL", "MD36xxf"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
{"SUN", "CSM100_R_FC"},
{"SUN", "CSM"},
{"SUN", "LCSM100"},
{"SUN", "STK6580_6780"},
{"SUN", "SUN_6180"},
{"SUN", "ArrayStorage"},
{"DELL", "MD3"},
{"NETAPP", "INF-01-00"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{NULL, NULL},
};
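The shortened list works because device-handler matching compares each devlist entry as a prefix of the INQUIRY vendor/model strings, so "SGI"/"TP9" now covers TP9400, TP9500 and TP9700, and "DELL"/"MD3" covers the whole MD3xxx family. A hedged sketch of that comparison (the real matcher lives in the scsi_dh core):

#include <stdbool.h>
#include <string.h>

static bool devlist_entry_matches(const char *entry, const char *inquiry)
{
	/* "MD3" matches "MD3000", "MD3000i", "MD32xx", ... */
	return strncmp(entry, inquiry, strlen(entry)) == 0;
}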
@ -863,7 +853,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
RDAC_NAME);
return 0;
return -ENOMEM;
}
scsi_dh_data->scsi_dh = &rdac_dh;


@ -99,6 +99,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@ -118,13 +127,22 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
{0x3350103C, "Smart Array", &SA5_access},
{0x3351103C, "Smart Array", &SA5_access},
{0x3352103C, "Smart Array", &SA5_access},
{0x3353103C, "Smart Array", &SA5_access},
{0x3354103C, "Smart Array", &SA5_access},
{0x3355103C, "Smart Array", &SA5_access},
{0x3356103C, "Smart Array", &SA5_access},
{0x3350103C, "Smart Array P222", &SA5_access},
{0x3351103C, "Smart Array P420", &SA5_access},
{0x3352103C, "Smart Array P421", &SA5_access},
{0x3353103C, "Smart Array P822", &SA5_access},
{0x3354103C, "Smart Array P420i", &SA5_access},
{0x3355103C, "Smart Array P220i", &SA5_access},
{0x3356103C, "Smart Array P721m", &SA5_access},
{0x1920103C, "Smart Array", &SA5_access},
{0x1921103C, "Smart Array", &SA5_access},
{0x1922103C, "Smart Array", &SA5_access},
{0x1923103C, "Smart Array", &SA5_access},
{0x1924103C, "Smart Array", &SA5_access},
{0x1925103C, "Smart Array", &SA5_access},
{0x1926103C, "Smart Array", &SA5_access},
{0x1928103C, "Smart Array", &SA5_access},
{0x334d103C, "Smart Array P822se", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@ -2609,7 +2627,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
/* not in reqQ, if also not in cmpQ, must have already completed */
found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
if (!found) {
dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
msg);
return SUCCESS;
}
@ -3265,7 +3283,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.Timeout = 0; /* Don't time out */
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd;
c->Request.CDB[1] = 0x03; /* Reset target above */
c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
/* If bytes 4-7 are zero, it means reset the */
/* LunID device */
c->Request.CDB[4] = 0x00;
@ -3337,7 +3355,8 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
void __iomem *page_remapped = ioremap(page_base, page_offs + size);
void __iomem *page_remapped = ioremap_nocache(page_base,
page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
}


@ -1,7 +1,3 @@
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
ibmvscsic-y += ibmvscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o


@ -2241,6 +2241,21 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
return 0;
}
/**
* ibmvfc_match_evt - Match function for specified event
* @evt: ibmvfc event struct
* @match: event to match
*
* Returns:
* 1 if event matches / 0 if event does not match
**/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
if (evt == match)
return 1;
return 0;
}
/**
* ibmvfc_abort_task_set - Abort outstanding commands to the device
* @sdev: scsi device to abort commands
@ -2322,7 +2337,20 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
if (rc) {
sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
ibmvfc_reset_host(vhost);
rsp_rc = 0;
rsp_rc = -EIO;
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
if (rc == SUCCESS)
rsp_rc = 0;
rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
if (rc != SUCCESS) {
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_hard_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rsp_rc = 0;
}
goto out;
}
}
@ -2597,8 +2625,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_SCN_FABRIC:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
if (vhost->state < IBMVFC_HALTED) {
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
}
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:


@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.9"
#define IBMVFC_DRIVER_DATE "(August 5, 2010)"
#define IBMVFC_DRIVER_VERSION "1.0.10"
#define IBMVFC_DRIVER_DATE "(August 24, 2012)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45


@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct scsi_transport_template *ibmvscsi_transport_template;
#define IBMVSCSI_VERSION "1.5.9"
static struct ibmvscsi_ops *ibmvscsi_ops;
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO );
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata);
/* ------------------------------------------------------------
* Routines for managing the command/response queue
*/
/**
* ibmvscsi_handle_event: - Interrupt handler for crq events
* @irq: number of irq to handle, not used
* @dev_instance: ibmvscsi_host_data of host that received interrupt
*
* Disables interrupts and schedules srp_task
* Always returns IRQ_HANDLED
*/
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
{
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)dev_instance;
vio_disable_interrupts(to_vio_dev(hostdata->dev));
tasklet_schedule(&hostdata->srp_task);
return IRQ_HANDLED;
}
/**
* release_crq_queue: - Deallocates data and unregisters CRQ
* @queue: crq_queue to initialize and register
* @host_data: ibmvscsi_host_data of host
*
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
* the crq with the hypervisor.
*/
static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
free_irq(vdev->irq, (void *)hostdata);
tasklet_kill(&hostdata->srp_task);
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long)queue->msgs);
}
/**
* crq_queue_next_crq: - Returns the next entry in message queue
* @queue: crq_queue to use
*
* Returns pointer to next entry in queue, or NULL if there are no new
* entries in the CRQ.
*/
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
struct viosrp_crq *crq;
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
} else
crq = NULL;
spin_unlock_irqrestore(&queue->lock, flags);
return crq;
}
/**
* ibmvscsi_send_crq: - Send a CRQ
* @hostdata: the adapter
* @word1: the first 64 bits of the data
* @word2: the second 64 bits of the data
*/
static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
u64 word1, u64 word2)
{
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
/**
* ibmvscsi_task: - Process srps asynchronously
* @data: ibmvscsi_host_data of host
*/
static void ibmvscsi_task(void *data)
{
struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
struct viosrp_crq *crq;
int done = 0;
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = 0x00;
}
vio_enable_interrupts(vdev);
crq = crq_queue_next_crq(&hostdata->queue);
if (crq != NULL) {
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = 0x00;
} else {
done = 1;
}
}
}
static void gather_partition_info(void)
{
struct device_node *rootdn;
const char *ppartition_name;
const unsigned int *p_number_ptr;
/* Retrieve information about this partition */
rootdn = of_find_node_by_path("/");
if (!rootdn) {
return;
}
ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
partition_number = *p_number_ptr;
of_node_put(rootdn);
}
static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
memset(&hostdata->madapter_info, 0x00,
sizeof(hostdata->madapter_info));
dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
strncpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
hostdata->madapter_info.partition_number = partition_number;
hostdata->madapter_info.mad_version = 1;
hostdata->madapter_info.os_type = 2;
}
/**
* reset_crq_queue: - resets a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
*/
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
/* Close the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);
queue->cur = 0;
set_adapter_info(hostdata);
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
}
return rc;
}
/**
* initialize_crq_queue: - Initializes and registers CRQ with hypervisor
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
* Allocates a page for messages, maps it for dma, and registers
* the crq with the hypervisor.
* Returns zero on success.
*/
static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests)
{
int rc;
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
if (!queue->msgs)
goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
gather_partition_info();
set_adapter_info(hostdata);
retrc = rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
} else if (rc != 0) {
dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
queue->cur = 0;
spin_lock_init(&queue->lock);
tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
(unsigned long)hostdata);
if (request_irq(vdev->irq,
ibmvscsi_handle_event,
0, "ibmvscsi", (void *)hostdata) != 0) {
dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
vdev->irq);
goto req_irq_failed;
}
rc = vio_enable_interrupts(vdev);
if (rc != 0) {
dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
goto req_irq_failed;
}
return retrc;
req_irq_failed:
tasklet_kill(&hostdata->srp_task);
rc = 0;
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)queue->msgs);
malloc_failed:
return -1;
}
/**
* reenable_crq_queue: - reenables a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
*/
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
/* Re-enable the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
return rc;
}
/* ------------------------------------------------------------
* Routines for the event pool and event structs
*/
@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
}
if ((rc =
ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
list_del(&evt_struct->list);
del_timer(&evt_struct->timer);
@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
* @hostdata: ibmvscsi_host_data of host
*
*/
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata)
static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata)
{
long rc;
unsigned long flags;
@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0x01: /* Initialization message */
dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
if ((rc = ibmvscsi_ops->send_crq(hostdata,
0xC002000000000000LL, 0)) == 0) {
rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
if (rc == 0) {
/* Now login */
init_adapter(hostdata);
} else {
@ -1541,6 +1851,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
host_config = &evt_struct->iu.mad.host_config;
/* The transport length field is only 16-bit */
length = min(0xffff, length);
/* Set up a lun reset SRP command */
memset(host_config, 0x00, sizeof(*host_config));
host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
@ -1840,17 +2153,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
smp_rmb();
hostdata->reset_crq = 0;
rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
if (!rc)
rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
vio_enable_interrupts(to_vio_dev(hostdata->dev));
} else if (hostdata->reenable_crq) {
smp_rmb();
action = "enable";
rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
hostdata->reenable_crq = 0;
if (!rc)
rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
} else
return;
@ -1944,7 +2257,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto init_crq_failed;
}
rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
if (rc != 0 && rc != H_RESOURCE) {
dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
goto kill_kthread;
@ -1974,7 +2287,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
* to fail if the other end is not active. In that case we don't
* want to scan
*/
if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
|| rc == H_RESOURCE) {
/*
* Wait around max init_timeout secs for the adapter to finish
@ -2002,7 +2315,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
add_host_failed:
release_event_pool(&hostdata->pool, hostdata);
init_pool_failed:
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
kill_kthread:
kthread_stop(hostdata->work_thread);
init_crq_failed:
@ -2018,7 +2331,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
unmap_persist_bufs(hostdata);
release_event_pool(&hostdata->pool, hostdata);
ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
kthread_stop(hostdata->work_thread);
@ -2039,7 +2352,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
static int ibmvscsi_resume(struct device *dev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
return ibmvscsi_ops->resume(hostdata);
vio_disable_interrupts(to_vio_dev(hostdata->dev));
tasklet_schedule(&hostdata->srp_task);
return 0;
}
/**
@ -2076,9 +2392,7 @@ int __init ibmvscsi_module_init(void)
driver_template.can_queue = max_requests;
max_events = max_requests + 2;
if (firmware_has_feature(FW_FEATURE_VIO))
ibmvscsi_ops = &rpavscsi_ops;
else
if (!firmware_has_feature(FW_FEATURE_VIO))
return -ENODEV;
ibmvscsi_transport_template =

Просмотреть файл

@ -107,26 +107,4 @@ struct ibmvscsi_host_data {
dma_addr_t adapter_info_addr;
};
/* routines for managing a command/response queue */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata);
struct ibmvscsi_ops {
int (*init_crq_queue)(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests);
void (*release_crq_queue)(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests);
int (*reset_crq_queue)(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata);
int (*reenable_crq_queue)(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata);
int (*send_crq)(struct ibmvscsi_host_data *hostdata,
u64 word1, u64 word2);
int (*resume) (struct ibmvscsi_host_data *hostdata);
};
extern struct ibmvscsi_ops rpavscsi_ops;
#endif /* IBMVSCSI_H */


@ -1,368 +0,0 @@
/* ------------------------------------------------------------
* rpa_vscsi.c
* (C) Copyright IBM Corporation 1994, 2003
* Authors: Colin DeVilbiss (devilbis@us.ibm.com)
* Santiago Leon (santil@us.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* ------------------------------------------------------------
* RPA-specific functions of the SCSI host adapter for Virtual I/O devices
*
* This driver allows the Linux SCSI peripheral drivers to directly
* access devices in the hosting partition, either on an iSeries
* hypervisor system or a converged hypervisor system.
*/
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/hvcall.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
/* ------------------------------------------------------------
* Routines for managing the command/response queue
*/
/**
* rpavscsi_handle_event: - Interrupt handler for crq events
* @irq: number of irq to handle, not used
* @dev_instance: ibmvscsi_host_data of host that received interrupt
*
* Disables interrupts and schedules srp_task
* Always returns IRQ_HANDLED
*/
static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
{
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)dev_instance;
vio_disable_interrupts(to_vio_dev(hostdata->dev));
tasklet_schedule(&hostdata->srp_task);
return IRQ_HANDLED;
}
/**
* release_crq_queue: - Deallocates data and unregisters CRQ
* @queue: crq_queue to initialize and register
* @host_data: ibmvscsi_host_data of host
*
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
* the crq with the hypervisor.
*/
static void rpavscsi_release_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
free_irq(vdev->irq, (void *)hostdata);
tasklet_kill(&hostdata->srp_task);
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long)queue->msgs);
}
/**
* crq_queue_next_crq: - Returns the next entry in message queue
* @queue: crq_queue to use
*
* Returns pointer to next entry in queue, or NULL if there are no new
* entried in the CRQ.
*/
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
struct viosrp_crq *crq;
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
} else
crq = NULL;
spin_unlock_irqrestore(&queue->lock, flags);
return crq;
}
/**
* rpavscsi_send_crq: - Send a CRQ
* @hostdata: the adapter
* @word1: the first 64 bits of the data
* @word2: the second 64 bits of the data
*/
static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
u64 word1, u64 word2)
{
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
/**
* rpavscsi_task: - Process srps asynchronously
* @data: ibmvscsi_host_data of host
*/
static void rpavscsi_task(void *data)
{
struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
struct viosrp_crq *crq;
int done = 0;
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = 0x00;
}
vio_enable_interrupts(vdev);
if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
crq->valid = 0x00;
} else {
done = 1;
}
}
}
static void gather_partition_info(void)
{
struct device_node *rootdn;
const char *ppartition_name;
const unsigned int *p_number_ptr;
/* Retrieve information about this partition */
rootdn = of_find_node_by_path("/");
if (!rootdn) {
return;
}
ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
partition_number = *p_number_ptr;
of_node_put(rootdn);
}
static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
memset(&hostdata->madapter_info, 0x00,
sizeof(hostdata->madapter_info));
dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
strncpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
hostdata->madapter_info.partition_number = partition_number;
hostdata->madapter_info.mad_version = 1;
hostdata->madapter_info.os_type = 2;
}
/**
* reset_crq_queue: - resets a crq after a failure
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
*/
static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
/* Close the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);
queue->cur = 0;
set_adapter_info(hostdata);
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
}
return rc;
}
/**
* initialize_crq_queue: - Initializes and registers CRQ with hypervisor
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
* Allocates a page for messages, maps it for dma, and registers
* the crq with the hypervisor.
* Returns zero on success.
*/
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata,
int max_requests)
{
int rc;
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
if (!queue->msgs)
goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(hostdata->dev, queue->msg_token))
goto map_failed;
gather_partition_info();
set_adapter_info(hostdata);
retrc = rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
rc = rpavscsi_reset_crq_queue(queue,
hostdata);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
} else if (rc != 0) {
dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
queue->cur = 0;
spin_lock_init(&queue->lock);
tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
(unsigned long)hostdata);
if (request_irq(vdev->irq,
rpavscsi_handle_event,
0, "ibmvscsi", (void *)hostdata) != 0) {
dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
vdev->irq);
goto req_irq_failed;
}
rc = vio_enable_interrupts(vdev);
if (rc != 0) {
dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
goto req_irq_failed;
}
return retrc;
req_irq_failed:
tasklet_kill(&hostdata->srp_task);
rc = 0;
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)queue->msgs);
malloc_failed:
return -1;
}
/**
* reenable_crq_queue: - reenables a crq after
* @queue: crq_queue to initialize and register
* @hostdata: ibmvscsi_host_data of host
*
*/
static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
struct ibmvscsi_host_data *hostdata)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
/* Re-enable the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
return rc;
}
/**
* rpavscsi_resume: - resume after suspend
* @hostdata: ibmvscsi_host_data of host
*
*/
static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
{
vio_disable_interrupts(to_vio_dev(hostdata->dev));
tasklet_schedule(&hostdata->srp_task);
return 0;
}
struct ibmvscsi_ops rpavscsi_ops = {
.init_crq_queue = rpavscsi_init_crq_queue,
.release_crq_queue = rpavscsi_release_crq_queue,
.reset_crq_queue = rpavscsi_reset_crq_queue,
.reenable_crq_queue = rpavscsi_reenable_crq_queue,
.send_crq = rpavscsi_send_crq,
.resume = rpavscsi_resume,
};


@ -192,7 +192,7 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds [] = {
static int ipr_max_bus_speeds[] = {
IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
@ -562,9 +562,26 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
/**
* ipr_lock_and_done - Acquire lock and complete command
* @ipr_cmd: ipr command struct
*
* Return value:
* none
**/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
unsigned long lock_flags;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_cmd->done(ipr_cmd);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
* ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
* @ipr_cmd: ipr command struct
@ -611,33 +628,49 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
void (*fast_done) (struct ipr_cmnd *))
{
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
/**
* ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
* __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
* @ioa_cfg: ioa config struct
*
* Return value:
* pointer to ipr command struct
**/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd;
ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
list_del(&ipr_cmd->queue);
ipr_init_ipr_cmnd(ipr_cmd);
return ipr_cmd;
}
/**
* ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
* @ioa_cfg: ioa config struct
*
* Return value:
* pointer to ipr command struct
**/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
return ipr_cmd;
}
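The split above gives each command block two completion hooks: ->done keeps the historical semantics for error handling, while ->fast_done is what the interrupt path will invoke. How the two allocation entry points are intended to be used, based only on the code in this hunk:

	/* Internal/ERP command: completion is wrapped to run under the host lock */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);	/* fast_done = ipr_lock_and_done */

	/* Fast-path I/O (caller already holds the host lock): lock-free completion */
	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);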
/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
@ -1002,7 +1035,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
**/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
switch(proto) {
switch (proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
res->ata_class = ATA_DEV_ATA;
@ -3043,7 +3076,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
}
#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
@ -3055,7 +3088,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
**/
static void ipr_release_dump(struct kref *kref)
{
struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
unsigned long lock_flags = 0;
int i;
@ -3142,7 +3175,7 @@ restart:
break;
}
}
} while(did_work);
} while (did_work);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
@ -3268,7 +3301,7 @@ static ssize_t ipr_show_log_level(struct device *dev,
* number of bytes printed to buffer
**/
static ssize_t ipr_store_log_level(struct device *dev,
struct device_attribute *attr,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
@ -3315,7 +3348,7 @@ static ssize_t ipr_store_diagnostics(struct device *dev,
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -3682,7 +3715,7 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
unsigned long lock_flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -3746,7 +3779,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
len = snprintf(fname, 99, "%s", buf);
fname[len-1] = '\0';
if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
return -EIO;
}
@ -4612,7 +4645,7 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
* Return value:
* SUCCESS / FAILED
**/
static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
int rc;
@ -4634,7 +4667,7 @@ static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
return rc;
}
static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
int rc;
@ -4701,7 +4734,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
}
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
@ -4725,7 +4758,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -4753,7 +4786,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
* Return value:
* SUCCESS / FAILED
**/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
@ -4811,10 +4844,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
res->resetting_device = 0;
LEAVE;
return (rc ? FAILED : SUCCESS);
return rc ? FAILED : SUCCESS;
}
static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
@ -4910,7 +4943,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
* Return value:
* SUCCESS / FAILED
**/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
@ -4979,7 +5012,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
res->needs_sync_complete = 1;
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
@ -4989,7 +5022,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
* Return value:
* SUCCESS / FAILED
**/
static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
struct ipr_cmnd *ipr_cmd;
struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
LIST_HEAD(doneq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return IRQ_HANDLED;
rc = IRQ_HANDLED;
goto unlock_out;
}
ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->done(ipr_cmd);
list_move_tail(&ipr_cmd->queue, &doneq);
rc = IRQ_HANDLED;
@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return IRQ_HANDLED;
rc = IRQ_HANDLED;
goto unlock_out;
} else
break;
}
@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
unlock_out:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
return rc;
}
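The interrupt handler now harvests finished commands onto a private doneq while holding the host lock and runs their completions only after the lock is dropped, which shortens lock hold times on the hot path. A generic sketch of the idiom, with a hypothetical cmd_is_complete() test standing in for the HRRQ response parsing:

	static void reap_and_complete(struct ipr_ioa_cfg *ioa_cfg, spinlock_t *lock)
	{
		struct ipr_cmnd *cmd, *tmp;
		unsigned long flags;
		LIST_HEAD(doneq);

		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(cmd, tmp, &ioa_cfg->pending_q, queue)
			if (cmd_is_complete(cmd))	/* hypothetical test */
				list_move_tail(&cmd->queue, &doneq);
		spin_unlock_irqrestore(lock, flags);

		list_for_each_entry_safe(cmd, tmp, &doneq, queue) {
			list_del(&cmd->queue);
			del_timer(&cmd->timer);
			cmd->fast_done(cmd);	/* may take the lock itself, cf. ipr_lock_and_done */
		}
	}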
@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
unsigned long lock_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
} else
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
}
/**
* ipr_queuecommand - Queue a mid-layer request
* @shost: scsi host struct
* @scsi_cmd: scsi command struct
* @done: done function
*
* This function queues a request generated by the mid-layer.
*
@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
**/
static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
void (*done) (struct scsi_cmnd *))
static int ipr_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
int rc = 0;
unsigned long lock_flags;
int rc;
scsi_cmd->scsi_done = done;
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
res = scsi_cmd->device->hostdata;
ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
spin_unlock_irqrestore(shost->host_lock, lock_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
/*
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
return 0;
spin_unlock_irqrestore(shost->host_lock, lock_flags);
goto err_nodev;
}
if (ipr_is_gata(res) && res->sata_port)
return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
if (ipr_is_gata(res) && res->sata_port) {
rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
return rc;
}
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ipr_cmd->scsi_cmd = scsi_cmd;
ioarcb->res_handle = res->res_handle;
ipr_cmd->done = ipr_scsi_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
if (scsi_cmd->underflow == 0)
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
if (res->needs_sync_complete) {
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
if (ipr_is_gscsi(res))
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@ -5859,24 +5905,47 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
if (likely(rc == 0)) {
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
}
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
if (unlikely(rc != 0)) {
list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
spin_lock_irqsave(shost->host_lock, lock_flags);
if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
if (!rc)
scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
if (unlikely(ioa_cfg->ioa_is_dead)) {
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_dma_unmap(scsi_cmd);
goto err_nodev;
}
ioarcb->res_handle = res->res_handle;
if (res->needs_sync_complete) {
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
err_nodev:
spin_lock_irqsave(shost->host_lock, lock_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
}
static DEF_SCSI_QCMD(ipr_queuecommand)
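Dropping the DEF_SCSI_QCMD() wrapper means ipr_queuecommand() is now entered without the host lock held and takes shost->host_lock itself only around the state checks and the final hardware submission. A bare skeleton of that locking shape, under the assumption that request construction and DMA mapping are safe without the lock:

	static int sketch_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		unsigned long flags;

		spin_lock_irqsave(shost->host_lock, flags);
		/* validate adapter state, claim a command block */
		spin_unlock_irqrestore(shost->host_lock, flags);

		/* build the request and map DMA without the lock held */

		spin_lock_irqsave(shost->host_lock, flags);
		/* re-check state, then queue to hardware or back out */
		spin_unlock_irqrestore(shost->host_lock, flags);
		return 0;
	}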
/**
* ipr_ioctl - IOCTL handler
* @sdev: scsi device struct
@ -5907,7 +5976,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
* Return value:
* pointer to buffer with description string
**/
static const char * ipr_ioa_info(struct Scsi_Host *host)
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
static char buffer[512];
struct ipr_ioa_cfg *ioa_cfg;
@ -5965,7 +6034,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@ -6005,7 +6074,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@ -6330,7 +6399,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
int i;
if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
if (__is_processor(ipr_blocked_processors[i]))
return 1;
}
@ -6608,7 +6677,7 @@ static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
* none
**/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_mode_pages *mode_pages)
struct ipr_mode_pages *mode_pages)
{
int i, entry_length;
struct ipr_dev_bus_entry *bus;
@ -8022,7 +8091,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
rc = ipr_cmd->job_step(ipr_cmd);
} while(rc == IPR_RC_JOB_CONTINUE);
} while (rc == IPR_RC_JOB_CONTINUE);
}
/**
@ -8283,7 +8352,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
if (ioa_cfg->ipr_cmd_pool)
pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
@ -8363,8 +8432,8 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
dma_addr_t dma_addr;
int i;
ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
@ -8378,7 +8447,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
if (!ipr_cmd) {
ipr_free_cmd_blks(ioa_cfg);
@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
sata_port_info.flags, &ipr_sata_ops);
ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
@ -8964,7 +9032,7 @@ static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
int target, lun;
for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
@ -9010,7 +9078,7 @@ static void __ipr_remove(struct pci_dev *pdev)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@ -9139,7 +9207,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

View file

@ -38,8 +38,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.5.3"
#define IPR_DRIVER_DATE "(March 10, 2012)"
#define IPR_DRIVER_VERSION "2.5.4"
#define IPR_DRIVER_DATE "(July 11, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -1525,6 +1525,7 @@ struct ipr_cmnd {
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
void (*fast_done) (struct ipr_cmnd *);
void (*done) (struct ipr_cmnd *);
int (*job_step) (struct ipr_cmnd *);
int (*job_step_failed) (struct ipr_cmnd *);

View file

@ -1044,7 +1044,7 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
return SCI_SUCCESS;
}
void isci_host_scan_start(struct Scsi_Host *shost)
void isci_host_start(struct Scsi_Host *shost)
{
struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
@ -1079,7 +1079,6 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
{
task->lldd_task = NULL;
if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
@ -1087,16 +1086,19 @@ void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_ta
dev_dbg(&ihost->pdev->dev,
"%s: Normal - ireq/task = %p/%p\n",
__func__, ireq, task);
task->lldd_task = NULL;
task->task_done(task);
} else {
dev_dbg(&ihost->pdev->dev,
"%s: Error - ireq/task = %p/%p\n",
__func__, ireq, task);
if (sas_protocol_ata(task->task_proto))
task->lldd_task = NULL;
sas_task_abort(task);
}
}
} else
task->lldd_task = NULL;
if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
wake_up_all(&ihost->eventq);
@ -1120,10 +1122,16 @@ void isci_host_completion_routine(unsigned long data)
sci_controller_completion_handler(ihost);
spin_unlock_irq(&ihost->scic_lock);
/* the coalescence timeout doubles at each encoding step, so
/*
* we subtract SCI_MAX_PORTS to account for the number of dummy TCs
* issued for hardware issue workaround
*/
active = isci_tci_active(ihost) - SCI_MAX_PORTS;
/*
* the coalescence timeout doubles at each encoding step, so
* update it based on the ilog2 value of the outstanding requests
*/
active = isci_tci_active(ihost);
writel(SMU_ICC_GEN_VAL(NUMBER, active) |
SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
&ihost->smu_registers->interrupt_coalesce_control);
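The encoding comment is easy to check numerically: each doubling of the outstanding task contexts bumps the coalesce timer field by one. A standalone illustration (plain C, not driver code; the base value is an assumption standing in for ISCI_COALESCE_BASE):

	#include <stdio.h>

	static unsigned int ilog2_u32(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		const unsigned int base = 9;	/* assumed stand-in for ISCI_COALESCE_BASE */
		unsigned int active;

		for (active = 1; active <= 256; active <<= 1)
			printf("active=%3u -> timer field=%u\n",
			       active, base + ilog2_u32(active));
		return 0;
	}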

View file

@ -473,7 +473,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
void isci_host_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);

View file

@ -156,7 +156,7 @@ static struct scsi_host_template isci_sht = {
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = isci_host_scan_finished,
.scan_start = isci_host_scan_start,
.scan_start = isci_host_start,
.change_queue_depth = sas_change_queue_depth,
.change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
@ -721,11 +721,67 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
}
}
#ifdef CONFIG_PM
static int isci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct isci_host *ihost;
int i;
for_each_isci_host(i, ihost, pdev) {
sas_suspend_ha(&ihost->sas_ha);
isci_host_deinit(ihost);
}
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int isci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct isci_host *ihost;
int rc, i;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
rc = pcim_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev,
"enabling device failure after resume(%d)\n", rc);
return rc;
}
pci_set_master(pdev);
for_each_isci_host(i, ihost, pdev) {
sas_prep_resume_ha(&ihost->sas_ha);
isci_host_init(ihost);
isci_host_start(ihost->sas_ha.core.shost);
wait_for_start(ihost);
sas_resume_ha(&ihost->sas_ha);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
#endif
static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
.remove = __devexit_p(isci_pci_remove),
#ifdef CONFIG_PM
.driver.pm = &isci_pm_ops,
#endif
};
static __init int isci_init(void)

View file

@ -55,7 +55,7 @@ static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
static struct scsi_host_template iscsi_sw_tcp_sht;
static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = 512;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
static int iscsi_sw_tcp_dbg;

View file

@ -580,10 +580,7 @@ int sas_ata_init(struct domain_device *found_dev)
struct ata_port *ap;
int rc;
ata_host_init(&found_dev->sata_dev.ata_host,
ha->dev,
sata_port_info.flags,
&sas_sata_ops);
ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
&sata_port_info,
shost);
@ -700,6 +697,92 @@ void sas_probe_sata(struct asd_sas_port *port)
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
}
static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
bool retry = false;
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int rc;
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
rc = dev->sata_dev.pm_result;
if (rc == -EAGAIN)
retry = true;
else if (rc) {
/* since we don't have a
* ->port_{suspend|resume} routine in our
* ata_port ops, and no entanglements with
* acpi, suspend should just be a mechanical trip
* through eh; catch cases where these
* assumptions are invalidated
*/
WARN_ONCE(1, "failed %s %s error: %d\n", func,
dev_name(&dev->rphy->dev), rc);
}
/* if libata failed to power manage the device, tear it down */
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, func, -ENODEV);
}
return retry;
}
void sas_suspend_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
if (!dev_is_sata(dev))
continue;
sata = &dev->sata_dev;
if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
continue;
sata->pm_result = -EIO;
ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
}
mutex_unlock(&port->ha->disco_mutex);
if (sas_ata_flush_pm_eh(port, __func__))
goto retry;
}
void sas_resume_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
retry:
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
if (!dev_is_sata(dev))
continue;
sata = &dev->sata_dev;
if (sata->ap->pm_mesg.event == PM_EVENT_ON)
continue;
sata->pm_result = -EIO;
ata_sas_port_async_resume(sata->ap, &sata->pm_result);
}
mutex_unlock(&port->ha->disco_mutex);
if (sas_ata_flush_pm_eh(port, __func__))
goto retry;
}
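Both loops above share one protocol: pm_result is primed to -EIO, the async libata operation overwrites it, and sas_ata_flush_pm_eh() turns a leftover -EAGAIN (a racing PM_PENDING in the port, per the libata change earlier in this diff) into a full retry of the pass. Reduced to its shape:

	retry:
		mutex_lock(&port->ha->disco_mutex);
		/* for each SATA device: prime sata->pm_result = -EIO and kick
		 * ata_sas_port_async_{suspend,resume}(sata->ap, &sata->pm_result)
		 */
		mutex_unlock(&port->ha->disco_mutex);
		if (sas_ata_flush_pm_eh(port, __func__))	/* true iff some -EAGAIN */
			goto retry;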
/**

View file

@ -24,6 +24,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
@ -180,16 +181,18 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (i->dft->lldd_dev_found) {
res = i->dft->lldd_dev_found(dev);
if (res) {
printk("sas: driver on pcidev %s cannot handle "
"device %llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
kref_get(&dev->kref);
if (!i->dft->lldd_dev_found)
return 0;
res = i->dft->lldd_dev_found(dev);
if (res) {
printk("sas: driver on pcidev %s cannot handle "
"device %llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
set_bit(SAS_DEV_FOUND, &dev->state);
kref_get(&dev->kref);
return res;
}
@ -200,7 +203,10 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (i->dft->lldd_dev_gone) {
if (!i->dft->lldd_dev_gone)
return;
if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
i->dft->lldd_dev_gone(dev);
sas_put_device(dev);
}
@ -234,6 +240,47 @@ static void sas_probe_devices(struct work_struct *work)
}
}
static void sas_suspend_devices(struct work_struct *work)
{
struct asd_sas_phy *phy;
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct Scsi_Host *shost = port->ha->core.shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
sas_suspend_sata(port);
/* lldd is free to forget the domain_device across the
* suspension, we force the issue here to keep the reference
* counts aligned
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node)
sas_notify_lldd_dev_gone(dev);
/* we are suspending, so we know events are disabled and
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
}
}
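The forced dev_gone in the suspend path leans on the SAS_DEV_FOUND bit introduced above to keep the kref balanced: found takes a reference and sets the bit, gone only drops the reference if it can clear the bit, so a second gone (or a gone with no prior found) is a no-op. In outline:

	sas_notify_lldd_dev_found(dev);	/* lldd_dev_found() + set_bit(SAS_DEV_FOUND) + kref_get() */
	sas_notify_lldd_dev_gone(dev);	/* test_and_clear_bit() -> lldd_dev_gone() + sas_put_device() */
	sas_notify_lldd_dev_gone(dev);	/* bit already clear: nothing happens */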
static void sas_resume_devices(struct work_struct *work)
{
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_RESUME, &port->disc.pending);
sas_resume_sata(port);
}
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
* @end: pointer to domain device of interest
@ -530,6 +577,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
[DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
[DISCE_DESTRUCT] = sas_destruct_devices,
};

View file

@ -41,6 +41,7 @@ static const char *sas_phye_str[] = {
[1] = "PHYE_OOB_DONE",
[2] = "PHYE_OOB_ERROR",
[3] = "PHYE_SPINUP_HOLD",
[4] = "PHYE_RESUME_TIMEOUT",
};
void sas_dprint_porte(int phyid, enum port_event pe)

View file

@ -134,7 +134,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
&phy->port_events[event].work, ha);
}
static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
struct sas_ha_struct *ha = phy->ha;
@ -159,7 +159,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
sas_ha->notify_ha_event = notify_ha_event;
sas_ha->notify_port_event = notify_port_event;
sas_ha->notify_phy_event = notify_phy_event;
sas_ha->notify_phy_event = sas_notify_phy_event;
return 0;
}

View file

@ -178,7 +178,7 @@ Undo_phys:
return error;
}
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
/* Set the state to unregistered to avoid further unchained
* events to be queued, and flush any in-progress drainers
@ -189,7 +189,11 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
spin_unlock_irq(&sas_ha->lock);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
}
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
sas_disable_events(sas_ha);
sas_unregister_ports(sas_ha);
/* flush unregistration work */
@ -381,6 +385,90 @@ int sas_set_phy_speed(struct sas_phy *phy,
return ret;
}
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->port_events_pending = 0;
phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
EXPORT_SYMBOL(sas_prep_resume_ha);
static int phys_suspended(struct sas_ha_struct *ha)
{
int i, rc = 0;
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
if (phy->suspended)
rc++;
}
return rc;
}
void sas_resume_ha(struct sas_ha_struct *ha)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
/* deform ports on phys that did not resume
* at this point we may be racing the phy coming back (as posted
* by the lldd). So we post the event and once we are in the
* libsas context check that the phy remains suspended before
* tearing it down.
*/
i = phys_suspended(ha);
if (i)
dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
i, i > 1 ? "s" : "");
wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
if (phy->suspended) {
dev_warn(&phy->phy->dev, "resume timeout\n");
sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
}
}
/* all phys are back up or timed out, turn on i/o so we can
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
sas_disable_events(ha);
scsi_block_requests(ha->core.shost);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
sas_discover_event(port, DISCE_SUSPEND);
}
/* flush suspend events while unregistered */
mutex_lock(&ha->drain_mutex);
__sas_drain_work(ha);
mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);
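Taken together with the isci hooks earlier in this diff, the intended LLDD calling sequence looks roughly like the following (a sketch; my_dev_to_ha() and the quiesce/reinit steps are driver-specific placeholders):

	static int my_lldd_suspend(struct device *dev)
	{
		struct sas_ha_struct *ha = my_dev_to_ha(dev);	/* hypothetical accessor */

		sas_suspend_ha(ha);	/* block I/O, queue DISCE_SUSPEND per port */
		/* ... quiesce and power down the controller ... */
		return 0;
	}

	static int my_lldd_resume(struct device *dev)
	{
		struct sas_ha_struct *ha = my_dev_to_ha(dev);	/* hypothetical accessor */

		sas_prep_resume_ha(ha);	/* clear stale phy events, mark HA registered */
		/* ... reinitialize and restart the controller so phys come back ... */
		sas_resume_ha(ha);	/* waits up to 25s for suspended phys */
		return 0;
	}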
static void sas_phy_release(struct sas_phy *phy)
{
kfree(phy->hostdata);

View file

@ -89,6 +89,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);

View file

@ -94,6 +94,25 @@ static void sas_phye_spinup_hold(struct work_struct *work)
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
static void sas_phye_resume_timeout(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
return;
}
phy->error = 0;
phy->suspended = 0;
sas_deform_port(phy, 1);
}
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
@ -105,6 +124,8 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
};
static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {

View file

@ -39,6 +39,49 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
return true;
}
static void sas_resume_port(struct asd_sas_phy *phy)
{
struct domain_device *dev;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
if (port->suspended)
port->suspended = 0;
else {
/* we only need to handle "link returned" actions once */
return;
}
/* if the port came back:
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
continue;
}
if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
dev->ex_dev.ex_change_count = -1;
for (i = 0; i < dev->ex_dev.num_phys; i++) {
struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
phy->phy_change_count = -1;
}
}
}
sas_discover_event(port, DISCE_RESUME);
}
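Setting the cached change counts to -1 works on the assumption that the revalidation path compares the cached value against the count read back from the expander and rediscovers on any mismatch; -1 can never match a real count, so every expander phy is rechecked. A hypothetical form of that comparison:

	/* hypothetical helper mirroring the assumed revalidation test */
	static bool ex_phy_needs_recheck(int cached_count, int fresh_count)
	{
		return cached_count != fresh_count;	/* -1 never matches */
	}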
/**
* sas_form_port -- add this phy to a port
* @phy: the phy of interest
@ -58,7 +101,14 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
sas_deform_port(phy, 0);
else {
else if (phy->suspended) {
phy->suspended = 0;
sas_resume_port(phy);
/* phy came back, try to cancel the timeout */
wake_up(&sas_ha->eh_wait_q);
return;
} else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
phy->port->num_phys);

View file

@ -73,6 +73,8 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
/* Error Attention event polling interval */
#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
@ -684,6 +686,7 @@ struct lpfc_hba {
#define LPFC_FCF_FOV 1 /* Fast fcf failover */
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
uint32_t cfg_fcp_io_sched;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@ -695,6 +698,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_wq_count;
uint32_t cfg_fcp_eq_count;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@ -732,7 +736,7 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
uint32_t fcp_qidx; /* next work queue to post work to */
atomic_t fcp_qidx; /* next work queue to post work to */
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */

View file

@ -3643,18 +3643,25 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
int val = 0, i;
/* fcp_imax is only valid for SLI4 */
if (phba->sli_rev != LPFC_SLI_REV4)
return -EINVAL;
/* Sanity check on user data */
if (!isdigit(buf[0]))
return -EINVAL;
if (sscanf(buf, "%i", &val) != 1)
return -EINVAL;
/* Value range is [636,651042] */
if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
/*
* Value range for the HBA is [5000,5000000]
* The value for each EQ depends on how many EQs are configured.
*/
if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, i);
return strlen(buf);
@ -3662,13 +3669,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
/*
# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
# for the HBA.
#
# Value range is [636,651042]. Default value is 10000.
# Value range is [5,000 to 5,000,000]. Default value is 50,000.
*/
static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
static int lpfc_fcp_imax = LPFC_DEF_IMAX;
module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_imax,
"Set the maximum number of fast-path FCP interrupts per second");
"Set the maximum number of FCP interrupts per second per HBA");
lpfc_param_show(fcp_imax)
/**
@ -3687,14 +3695,19 @@ lpfc_param_show(fcp_imax)
static int
lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
{
if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
if (phba->sli_rev != LPFC_SLI_REV4) {
phba->cfg_fcp_imax = 0;
return 0;
}
if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
phba->cfg_fcp_imax = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3016 fcp_imax: %d out of range, using default\n", val);
phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
phba->cfg_fcp_imax = LPFC_DEF_IMAX;
return 0;
}
@ -3764,6 +3777,16 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
*/
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
# lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
# range is [0,1]. Default value is 0.
# For [0], FCP commands are issued to Work Queues in a round robin fashion.
# For [1], FCP commands are issued to a Work Queue associated with the
# current CPU.
*/
LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
@ -3844,20 +3867,32 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
/*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
# This parameter is ignored and will eventually be deprecated
#
# Value range is [1,31]. Default value is 4.
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP work queues, if possible");
/*
# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 1.
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP event queues, if possible");
/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
@ -3882,6 +3917,17 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
# 0 = disabled (default)
# 1 = enabled
# Value range is [0,1]. Default value is 0.
*/
unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
@ -3976,6 +4022,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
@ -4002,6 +4049,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@ -4964,6 +5012,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@ -4980,6 +5029,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);

View file

@ -195,7 +195,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & 0xff) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@ -1234,7 +1234,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & 0xff) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@ -1714,6 +1714,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
phba->sli4_hba.lnk_info.lnk_no);
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
@ -4796,7 +4798,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
menlo_resp->xri = rsp->ulpContext;
if (rsp->ulpStatus) {
if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (rsp->un.ulpWord[4] & 0xff) {
switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;

View file

@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -391,6 +390,7 @@ extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
extern unsigned int lpfc_prot_mask;
extern unsigned char lpfc_prot_guard;
extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@ -457,6 +457,8 @@ int lpfc_sli4_queue_create(struct lpfc_hba *);
void lpfc_sli4_queue_destroy(struct lpfc_hba *);
void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli_abts_recover_port(struct lpfc_vport *,
struct lpfc_nodelist *);
int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
int lpfc_issue_reg_vfi(struct lpfc_vport *);
int lpfc_issue_unreg_vfi(struct lpfc_vport *);

View file

@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@ -633,7 +634,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
(irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
/* CT command is being retried */
@ -783,7 +785,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch (irsp->un.ulpWord[4]) {
switch ((irsp->un.ulpWord[4] &
IOERR_PARAM_MASK)) {
case IOERR_NO_RESOURCES:
/* We don't increment the retry
* count for this case.
@ -908,8 +912,10 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
(irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_SLI_DOWN) ||
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;

View file

@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
len += snprintf(buf+len, size-len,
"Ring %d: CMD GetInx:%d (Max:%d Next:%d "
"Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
i, pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx,
pring->flag, pgpp->rspPutInx, pring->numRiocb);
i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
pring->sli.sli3.next_cmdidx,
pring->sli.sli3.local_getidx,
pring->flag, pgpp->rspPutInx,
pring->sli.sli3.numRiocb);
}
if (phba->sli_rev <= LPFC_SLI_REV3) {
@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
case NLP_STE_PRLI_ISSUE:
statep = "PRLI ";
break;
case NLP_STE_LOGO_ISSUE:
statep = "LOGO ";
break;
case NLP_STE_UNMAPPED_NODE:
statep = "UNMAP ";
break;
@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7));
len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
ndlp->nlp_rpi, ndlp->nlp_flag);
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
len += snprintf(buf+len, size-len, "RPI:%03d ",
ndlp->nlp_rpi);
else
len += snprintf(buf+len, size-len, "RPI:none ");
len += snprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
@ -1999,207 +2009,298 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
int len = 0, fcp_qidx;
int len = 0;
char *pbuffer;
int x, cnt;
int max_cnt;
struct lpfc_queue *qp = NULL;
if (!debug->buffer)
debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
if (!debug->buffer)
return 0;
pbuffer = debug->buffer;
max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
if (*ppos)
return 0;
/* Get slow-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path EQ information:\n");
if (phba->sli4_hba.sp_eq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.sp_eq->queue_id,
phba->sli4_hba.sp_eq->entry_count,
phba->sli4_hba.sp_eq->entry_size,
phba->sli4_hba.sp_eq->host_index,
phba->sli4_hba.sp_eq->hba_index);
}
spin_lock_irq(&phba->hbalock);
/* Get fast-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path EQ information:\n");
if (phba->sli4_hba.fp_eq) {
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
fcp_qidx++) {
if (phba->sli4_hba.fp_eq[fcp_qidx]) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
}
}
}
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Fast-path event queue */
if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
cnt = phba->cfg_fcp_io_channel;
/* Get mailbox complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX CQ information:\n");
if (phba->sli4_hba.mbx_cq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.mbx_cq->assoc_qid);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.mbx_cq->queue_id,
phba->sli4_hba.mbx_cq->entry_count,
phba->sli4_hba.mbx_cq->entry_size,
phba->sli4_hba.mbx_cq->host_index,
phba->sli4_hba.mbx_cq->hba_index);
}
for (x = 0; x < cnt; x++) {
/* Get slow-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS CQ information:\n");
if (phba->sli4_hba.els_cq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.els_cq->assoc_qid);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID [%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.els_cq->queue_id,
phba->sli4_hba.els_cq->entry_count,
phba->sli4_hba.els_cq->entry_size,
phba->sli4_hba.els_cq->host_index,
phba->sli4_hba.els_cq->hba_index);
}
/* Fast-path EQ */
qp = phba->sli4_hba.hba_eq[x];
if (!qp)
goto proc_cq;
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
fcp_qidx = 0;
if (phba->sli4_hba.fcp_cq) {
do {
if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\nHBA EQ info: "
"EQ-STAT[max:x%x noE:x%x "
"bs:x%x proc:x%llx]\n",
qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"EQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id,
qp->entry_count,
qp->entry_size,
qp->host_index,
qp->hba_index);
/* Reset max counter */
qp->EQ_max_eqe = 0;
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
proc_cq:
/* Fast-path FCP CQ */
qp = phba->sli4_hba.fcp_cq[x];
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tFCP CQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocEQID[%02d]: "
"CQ STAT[max:x%x relw:x%x "
"xabt:x%x wq:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
}
} while (++fcp_qidx < phba->cfg_fcp_eq_count);
len += snprintf(pbuffer+len,
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index);
/* Reset max counter */
qp->CQ_max_cqe = 0;
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
if (len >= max_cnt)
goto too_big;
/* Get mailbox queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX MQ information:\n");
if (phba->sli4_hba.mbx_wq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.mbx_wq->assoc_qid);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.mbx_wq->queue_id,
phba->sli4_hba.mbx_wq->entry_count,
phba->sli4_hba.mbx_wq->entry_size,
phba->sli4_hba.mbx_wq->host_index,
phba->sli4_hba.mbx_wq->hba_index);
}
/* Fast-path FCP WQ */
qp = phba->sli4_hba.fcp_wq[x];
/* Get slow-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS WQ information:\n");
if (phba->sli4_hba.els_wq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.els_wq->assoc_qid);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_wq->entry_count,
phba->sli4_hba.els_wq->entry_size,
phba->sli4_hba.els_wq->host_index,
phba->sli4_hba.els_wq->hba_index);
}
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tFCP WQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocCQID[%02d]: "
"WQ-STAT[oflow:x%x posted:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tWQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id,
qp->entry_count,
qp->entry_size,
qp->host_index,
qp->hba_index);
/* Get fast-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP WQ information:\n");
if (phba->sli4_hba.fcp_wq) {
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
fcp_qidx++) {
if (!phba->sli4_hba.fcp_wq[fcp_qidx])
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
if (x)
continue;
len += snprintf(pbuffer+len,
/* Only EQ 0 has slow path CQs configured */
/* Slow-path mailbox CQ */
qp = phba->sli4_hba.mbx_cq;
if (qp) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
len += snprintf(pbuffer+len,
"\tMBX CQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], WQE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
"AssocEQID[%02d]: "
"CQ-STAT[mbox:x%x relw:x%x "
"xabt:x%x wq:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
}
/* Slow-path MBOX MQ */
qp = phba->sli4_hba.mbx_wq;
if (qp) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tMBX MQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocCQID[%02d]:\n",
phba->sli4_hba.mbx_wq->assoc_qid);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tWQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
}
/* Slow-path ELS response CQ */
qp = phba->sli4_hba.els_cq;
if (qp) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tELS CQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocEQID[%02d]: "
"CQ-STAT[max:x%x relw:x%x "
"xabt:x%x wq:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID [%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index);
/* Reset max counter */
qp->CQ_max_cqe = 0;
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
}
/* Slow-path ELS WQ */
qp = phba->sli4_hba.els_wq;
if (qp) {
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tELS WQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocCQID[%02d]: "
" WQ-STAT[oflow:x%x "
"posted:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1,
(unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tWQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]",
qp->queue_id, qp->entry_count,
qp->entry_size, qp->host_index,
qp->hba_index);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
}
if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
/* Slow-path RQ header */
qp = phba->sli4_hba.hdr_rq;
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tRQ info: ");
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"AssocCQID[%02d]: "
"RQ-STAT[nopost:x%x nobuf:x%x "
"trunc:x%x rcv:x%llx]\n",
qp->assoc_qid,
qp->q_cnt_1, qp->q_cnt_2,
qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tHQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]\n",
qp->queue_id,
qp->entry_count,
qp->entry_size,
qp->host_index,
qp->hba_index);
/* Slow-path RQ data */
qp = phba->sli4_hba.dat_rq;
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\t\tDQID[%02d], "
"QE-CNT[%04d], QE-SIZE[%04d], "
"HOST-IDX[%04d], PORT-IDX[%04d]\n",
qp->queue_id,
qp->entry_count,
qp->entry_size,
qp->host_index,
qp->hba_index);
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
}
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
/* Get receive queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path RQ information:\n");
if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.hdr_rq->assoc_qid);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tHQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.hdr_rq->queue_id,
phba->sli4_hba.hdr_rq->entry_count,
phba->sli4_hba.hdr_rq->entry_size,
phba->sli4_hba.hdr_rq->host_index,
phba->sli4_hba.hdr_rq->hba_index);
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tDQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.dat_rq->queue_id,
phba->sli4_hba.dat_rq->entry_count,
phba->sli4_hba.dat_rq->entry_size,
phba->sli4_hba.dat_rq->host_index,
phba->sli4_hba.dat_rq->hba_index);
}
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
too_big:
len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
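
Each dump above appends with snprintf(pbuffer+len, SIZE-len, ...) and escapes through the too_big label once len crosses max_cnt, so one oversized queue set degrades to a "Truncated ..." marker instead of overrunning the buffer. A minimal userspace sketch of that pattern (buffer size, queue names and counts are invented for the demo):

#include <stdio.h>

#define BUF_SIZE 128    /* stand-in for LPFC_QUE_INFO_GET_BUF_SIZE */

int main(void)
{
	char pbuffer[BUF_SIZE];
	int len = 0, max_cnt = BUF_SIZE - 32, i;

	for (i = 0; i < 100; i++) {
		/* snprintf never writes more than BUF_SIZE - len bytes and
		 * returns the length it wanted, so len grows monotonically. */
		len += snprintf(pbuffer + len, BUF_SIZE - len,
				"QID[%02d] HOST-IDX[%04d]\n", i, i * 8);
		if (len >= max_cnt)
			goto too_big;    /* same bail-out as the driver */
	}
	fputs(pbuffer, stdout);
	return 0;
too_big:
	/* max_cnt sits well below BUF_SIZE, so this final append is safe */
	snprintf(pbuffer + len, BUF_SIZE - len, "Truncated ...\n");
	fputs(pbuffer, stdout);
	return 0;
}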
@ -2408,31 +2509,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
/* Slow-path event queue */
if (phba->sli4_hba.sp_eq &&
phba->sli4_hba.sp_eq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.sp_eq, index, count);
if (rc)
goto error_out;
idiag.ptr_private = phba->sli4_hba.sp_eq;
goto pass_check;
}
/* Fast-path event queue */
if (phba->sli4_hba.fp_eq) {
for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
if (phba->sli4_hba.fp_eq[qidx] &&
phba->sli4_hba.fp_eq[qidx]->queue_id ==
/* HBA event queue */
if (phba->sli4_hba.hba_eq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
qidx++) {
if (phba->sli4_hba.hba_eq[qidx] &&
phba->sli4_hba.hba_eq[qidx]->queue_id ==
queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fp_eq[qidx],
phba->sli4_hba.hba_eq[qidx],
index, count);
if (rc)
goto error_out;
idiag.ptr_private =
phba->sli4_hba.fp_eq[qidx];
phba->sli4_hba.hba_eq[qidx];
goto pass_check;
}
}
@ -2479,7 +2570,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx];
goto pass_check;
}
} while (++qidx < phba->cfg_fcp_eq_count);
} while (++qidx < phba->cfg_fcp_io_channel);
}
goto error_out;
break;
@ -2511,7 +2602,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
qidx++) {
if (!phba->sli4_hba.fcp_wq[qidx])
continue;
if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@ -4490,7 +4582,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_wq(phba);
lpfc_debug_dump_els_wq(phba);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
lpfc_debug_dump_hdr_rq(phba);
@ -4501,14 +4593,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_cq(phba);
lpfc_debug_dump_els_cq(phba);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
/*
* Dump Event Queues (EQs)
*/
lpfc_debug_dump_sp_eq(phba);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
}


@ -36,6 +36,9 @@
/* dumpHostSlim output buffer size */
#define LPFC_DUMPHOSTSLIM_SIZE 4096
/* dumpSLIqinfo output buffer size */
#define LPFC_DUMPSLIQINFO_SIZE 4096
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
@ -366,7 +369,7 @@ static inline void
lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
{
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count)
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@ -388,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
int fcp_cqidx, fcp_cqid;
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count)
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
@ -410,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
}
/**
* lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
* lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
* @phba: Pointer to HBA context object.
* @fcp_wqidx: Index to a FCP work queue.
*
@ -418,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
* associated to the FCP work queue specified by the @fcp_wqidx.
**/
static inline void
lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
{
struct lpfc_queue *qdesc;
int fcp_eqidx, fcp_eqid;
int fcp_cqidx, fcp_cqid;
/* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count)
if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return;
} else {
if (fcp_cqidx > 0)
return;
}
if (phba->cfg_fcp_eq_count == 0) {
fcp_eqidx = -1;
fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
qdesc = phba->sli4_hba.sp_eq;
} else {
fcp_eqidx = fcp_cqidx;
fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
}
fcp_eqidx = fcp_cqidx;
fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
"EQ[Idx:%d|Qid:%d]\n",
@ -542,25 +539,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
}
/**
* lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the slow-path event queue.
**/
static inline void
lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
{
printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
"EQ[Qid:%d]:\n",
phba->sli4_hba.mbx_wq->queue_id,
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.mbx_cq->queue_id,
phba->sli4_hba.els_cq->queue_id,
phba->sli4_hba.sp_eq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
}
/**
* lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
* @phba: Pointer to HBA context object.
@ -574,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{
int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
break;
if (wq_idx < phba->cfg_fcp_wq_count) {
if (wq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
return;
@ -644,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
do {
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
break;
} while (++cq_idx < phba->cfg_fcp_eq_count);
} while (++cq_idx < phba->cfg_fcp_io_channel);
if (cq_idx < phba->cfg_fcp_eq_count) {
if (cq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
return;
@ -677,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break;
}
if (eq_idx < phba->cfg_fcp_eq_count) {
if (eq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return;
}
if (phba->sli4_hba.sp_eq->queue_id == qid) {
printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
}
}
void lpfc_debug_dump_all_queues(struct lpfc_hba *);


@ -145,6 +145,7 @@ struct lpfc_node_rrq {
#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */
#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
ACC */
#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
@ -201,10 +202,11 @@ struct lpfc_node_rrq {
#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
#define NLP_STE_MAX_STATE 0x8
#define NLP_STE_LOGO_ISSUE 0x5 /* LOGO was sent to NL_PORT */
#define NLP_STE_UNMAPPED_NODE 0x6 /* PRLI completed from NL_PORT */
#define NLP_STE_MAPPED_NODE 0x7 /* Identified as a FCP Target */
#define NLP_STE_NPR_NODE 0x8 /* NPort disappeared */
#define NLP_STE_MAX_STATE 0x9
#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
/* For UNUSED_NODE state, the node has just been allocated.

Просмотреть файл

@ -962,7 +962,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
(irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
phba->fcoe_cvl_eventtag_attn =
@ -1108,8 +1109,10 @@ flogifail:
/* Start discovery */
lpfc_disc_start(vport);
} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_SLI_ABORTED) &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
lpfc_issue_clear_la(phba, vport);
@ -1476,6 +1479,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return ndlp;
memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
ndlp, ndlp->nlp_DID, new_ndlp);
if (!new_ndlp) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
@ -1527,6 +1534,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* The new_ndlp is replacing ndlp totally, so we need
* to put ndlp on UNUSED list and try to free it.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Fix up the rport accordingly */
rport = ndlp->rport;
@ -1559,23 +1569,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_drop_node(vport, ndlp);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3180 PLOGI confirm SWAP: %x %x\n",
new_ndlp->nlp_DID, keepDID);
lpfc_unreg_rpi(vport, ndlp);
/* Two ndlps cannot have the same did */
ndlp->nlp_DID = keepDID;
if (phba->sli_rev == LPFC_SLI_REV4)
memcpy(&ndlp->active_rrqs.xri_bitmap,
&rrq.xri_bitmap,
sizeof(ndlp->active_rrqs.xri_bitmap));
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over the
* state and names.
* and the did has already been swapped, copy over state.
* The new WWNs are already in new_ndlp since that's what
* we looked it up by in the beginning of this routine.
*/
memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
new_ndlp->nlp_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Since we are switching over to the new_ndlp, the old
* ndlp should be put in the NPR state, unless we have
* already started re-discovery on it.
*/
if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
(ndlp->nlp_state == NLP_STE_MAPPED_NODE))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
@ -2367,6 +2388,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_sli *psli;
struct lpfcMboxq *mbox;
unsigned long flags;
uint32_t skip_recovery = 0;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@ -2381,47 +2404,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"LOGO cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
if (lpfc_els_chk_latt(vport)) {
skip_recovery = 1;
goto out;
}
/* Check to see if link went down during discovery */
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
/* NLP_EVT_DEVICE_RM should unregister the RPI
* which should abort all outstanding IOs.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
skip_recovery = 1;
goto out;
}
if (irsp->ulpStatus) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
skip_recovery = 1;
goto out;
}
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2756 LOGO failure DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (lpfc_error_lost_link(irsp))
if (lpfc_error_lost_link(irsp)) {
skip_recovery = 1;
goto out;
else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
} else
/* Good status, call state machine.
* This will unregister the rpi if needed.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
}
}
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
out:
lpfc_els_free_iocb(phba, cmdiocb);
/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
@ -2436,9 +2464,30 @@ out:
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
skip_recovery = 1;
}
}
}
/*
* If the node is a target, the handling attempts to recover the port.
* For any other port type, the rpi is unregistered as an implicit
* LOGO.
*/
if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irqrestore(shost->host_lock, flags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
"Recovery Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout,
vport->num_disc_nodes);
lpfc_disc_start(vport);
}
return;
}
@ -2501,10 +2550,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue LOGO: did:x%x",
ndlp->nlp_DID, 0, 0);
/*
* If we are issuing a LOGO, we may try to recover the remote NPort
* by issuing a PLOGI later. Even though we issue ELS cmds by the
* VPI, if we have a valid RPI, and that RPI gets unreg'ed while
* that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
* for that ELS cmd. To avoid this situation, lets get rid of the
* RPI right now, before any ELS cmds are sent.
*/
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_ISSUE_LOGO;
spin_unlock_irq(shost->host_lock);
if (lpfc_unreg_rpi(vport, ndlp)) {
lpfc_els_free_iocb(phba, elsiocb);
return 0;
}
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(shost->host_lock);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@ -2920,7 +2986,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
case ELS_CMD_LOGO:
if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
}
break;
case ELS_CMD_FDISC:
@ -3007,7 +3073,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
break;
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
if (cmd == ELS_CMD_FLOGI) {
if (PCI_DEVICE_ID_HORNET ==
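
Swapping the 0xff mask for IOERR_PARAM_MASK in the switch above matters because the driver-defined local-reject codes occupy nine bits: per the lpfc_hw.h hunk later in this commit, IOERR_SLI_DOWN is 0x101 and IOERR_SLI_ABORTED is 0x103, so an 8-bit mask folds them onto unrelated low codes. A standalone demonstration (the constants mirror the driver's; the sample word is fabricated):

#include <stdio.h>

#define IOERR_SLI_DOWN    0x101    /* mirrors lpfc_hw.h */
#define IOERR_SLI_ABORTED 0x103
#define IOERR_PARAM_MASK  0x1ff

int main(void)
{
	unsigned int ulpWord4 = 0xdead0101;    /* upper bits carry other data */

	/* The old 8-bit mask folds 0x101 into 0x01 ... */
	printf("0xff  mask: 0x%02x\n", ulpWord4 & 0xff);
	/* ... while the 9-bit mask preserves the driver-defined code. */
	printf("0x1ff mask: 0x%03x\n", ulpWord4 & IOERR_PARAM_MASK);
	printf("matches IOERR_SLI_DOWN: %s\n",
	       (ulpWord4 & IOERR_PARAM_MASK) == IOERR_SLI_DOWN ? "yes" : "no");
	return 0;
}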
@ -3214,7 +3280,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
/* If discovery / RSCN timer is running, reset it */
@ -3273,7 +3340,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return 1;
case ELS_CMD_LOGO:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
return 1;
}
@ -3533,13 +3600,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for this
* ndlp. If no other discovery threads are using this ndlp.
* we should free all resources associated with it.
*/
lpfc_nlp_not_used(ndlp);
if (ndlp) {
if (NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_put(ndlp);
/* This is the end of the default RPI cleanup logic for
* this ndlp. If no other discovery threads are using
* this ndlp, free all resources associated with it.
*/
lpfc_nlp_not_used(ndlp);
} else {
lpfc_drop_node(ndlp->vport, ndlp);
}
}
return;
@ -6803,7 +6874,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
(icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
(icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@ -7985,3 +8057,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
/**
* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
* @vport: pointer to virtual port object.
* @ndlp: nodelist pointer for the impacted node.
*
* The driver calls this routine in response to an SLI4 XRI ABORT CQE
* or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
* the driver is required to send a LOGO to the remote node before it
* attempts to recover its login to the remote node.
*/
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost;
struct lpfc_hba *phba;
unsigned long flags = 0;
shost = lpfc_shost_from_vport(vport);
phba = vport->phba;
if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_log(phba, KERN_INFO,
LOG_SLI, "3093 No rport recovery needed. "
"rport in state 0x%x\n", ndlp->nlp_state);
return;
}
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3094 Start rport recovery on shost id 0x%x "
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
"flags 0x%x\n",
shost->host_no, ndlp->nlp_DID,
vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
ndlp->nlp_flag);
/*
* The rport is not responding. Remove the FCP-2 flag to prevent
* an ADISC in the follow-up recovery code.
*/
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irqrestore(shost->host_lock, flags);
lpfc_issue_els_logo(vport, ndlp, 0);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
}


@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
"rport devlosscb: sid:x%x did:x%x flg:x%x",
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/* Don't defer this if we are in the process of deleting the vport
* or unloading the driver. The unload will cleanup the node
* appropriately we just need to cleanup the ndlp rport info here.
@ -142,6 +146,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
if (ndlp->nlp_type & NLP_FABRIC) {
/* If the WWPNs of the rport and the ndlp don't match, ignore it */
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
put_device(&rport->dev);
return;
}
}
evtp = &ndlp->dev_loss_evt;
if (!list_empty(&evtp->evt_listp))
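
The new fabric-node guard compares rport->port_name against wwn_to_u64(ndlp->nlp_portname.u.wwn); the transport helper packs the eight WWN bytes big-endian into one u64 so the two names compare as plain integers. A freestanding version of that conversion (my reading of the helper's behavior; the sample WWPN is made up):

#include <stdio.h>
#include <stdint.h>

/* Pack an 8-byte WWN big-endian into a u64, like wwn_to_u64(). */
static uint64_t wwn_to_u64(const uint8_t *wwn)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };

	printf("wwpn=0x%016llx\n", (unsigned long long)wwn_to_u64(wwpn));
	return 0;
}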
@ -202,6 +215,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
"rport devlosstmo:did:x%x type:x%x id:x%x",
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/* Don't defer this if we are in the process of deleting the vport
* or unloading the driver. The unload will cleanup the node
* appropriately we just need to cleanup the ndlp rport info here.
@ -3492,7 +3509,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
struct static_vport_info *vport_info;
int rc = 0, i;
int mbx_wait_rc = 0, i;
struct fc_vport_identifiers vport_id;
struct fc_vport *new_fc_vport;
struct Scsi_Host *shost;
@ -3509,7 +3526,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
" allocate mailbox memory\n");
return;
}
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb = &pmb->u.mb;
vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
@ -3523,24 +3540,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
/* free dma buffer from previous round */
if (pmb->context1) {
mp = (struct lpfc_dmabuf *)pmb->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
pmb->vport = phba->pport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
LPFC_MBOX_TMO);
if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0544 lpfc_create_static_vport failed to"
" issue dump mailbox command ret 0x%x "
"status 0x%x\n",
rc, mb->mbxStatus);
mbx_wait_rc, mb->mbxStatus);
goto out;
}
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
mp = (struct lpfc_dmabuf *) pmb->context2;
mp = (struct lpfc_dmabuf *)pmb->context1;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@ -3604,9 +3628,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
if (rc != MBX_TIMEOUT) {
if (pmb->context2) {
mp = (struct lpfc_dmabuf *) pmb->context2;
if (mbx_wait_rc != MBX_TIMEOUT) {
if (pmb->context1) {
mp = (struct lpfc_dmabuf *)pmb->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
@ -3834,6 +3858,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3183 rport register x%06x, rport %p role x%x\n",
ndlp->nlp_DID, rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
@ -3850,6 +3878,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
"rport delete: did:x%x flg:x%x type x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3184 rport unregister x%06x, rport %p\n",
ndlp->nlp_DID, rport);
fc_remote_port_delete(rport);
return;
@ -3964,6 +3996,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
[NLP_STE_ADISC_ISSUE] = "ADISC",
[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
[NLP_STE_PRLI_ISSUE] = "PRLI",
[NLP_STE_LOGO_ISSUE] = "LOGO",
[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
[NLP_STE_MAPPED_NODE] = "MAPPED",
[NLP_STE_NPR_NODE] = "NPR",
@ -4330,6 +4363,26 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
return 0;
}
/**
* lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
* @phba: Pointer to HBA context object.
* @pmb: Pointer to mailbox object.
*
* This function will issue an ELS LOGO command after completing
* the UNREG_RPI.
**/
void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
ndlp = (struct lpfc_nodelist *)(pmb->context1);
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
}
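
This completion handler is the whole trick: lpfc_unreg_rpi (next hunk) stashes the ndlp in mbox->context1 and points mbox_cmpl at lpfc_nlp_logo_unreg, so the LOGO goes out only after the firmware has actually finished the UNREG_RPI. A toy userspace rendering of that continuation-in-the-request shape (every name here is invented):

#include <stdio.h>

/* A request that carries its own continuation, the way LPFC_MBOXQ_t
 * carries mbox_cmpl and context1. */
struct request {
	void (*on_complete)(struct request *);
	void *context1;
};

static void issue_logo(struct request *req)
{
	printf("LOGO issued for node %s\n", (const char *)req->context1);
}

/* Stand-in for the firmware completing the mailbox command. */
static void firmware_completes(struct request *req)
{
	if (req->on_complete)
		req->on_complete(req);
}

int main(void)
{
	struct request unreg = { .on_complete = issue_logo,
				 .context1 = "ndlp-example" };

	/* ... UNREG_RPI is posted here; nothing happens yet ... */
	firmware_completes(&unreg);    /* continuation fires only now */
	return 0;
}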
/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
@ -4354,9 +4407,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
mbox->context1 = ndlp;
mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
} else {
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
mempool_free(mbox, phba->mbox_mem_pool);
@ -4499,9 +4559,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_disable_node(vport, ndlp);
}
/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@ -4512,6 +4576,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Cleanup REG_LOGIN completions which are not yet processed */
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
(ndlp != (struct lpfc_nodelist *) mb->context2))
continue;
@ -4521,6 +4586,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
@ -4585,7 +4651,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
mbox->vport = vport;
mbox->context2 = NULL;
mbox->context2 = ndlp;
rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@ -5365,9 +5431,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param))
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
"Data: x%p x%x x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag);
return ndlp;
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3186 FIND node filter %p NOT FOUND.\n", filter);
return NULL;
}


@ -1188,8 +1188,8 @@ typedef struct {
*/
/* Number of rings currently used and available. */
#define MAX_CONFIGURED_RINGS 3
#define MAX_RINGS 4
#define MAX_SLI3_CONFIGURED_RINGS 3
#define MAX_SLI3_RINGS 4
/* IOCB / Mailbox is owned by FireFly */
#define OWN_CHIP 1
@ -1251,6 +1251,8 @@ typedef struct {
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
#define PCI_DEVICE_ID_TIGERSHARK 0x0704
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@ -1458,6 +1460,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_UNREG_FCFI 0xA2
#define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4
#define MBX_ACCESS_VDATA 0xA5
#define MBX_AUTH_PORT 0xF8
#define MBX_SECURITY_MGMT 0xF9
@ -2991,7 +2994,7 @@ typedef struct _PCB {
uint32_t pgpAddrLow;
uint32_t pgpAddrHigh;
SLI2_RDSC rdsc[MAX_RINGS];
SLI2_RDSC rdsc[MAX_SLI3_RINGS];
} PCB_t;
/* NEW_FEATURE */
@ -3101,18 +3104,18 @@ struct lpfc_pgp {
struct sli2_desc {
uint32_t unused1[16];
struct lpfc_hgp host[MAX_RINGS];
struct lpfc_pgp port[MAX_RINGS];
struct lpfc_hgp host[MAX_SLI3_RINGS];
struct lpfc_pgp port[MAX_SLI3_RINGS];
};
struct sli3_desc {
struct lpfc_hgp host[MAX_RINGS];
struct lpfc_hgp host[MAX_SLI3_RINGS];
uint32_t reserved[8];
uint32_t hbq_put[16];
};
struct sli3_pgp {
struct lpfc_pgp port[MAX_RINGS];
struct lpfc_pgp port[MAX_SLI3_RINGS];
uint32_t hbq_get[16];
};
@ -3242,6 +3245,7 @@ typedef struct {
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
#define IOERR_SLI_ABORTED 0x103
#define IOERR_PARAM_MASK 0x1ff
} PARM_ERR;
typedef union {


@ -187,11 +187,17 @@ struct lpfc_sli_intf {
/* Active interrupt test count */
#define LPFC_ACT_INTR_CNT 4
/* Algorithms for scheduling FCP commands to WQs */
#define LPFC_FCP_SCHED_ROUND_ROBIN 0
#define LPFC_FCP_SCHED_BY_CPU 1
/* Delay Multiplier constant */
#define LPFC_DMULT_CONST 651042
#define LPFC_MIM_IMAX 636
#define LPFC_FP_DEF_IMAX 10000
#define LPFC_SP_DEF_IMAX 10000
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 50000
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
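
LPFC_MIN_IMAX/LPFC_MAX_IMAX/LPFC_DEF_IMAX replace the per-EQ fast/slow-path defaults with one interrupts-per-second range for the whole port; the usual consumer of such a triple is a module-parameter check that falls back to the default when the request is out of range. A hedged sketch of that validation (whether lpfc clamps, rejects, or resets is not shown in this hunk):

#include <stdio.h>

#define LPFC_MIN_IMAX 5000        /* mirrors the new defines above */
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 50000

/* Validate an interrupts-per-second tunable, falling back to the
 * default when the requested value is out of range. */
static unsigned int validate_imax(unsigned int requested)
{
	if (requested < LPFC_MIN_IMAX || requested > LPFC_MAX_IMAX) {
		fprintf(stderr, "imax %u out of [%u,%u], using %u\n",
			requested, LPFC_MIN_IMAX, LPFC_MAX_IMAX,
			LPFC_DEF_IMAX);
		return LPFC_DEF_IMAX;
	}
	return requested;
}

int main(void)
{
	printf("%u\n", validate_imax(100));      /* out of range -> default */
	printf("%u\n", validate_imax(20000));    /* accepted as-is */
	return 0;
}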
@ -338,7 +344,7 @@ struct lpfc_cqe {
* Define mask value for xri_aborted and wcqe completed CQE extended status.
* Currently, extended status is limited to 9 bits (0x0 -> 0x103).
*/
#define WCQE_PARAM_MASK 0x1FF;
#define WCQE_PARAM_MASK 0x1FF
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
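
The WCQE_PARAM_MASK hunk is a one-character fix: the old define ended in a semicolon, which is harmless while the macro terminates a statement but breaks the moment it is expanded inside a larger expression. A compile-time illustration (variable names are arbitrary):

#include <stdio.h>

#define BAD_MASK  0x1FF;    /* trailing ';' rides along with every expansion */
#define GOOD_MASK 0x1FF

int main(void)
{
	unsigned int status = 0x103;

	if ((status & GOOD_MASK) == 0x103)    /* fine */
		puts("matched");
	/* if ((status & BAD_MASK) == 0x103)       does not compile:
	 * expands to ((status & 0x1FF;) == 0x103) -- ';' inside parens */
	return 0;
}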
@ -880,13 +886,19 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E
#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43
#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
#define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES 0xA1
#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
@ -1382,6 +1394,11 @@ struct lpfc_mbx_set_link_diag_state {
#define lpfc_mbx_set_diag_state_diag_SHIFT 0
#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
#define lpfc_mbx_set_diag_state_diag_WORD word0
#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2
#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001
#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0
#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0
#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1
#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
#define lpfc_mbx_set_diag_state_link_num_WORD word0
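
Each field added here follows the driver's SHIFT/MASK/WORD naming triple, which token-pasting accessor macros consume to read and write bitfields portably. A self-contained sketch of that accessor style (lpfc's real bf_get/bf_set take a struct and field name; these simplified ones work on a bare word):

#include <stdio.h>

#define diag_bit_valid_SHIFT 2           /* mirrors the new defines */
#define diag_bit_valid_MASK  0x00000001

/* Generic shift/mask field accessors in the spirit of bf_get/bf_set. */
#define FIELD_GET(word, name) (((word) >> name##_SHIFT) & name##_MASK)
#define FIELD_SET(word, name, val) \
	(((word) & ~(name##_MASK << name##_SHIFT)) | \
	 (((val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	unsigned int word0 = 0;

	word0 = FIELD_SET(word0, diag_bit_valid, 1);
	printf("word0=0x%08x bit_valid=%u\n",
	       word0, FIELD_GET(word0, diag_bit_valid));
	return 0;
}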
@ -2556,7 +2573,7 @@ struct lpfc_mbx_get_sli4_parameters {
};
struct lpfc_rscr_desc_generic {
#define LPFC_RSRC_DESC_WSIZE 18
#define LPFC_RSRC_DESC_WSIZE 22
uint32_t desc[LPFC_RSRC_DESC_WSIZE];
};
@ -2566,6 +2583,9 @@ struct lpfc_rsrc_desc_pcie {
#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
#define lpfc_rsrc_desc_pcie_type_WORD word0
#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
#define lpfc_rsrc_desc_pcie_length_SHIFT 8
#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff
#define lpfc_rsrc_desc_pcie_length_WORD word0
uint32_t word1;
#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
@ -2593,6 +2613,12 @@ struct lpfc_rsrc_desc_fcfcoe {
#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8
#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff
#define lpfc_rsrc_desc_fcfcoe_length_WORD word0
#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0
#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72
#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88
uint32_t word1;
#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
@ -2651,6 +2677,12 @@ struct lpfc_rsrc_desc_fcfcoe {
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
uint32_t bw_min;
uint32_t bw_max;
uint32_t iops_min;
uint32_t iops_max;
uint32_t reserved[4];
};
struct lpfc_func_cfg {

Diff for this file not shown because of its large size.


@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
pmb->context2 = (uint8_t *) mp;
pmb->context1 = (uint8_t *)mp;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@ -950,44 +950,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
pring->sli.sli3.sizeCiocb =
phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
pring->sli.sli3.sizeRiocb =
phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
if ((pring->sli.sli3.numCiocb == 0) ||
(pring->sli.sli3.numRiocb == 0)) {
pcbp->rdsc[i].cmdEntries = 0;
pcbp->rdsc[i].rspEntries = 0;
pcbp->rdsc[i].cmdAddrHigh = 0;
pcbp->rdsc[i].rspAddrHigh = 0;
pcbp->rdsc[i].cmdAddrLow = 0;
pcbp->rdsc[i].rspAddrLow = 0;
pring->cmdringaddr = NULL;
pring->rspringaddr = NULL;
pring->sli.sli3.cmdringaddr = NULL;
pring->sli.sli3.rspringaddr = NULL;
continue;
}
/* Command ring setup for ring */
pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
iocbCnt += pring->sli.sli3.numCiocb;
/* Response ring setup for ring */
pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numRiocb;
iocbCnt += pring->sli.sli3.numRiocb;
}
}
@ -1609,12 +1612,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
switch (mbox->mbxCommand) {
case MBX_WRITE_NV: /* 0x03 */
case MBX_DUMP_MEMORY: /* 0x17 */
case MBX_UPDATE_CFG: /* 0x1B */
case MBX_DOWN_LOAD: /* 0x1C */
case MBX_DEL_LD_ENTRY: /* 0x1D */
case MBX_WRITE_VPARMS: /* 0x32 */
case MBX_LOAD_AREA: /* 0x81 */
case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
case MBX_ACCESS_VDATA: /* 0xA5 */
return LPFC_MBOX_TMO_FLASH_CMD;
case MBX_SLI4_CONFIG: /* 0x9b */
subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
@ -1625,11 +1631,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
case LPFC_MBOX_OPCODE_WRITE_OBJECT:
case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
case LPFC_MBOX_OPCODE_DELETE_OBJECT:
case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
case LPFC_MBOX_OPCODE_RESET_LICENSES:
case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
case LPFC_MBOX_OPCODE_GET_VPD_DATA:
case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
}
}


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2012 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -194,6 +194,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
pci_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
if (phba->rrq_pool)
mempool_destroy(phba->rrq_pool);
phba->rrq_pool = NULL;
/* Free NLP memory pool */
mempool_destroy(phba->nlp_mem_pool);
phba->nlp_mem_pool = NULL;


@ -1777,6 +1777,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
struct ls_rjt stat;
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
struct ls_rjt stat;
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
struct ls_rjt stat;
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
struct ls_rjt stat;
memset(&stat, 0, sizeof(struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
/*
* Take no action. If a LOGO is outstanding, then possibly DevLoss has
* timed out and is calling for Device Remove. In this case, the LOGO
* must be allowed to complete in state LOGO_ISSUE so that the rpi
* and other NLP flags are correctly cleaned up.
*/
return ndlp->nlp_state;
}
static uint32_t
lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
/*
* Device Recovery events have no meaning for a node with a LOGO
* outstanding. The LOGO has to complete first and handle the
* node from that point.
*/
return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
@ -2083,6 +2194,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* For the fabric port just clear the fc flags. */
if (ndlp->nlp_DID == Fabric_DID) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@ -2297,6 +2410,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_device_rm_prli_issue, /* DEVICE_RM */
lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_logo_issue, /* RCV_PLOGI LOGO_ISSUE */
lpfc_rcv_prli_logo_issue, /* RCV_PRLI */
lpfc_rcv_logo_logo_issue, /* RCV_LOGO */
lpfc_rcv_padisc_logo_issue, /* RCV_ADISC */
lpfc_rcv_padisc_logo_issue, /* RCV_PDISC */
lpfc_rcv_prlo_logo_issue, /* RCV_PRLO */
lpfc_cmpl_plogi_illegal, /* CMPL_PLOGI */
lpfc_disc_illegal, /* CMPL_PRLI */
lpfc_cmpl_logo_logo_issue, /* CMPL_LOGO */
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
lpfc_device_rm_logo_issue, /* DEVICE_RM */
lpfc_device_recov_logo_issue, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
lpfc_rcv_logo_unmap_node, /* RCV_LOGO */

Просмотреть файл

@ -60,12 +60,6 @@ static char *dif_op_str[] = {
"PROT_WRITE_PASS",
};
static char *dif_grd_str[] = {
"NO_GUARD",
"DIF_CRC",
"DIX_IP",
};
struct scsi_dif_tuple {
__be16 guard_tag; /* Checksum */
__be16 app_tag; /* Opaque storage */
@ -3482,9 +3476,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
lp = (uint32_t *)cmnd->sense_buffer;
if (!scsi_status && (resp_info & RESID_UNDER) &&
vport->cfg_log_verbose & LOG_FCP_UNDER)
logit = LOG_FCP_UNDER;
/* special handling for under run conditions */
if (!scsi_status && (resp_info & RESID_UNDER)) {
/* don't log under runs if fcp set... */
if (vport->cfg_log_verbose & LOG_FCP)
logit = LOG_FCP_ERROR;
/* unless operator says so */
if (vport->cfg_log_verbose & LOG_FCP_UNDER)
logit = LOG_FCP_UNDER;
}
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9024 FCP command x%x failed: x%x SNS x%x x%x "
@ -3552,11 +3552,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
/*
* Check SLI validation that all the transfer was actually done
* (fcpi_parm should be zero). Apply check only to reads.
* (fcpi_parm should be zero).
*/
} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
} else if (fcpi_parm) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"9029 FCP Read Check Error Data: "
"9029 FCP Data Transfer Check Error: "
"x%x x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@ -3615,7 +3615,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd = lpfc_cmd->pCmd;
shost = cmd->device->host;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
@ -3660,10 +3660,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
else if (lpfc_cmd->status >= IOSTAT_CNT)
lpfc_cmd->status = IOSTAT_DEFAULT;
if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
&& !lpfc_cmd->fcp_rsp->rspStatus3
&& (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
&& !(phba->cfg_log_verbose & LOG_FCP_UNDER))
if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
!lpfc_cmd->fcp_rsp->rspStatus3 &&
(lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
!(vport->cfg_log_verbose & LOG_FCP_UNDER))
logit = 0;
else
logit = LOG_FCP | LOG_FCP_UNDER;
@ -3829,12 +3829,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
spin_lock_irq(&phba->hbalock);
lpfc_cmd->pCmd = NULL;
spin_unlock_irq(&phba->hbalock);
/*
* If there is a thread waiting for command completion
* wake up the thread.
*/
spin_lock_irqsave(shost->host_lock, flags);
lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(shost->host_lock, flags);
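
The reordering above publishes the completion (lpfc_cmd->pCmd = NULL) under the hbalock before taking the host lock to wake any thread parked on lpfc_cmd->waitq, so the abort handler, which tests pCmd under the hbalock, can no longer race with a half-finished command. A userspace sketch of the publish-then-wake shape with a single pthread mutex standing in for both locks (purely illustrative; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static void *pCmd = "in-flight";    /* NULL means "completed" */

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (pCmd != NULL)            /* the abort handler's view */
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);
	puts("waiter: command fully completed");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	pCmd = NULL;                    /* publish the completion first ... */
	pthread_cond_signal(&waitq);    /* ... then wake the waiter */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}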
@ -3868,12 +3871,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
}
spin_lock_irq(&phba->hbalock);
lpfc_cmd->pCmd = NULL;
spin_unlock_irq(&phba->hbalock);
/*
* If there is a thread waiting for command completion
* wake up the thread.
*/
spin_lock_irqsave(shost->host_lock, flags);
lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(shost->host_lock, flags);
@ -3919,6 +3925,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
int datadir = scsi_cmnd->sc_data_direction;
char tag[2];
uint8_t *ptr;
bool sli4;
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return;
@ -3930,8 +3938,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
ptr = &fcp_cmnd->fcpCdb[0];
memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
ptr += scsi_cmnd->cmd_len;
memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
}
if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
switch (tag[0]) {
case HEAD_OF_QUEUE_TAG:
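
The CDB copy above drops the old memset-everything-then-memcpy sequence in favor of copying cmd_len live bytes and zeroing only the remaining tail, so each of the 16 CDB bytes is written exactly once. The same pattern in isolation (CDB_LEN mirrors LPFC_FCP_CDB_LEN; the INQUIRY bytes are fabricated):

#include <stdio.h>
#include <string.h>

#define CDB_LEN 16    /* mirrors LPFC_FCP_CDB_LEN */

int main(void)
{
	unsigned char cdb[CDB_LEN];
	unsigned char cmnd[6] = { 0x12, 0, 0, 0, 36, 0 };    /* INQUIRY(6) */
	size_t cmd_len = sizeof(cmnd);
	size_t i;

	/* Copy the live bytes first, then zero only the tail. */
	memcpy(cdb, cmnd, cmd_len);
	if (cmd_len < CDB_LEN)
		memset(cdb + cmd_len, 0, CDB_LEN - cmd_len);

	for (i = 0; i < CDB_LEN; i++)
		printf("%02x%c", cdb[i], i + 1 == CDB_LEN ? '\n' : ' ');
	return 0;
}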
@ -3947,6 +3960,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else
fcp_cmnd->fcpCntl1 = 0;
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
/*
* There are three possibilities here - use scatter-gather segment, use
* the single mapping, or neither. Start the lpfc command prep by
@ -3956,11 +3971,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
if (phba->sli_rev < LPFC_SLI_REV4) {
if (sli4)
iocb_cmd->ulpPU = PARM_READ_CHECK;
else {
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
} else
iocb_cmd->ulpPU = PARM_READ_CHECK;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
} else {
@ -3984,7 +4000,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* of the scsi_cmnd request_buffer
*/
piocbq->iocb.ulpContext = pnode->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
if (sli4)
piocbq->iocb.ulpContext =
phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
@ -4241,9 +4257,8 @@ void lpfc_poll_timeout(unsigned long ptr)
* SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
**/
static int
lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@ -4299,53 +4314,28 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9033 BLKGRD: rcvd protected cmd:%02x op=%s "
"guard=%s\n", cmnd->cmnd[0],
dif_op_str[scsi_get_prot_op(cmnd)],
dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9035 BLKGRD: READ @ sector %llu, "
"cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9036 BLKGRD: WRITE @ sector %llu, "
"cnt %u, wpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
"9033 BLKGRD: rcvd %s cmd:x%x "
"sector x%llx cnt %u pt %x\n",
dif_op_str[scsi_get_prot_op(cmnd)],
cmnd->cmnd[0],
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
}
err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
} else {
if (vport->phba->cfg_enable_bg) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9038 BLKGRD: rcvd unprotected cmd:"
"%02x op=%s guard=%s\n", cmnd->cmnd[0],
dif_op_str[scsi_get_prot_op(cmnd)],
dif_grd_str[scsi_host_get_guard(shost)]);
if (cmnd->cmnd[0] == READ_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9040 dbg: READ @ sector %llu, "
"cnt %u, rpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
"9038 BLKGRD: rcvd PROT_NORMAL cmd: "
"x%x sector x%llx cnt %u pt %x\n",
cmnd->cmnd[0],
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
else if (cmnd->cmnd[0] == WRITE_10)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
"9041 dbg: WRITE @ sector %llu, "
"cnt %u, wpt %d\n",
(unsigned long long)scsi_get_lba(cmnd),
blk_rq_sectors(cmnd->request),
(cmnd->cmnd[1]>>5));
(cmnd->cmnd[1]>>5));
}
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
@ -4363,11 +4353,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
spin_unlock(shost->host_lock);
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
spin_lock(shost->host_lock);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
@ -4384,11 +4372,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return SCSI_MLQUEUE_TARGET_BUSY;
out_fail_command:
done(cmnd);
cmnd->scsi_done(cmnd);
return 0;
}
static DEF_SCSI_QCMD(lpfc_queuecommand)
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
@ -4414,7 +4401,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
status = fc_block_scsi_eh(cmnd);
if (status)
if (status != 0 && status != SUCCESS)
return status;
spin_lock_irq(&phba->hbalock);
@ -4428,7 +4415,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
}
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
if (!lpfc_cmd) {
if (!lpfc_cmd || !lpfc_cmd->pCmd) {
spin_unlock_irq(&phba->hbalock);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
@ -4521,9 +4508,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret = FAILED;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
"for abort to complete: ret %#x, ID %d, "
"LUN %d\n",
ret, cmnd->device->id, cmnd->device->lun);
"for abortng I/O (xri:x%x) to complete: "
"ret %#x, ID %d, LUN %d\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
}
goto out;
@ -4769,7 +4757,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
}
pnode = rdata->pnode;
status = fc_block_scsi_eh(cmnd);
if (status)
if (status != 0 && status != SUCCESS)
return status;
status = lpfc_chk_tgt_mapped(vport, cmnd);
@ -4836,7 +4824,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
}
pnode = rdata->pnode;
status = fc_block_scsi_eh(cmnd);
if (status)
if (status != 0 && status != SUCCESS)
return status;
status = lpfc_chk_tgt_mapped(vport, cmnd);
@ -4904,7 +4892,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
status = fc_block_scsi_eh(cmnd);
if (status)
if (status != 0 && status != SUCCESS)
return status;
/*

Diff for this file not shown because of its large size.


@ -131,7 +131,9 @@ typedef struct lpfcMboxq {
#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
ring */
#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver.
For SLI4, an additional ring for each
FCP WQ will be allocated. */
struct lpfc_sli_ring;
@ -158,6 +160,24 @@ struct lpfc_sli_ring_stat {
uint64_t iocb_rsp_full; /* IOCB rsp ring full */
};
struct lpfc_sli3_ring {
uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
uint32_t next_cmdidx; /* next_cmd index */
uint32_t rspidx; /* current index in response ring */
uint32_t cmdidx; /* current index in command ring */
uint16_t numCiocb; /* number of command iocb's per ring */
uint16_t numRiocb; /* number of rsp iocb's per ring */
uint16_t sizeCiocb; /* Size of command iocb's in this ring */
uint16_t sizeRiocb; /* Size of response iocb's in this ring */
uint32_t *cmdringaddr; /* virtual address for cmd rings */
uint32_t *rspringaddr; /* virtual address for rsp rings */
};
struct lpfc_sli4_ring {
struct lpfc_queue *wqp; /* Pointer to associated WQ */
};
/* Structure used to hold SLI ring information */
struct lpfc_sli_ring {
uint16_t flag; /* ring flags */
@ -166,16 +186,10 @@ struct lpfc_sli_ring {
#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
uint32_t next_cmdidx; /* next_cmd index */
uint32_t rspidx; /* current index in response ring */
uint32_t cmdidx; /* current index in command ring */
uint8_t rsvd;
uint8_t ringno; /* ring number */
uint16_t numCiocb; /* number of command iocb's per ring */
uint16_t numRiocb; /* number of rsp iocb's per ring */
uint16_t sizeCiocb; /* Size of command iocb's in this ring */
uint16_t sizeRiocb; /* Size of response iocb's in this ring */
spinlock_t ring_lock; /* lock for issuing commands */
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@ -186,8 +200,6 @@ struct lpfc_sli_ring {
struct list_head txcmplq;
uint16_t txcmplq_cnt; /* current length of queue */
uint16_t txcmplq_max; /* max length */
uint32_t *cmdringaddr; /* virtual address for cmd rings */
uint32_t *rspringaddr; /* virtual address for rsp rings */
uint32_t missbufcnt; /* keep track of buffers to post */
struct list_head postbufq;
uint16_t postbufq_cnt; /* current length of queue */
@ -207,6 +219,10 @@ struct lpfc_sli_ring {
/* cmd ring available */
void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
struct lpfc_sli_ring *);
union {
struct lpfc_sli3_ring sli3;
struct lpfc_sli4_ring sli4;
} sli;
};
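Moving the SLI3-only indices and ring addresses into a union selected by hardware revision shrinks every ring and makes each access self-documenting. The pattern in miniature, with toy types and a field subset:

#include <stdio.h>
#include <stdint.h>

struct toy_sli3_ring { uint32_t cmdidx, rspidx; };
struct toy_sli4_ring { void *wqp; };	/* would point at an lpfc_queue */

struct toy_ring {
	uint8_t ringno;
	union {			/* same storage, keyed by the active SLI rev */
		struct toy_sli3_ring sli3;
		struct toy_sli4_ring sli4;
	} sli;
};

int main(void)
{
	struct toy_ring r = { .ringno = 0 };

	r.sli.sli3.cmdidx = 5;	/* an SLI3 code path touches sli3.* only */
	printf("ring %u cmdidx %u\n", r.ringno, r.sli.sli3.cmdidx);
	return 0;
}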
/* Structure used for configuring rings to a specific profile or rctl / type */
@ -239,6 +255,8 @@ struct lpfc_sli_stat {
uint64_t mbox_stat_err; /* Mbox cmds completed status error */
uint64_t mbox_cmd; /* Mailbox commands issued */
uint64_t sli_intr; /* Count of Host Attention interrupts */
uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */
uint64_t sli_ips; /* Host Attention interrupts per sec */
uint32_t err_attn_event; /* Error Attn event counters */
uint32_t link_event; /* Link event counters */
uint32_t mbox_event; /* Mailbox event counters */
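The two new counters support a rate figure: sampled on a fixed tick, interrupts-per-second is just the delta of the running sli_intr counter since the previous sample. A sketch assuming a one-second tick (the driver's actual sampling point is not visible in this hunk):

#include <stdio.h>
#include <stdint.h>

struct toy_sli_stat {
	uint64_t sli_intr;	/* running Host Attention interrupt count */
	uint64_t sli_prev_intr;	/* count at the previous sample */
	uint64_t sli_ips;	/* derived interrupts per second */
};

static void sample_ips(struct toy_sli_stat *s)	/* call once per second */
{
	s->sli_ips = s->sli_intr - s->sli_prev_intr;
	s->sli_prev_intr = s->sli_intr;
}

int main(void)
{
	struct toy_sli_stat s = { .sli_intr = 1000 };

	sample_ips(&s);
	s.sli_intr += 250;
	sample_ips(&s);
	printf("%llu\n", (unsigned long long)s.sli_ips);	/* 250 */
	return 0;
}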
@ -270,7 +288,7 @@ struct lpfc_sli {
#define LPFC_MENLO_MAINT 0x1000 /* need for menlo fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
struct lpfc_sli_ring *ring;
int fcp_ring; /* ring used for FCP initiator commands */
int next_ring;

View file

@ -34,18 +34,10 @@
/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for fast-path FCP work queues */
#define LPFC_FN_EQN_MAX 8
#define LPFC_SP_EQN_DEF 1
#define LPFC_FP_EQN_DEF 4
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
#define LPFC_FN_WQN_MAX 32
#define LPFC_SP_WQN_DEF 1
#define LPFC_FP_WQN_DEF 4
#define LPFC_FP_WQN_MIN 1
#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FCP_IO_CHAN_MAX 8
/*
* Provide the default FCF Record attributes used by the driver
@ -141,6 +133,37 @@ struct lpfc_queue {
uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
/* For q stats */
uint32_t q_cnt_1;
uint32_t q_cnt_2;
uint32_t q_cnt_3;
uint64_t q_cnt_4;
/* defines for EQ stats */
#define EQ_max_eqe q_cnt_1
#define EQ_no_entry q_cnt_2
#define EQ_badstate q_cnt_3
#define EQ_processed q_cnt_4
/* defines for CQ stats */
#define CQ_mbox q_cnt_1
#define CQ_max_cqe q_cnt_1
#define CQ_release_wqe q_cnt_2
#define CQ_xri_aborted q_cnt_3
#define CQ_wq q_cnt_4
/* defines for WQ stats */
#define WQ_overflow q_cnt_1
#define WQ_posted q_cnt_4
/* defines for RQ stats */
#define RQ_no_posted_buf q_cnt_1
#define RQ_no_buf_found q_cnt_2
#define RQ_buf_trunc q_cnt_3
#define RQ_rcv_buf q_cnt_4
union sli4_qe qe[1]; /* array to index entries (must be last) */
};
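The q_cnt_1..q_cnt_4 members are generic counters that each queue type renames through #define, so one lpfc_queue layout serves EQs, CQs, WQs and RQs without a union. The aliasing trick in miniature:

#include <stdio.h>
#include <stdint.h>

struct toy_queue {
	uint32_t q_cnt_1, q_cnt_2, q_cnt_3;
	uint64_t q_cnt_4;
};

/* Per-type names map onto the same storage, as in the hunk above. */
#define EQ_processed	q_cnt_4
#define WQ_posted	q_cnt_4

int main(void)
{
	struct toy_queue eq = {0}, wq = {0};

	eq.EQ_processed++;	/* expands to eq.q_cnt_4++ */
	wq.WQ_posted += 2;	/* expands to wq.q_cnt_4 += 2 */
	printf("%llu %llu\n", (unsigned long long)eq.EQ_processed,
	       (unsigned long long)wq.WQ_posted);
	return 0;
}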
@ -350,6 +373,7 @@ struct lpfc_hba;
struct lpfc_fcp_eq_hdl {
uint32_t idx;
struct lpfc_hba *phba;
atomic_t fcp_eq_in_use;
};
/* Port Capabilities for SLI4 Parameters */
@ -407,6 +431,8 @@ struct lpfc_sli4_lnk_info {
uint8_t lnk_no;
};
#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@ -463,20 +489,23 @@ struct lpfc_sli4_hba {
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
uint32_t cfg_eqn;
uint32_t msix_vec_nr;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue **fp_eq; /* Fast-path event queue */
struct lpfc_queue *sp_eq; /* Slow-path event queue */
struct lpfc_queue **hba_eq;/* Event queues for HBA */
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
uint16_t *fcp_cq_map;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
/* Setup information for various queue parameters */
int eq_esize;

View file

@ -18,11 +18,16 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.32"
#define LPFC_DRIVER_VERSION "8.3.34"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
/* Used for SLI4 */
#define LPFC_DRIVER_HANDLER_NAME "lpfc:"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."
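With one MSI-X vector per fast-path EQ, SLI4 needs a distinct irq handler name per channel; presumably the handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ] array added in lpfc_sli4.h is filled from the new "lpfc:" prefix plus the channel index. A runnable sketch of that naming (the exact format string is an assumption):

#include <stdio.h>

#define LPFC_SLI4_HANDLER_NAME_SZ 16
#define LPFC_FCP_IO_CHAN_MAX 8
#define LPFC_DRIVER_HANDLER_NAME "lpfc:"

int main(void)
{
	char name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
	int idx;

	for (idx = 0; idx < LPFC_FCP_IO_CHAN_MAX; idx++) {
		/* e.g. "lpfc:0" .. "lpfc:7", one per FCP EQ/CQ/WQ channel */
		snprintf(name[idx], LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME "%d", idx);
		puts(name[idx]);
	}
	return 0;
}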

View file

@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
#define MEGASAS_VERSION "00.00.06.15-rc1"
#define MEGASAS_RELDATE "Mar. 19, 2012"
#define MEGASAS_EXT_VERSION "Mon. Mar. 19 17:00:00 PDT 2012"
#define MEGASAS_VERSION "00.00.06.18-rc1"
#define MEGASAS_RELDATE "Jun. 17, 2012"
#define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012"
/*
* Device IDs
@ -747,6 +747,7 @@ struct megasas_ctrl_info {
#define MEGASAS_RESET_NOTICE_INTERVAL 5
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
/*
* FW reports the maximum number of commands that it can accept (maximum
@ -1364,6 +1365,7 @@ struct megasas_instance {
unsigned long bar;
long reset_flags;
struct mutex reset_mutex;
int throttlequeuedepth;
};
enum {

View file

@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
* Version : v00.00.06.15-rc1
* Version : v00.00.06.18-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@ -71,6 +71,16 @@ static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
"before resetting adapter. Default: 180");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
@ -1595,8 +1605,9 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
unsigned long flags;
if (instance->flag & MEGASAS_FW_BUSY
&& time_after(jiffies, instance->last_time + 5 * HZ)
&& atomic_read(&instance->fw_outstanding) < 17) {
&& time_after(jiffies, instance->last_time + 5 * HZ)
&& atomic_read(&instance->fw_outstanding) <
instance->throttlequeuedepth + 1) {
spin_lock_irqsave(instance->host->host_lock, flags);
instance->flag &= ~MEGASAS_FW_BUSY;
@ -1772,7 +1783,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
return SUCCESS;
}
for (i = 0; i < wait_time; i++) {
for (i = 0; i < resetwaittime; i++) {
int outstanding = atomic_read(&instance->fw_outstanding);
@ -1914,7 +1925,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
/* FW is busy, throttle IO */
spin_lock_irqsave(instance->host->host_lock, flags);
instance->host->can_queue = 16;
instance->host->can_queue = instance->throttlequeuedepth;
instance->last_time = jiffies;
instance->flag |= MEGASAS_FW_BUSY;
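Together with the restore hunk earlier in this file, this completes the throttle lifecycle: an I/O timeout while the firmware is busy drops can_queue to throttlequeuedepth, and once outstanding commands fall below throttlequeuedepth + 1 and five seconds have passed, the busy flag is cleared and the full depth comes back. In outline (a compilable paraphrase, not driver code; the restored depth is an assumption since that line is outside the hunk):

struct toy_instance {
	int flag;		/* FW_BUSY bit */
	int can_queue;		/* depth advertised to the SCSI midlayer */
	int max_cmds;		/* full firmware queue depth */
	int throttlequeuedepth;	/* module-parameter-derived floor */
	int fw_outstanding;
	unsigned long last_time, now;	/* stand-ins for jiffies bookkeeping */
};

#define TOY_FW_BUSY 1

static void on_io_timeout(struct toy_instance *i)
{
	i->can_queue = i->throttlequeuedepth;	/* was a hardcoded 16 */
	i->last_time = i->now;
	i->flag |= TOY_FW_BUSY;
}

static void maybe_restore(struct toy_instance *i)
{
	if ((i->flag & TOY_FW_BUSY) &&
	    i->now > i->last_time + 5 /* seconds; the driver uses 5 * HZ */ &&
	    i->fw_outstanding < i->throttlequeuedepth + 1) {
		i->flag &= ~TOY_FW_BUSY;
		i->can_queue = i->max_cmds;
	}
}

int main(void)
{
	struct toy_instance i = {
		.max_cmds = 1008, .throttlequeuedepth = 16,
		.fw_outstanding = 4, .now = 100,
	};

	on_io_timeout(&i);	/* can_queue -> 16, FW_BUSY set */
	i.now = 106;		/* more than 5 s later, queue drained */
	maybe_restore(&i);	/* FW_BUSY cleared, depth restored */
	return i.can_queue == 1008 ? 0 : 1;
}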
@ -3577,6 +3588,24 @@ static int megasas_init_fw(struct megasas_instance *instance)
kfree(ctrl_info);
/* Check for valid throttlequeuedepth module parameter */
if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) {
if (throttlequeuedepth > (instance->max_fw_cmds -
MEGASAS_SKINNY_INT_CMDS))
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
else
instance->throttlequeuedepth = throttlequeuedepth;
} else {
if (throttlequeuedepth > (instance->max_fw_cmds -
MEGASAS_INT_CMDS))
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
else
instance->throttlequeuedepth = throttlequeuedepth;
}
/*
* Setup tasklet for cmd completion
*/
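The validation above keeps throttlequeuedepth from eating into the commands the driver reserves for its own use (MEGASAS_INT_CMDS, or the skinny-controller variant; neither value is visible in this hunk), falling back to the 16-command default when the request is too large. A runnable sketch of the clamp with illustrative numbers:

#include <stdio.h>

#define MEGASAS_THROTTLE_QUEUE_DEPTH 16

static int clamp_throttle_depth(int requested, int max_fw_cmds, int reserved)
{
	if (requested > max_fw_cmds - reserved)
		return MEGASAS_THROTTLE_QUEUE_DEPTH;	/* fall back to default */
	return requested;
}

int main(void)
{
	/* e.g. 1008 firmware commands with 32 reserved internally */
	printf("%d\n", clamp_throttle_depth(64, 1008, 32));	/* 64 */
	printf("%d\n", clamp_throttle_depth(2000, 1008, 32));	/* 16 */
	return 0;
}

At module load the two knobs would be supplied as, for example, modprobe megaraid_sas throttlequeuedepth=32 resetwaittime=60.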

Some files were not shown because too many files changed in this diff.