Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (84 commits)
  [SCSI] be2iscsi: SGE Len == 64K
  [SCSI] be2iscsi: Remove premature free of cid
  [SCSI] be2iscsi: More time for FW
  [SCSI] libsas: fix bug for vacant phy
  [SCSI] sd: Fix overflow with big physical blocks
  [SCSI] st: add MTWEOFI to write filemarks without flushing drive buffer
  [SCSI] libsas: Don't issue commands to devices that have been hot-removed
  [SCSI] megaraid_sas: Add Online Controller Reset to MegaRAID SAS drive
  [SCSI] lpfc 8.3.17: Update lpfc driver version to 8.3.17
  [SCSI] lpfc 8.3.17: Replace function reset methodology
  [SCSI] lpfc 8.3.17: SCSI fixes
  [SCSI] lpfc 8.3.17: BSG fixes
  [SCSI] lpfc 8.3.17: SLI Additions and Fixes
  [SCSI] lpfc 8.3.17: Code Cleanup and Locking fixes
  [SCSI] zfcp: Remove scsi_cmnd->serial_number from debug traces
  [SCSI] ipr: fix array error logging
  [SCSI] aha152x: enable PCMCIA on 64bit
  [SCSI] scsi_dh_alua: Handle all states correctly
  [SCSI] cxgb4i: connection and ddp setting update
  [SCSI] cxgb3i: fixed connection over vlan
  ...
Linus Torvalds 2010-10-22 17:34:15 -07:00
Parents: 80c226fbef 58ff4bd042
Commit: c70b5296e7
306 changed files: 39258 additions and 45052 deletions


@ -2,7 +2,7 @@ This file contains brief information about the SCSI tape driver.
The driver is currently maintained by Kai Mäkisara (email
Kai.Makisara@kolumbus.fi)
Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara
Last modified: Sun Aug 29 18:25:47 2010 by kai.makisara
BASICS
@ -85,6 +85,17 @@ writing and the last operation has been a write. Two filemarks can be
optionally written. In both cases end of data is signified by
returning zero bytes for two consecutive reads.
Writing filemarks without the immediate bit set in the SCSI command block acts
as a synchronization point, i.e., all remaining data from the drive buffers is
written to tape before the command returns. This makes sure that write errors
are caught at that point, but this takes time. In some applications, several
consecutive files must be written fast. The MTWEOFI operation can be used to
write the filemarks without flushing the drive buffer. Writing a filemark at
close() always flushes the drive buffers. However, if the previous
operation is MTWEOFI, close() does not write a filemark. This can be used if
the program wants to close/open the tape device between files and wants to
skip waiting.
If rewind, offline, bsf, or seek is done and previous tape operation was
write, a filemark is written before moving tape.
@ -301,6 +312,8 @@ MTBSR Space backward over count records.
MTFSS Space forward over count setmarks.
MTBSS Space backward over count setmarks.
MTWEOF Write count filemarks.
MTWEOFI Write count filemarks with immediate bit set (i.e., does not
wait until data is on tape)
MTWSM Write count setmarks.
MTREW Rewind tape.
MTOFFL Set device off line (often rewind plus eject).
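
The MTWEOFI operation described above maps onto an ordinary MTIOCTOP ioctl. The
following minimal userspace sketch is not part of the kernel patch; the
/dev/nst0 device path and the count of one filemark are illustrative, and the
#ifdef guards against C library headers that predate MTWEOFI.

/* Write one filemark without flushing the drive buffer (MTWEOFI). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int main(void)
{
        struct mtop op;
        int fd = open("/dev/nst0", O_WRONLY);   /* non-rewinding tape device */

        if (fd < 0) {
                perror("open");
                return 1;
        }

#ifdef MTWEOFI
        op.mt_op = MTWEOFI;     /* immediate: do not wait for the buffer flush */
#else
        op.mt_op = MTWEOF;      /* fall back to the flushing variant */
#endif
        op.mt_count = 1;

        if (ioctl(fd, MTIOCTOP, &op) < 0)
                perror("MTIOCTOP");

        close(fd);
        return 0;
}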


@ -5945,8 +5945,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
goto out;
mem = kmalloc(iocpage2sz, GFP_KERNEL);
if (!mem)
if (!mem) {
rc = -ENOMEM;
goto out;
}
memcpy(mem, (u8 *)pIoc2, iocpage2sz);
ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;


@ -2,7 +2,8 @@
# Makefile for the S/390 specific device drivers
#
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
zfcp_unit.o
obj-$(CONFIG_ZFCP) += zfcp.o


@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
struct ccw_device *cdev;
struct zfcp_adapter *adapter;
struct zfcp_port *port;
struct zfcp_unit *unit;
cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
if (!cdev)
@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (!port)
goto out_port;
flush_work(&port->rport_work);
unit = zfcp_unit_enqueue(port, lun);
if (IS_ERR(unit))
goto out_unit;
zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
zfcp_erp_wait(adapter);
flush_work(&unit->scsi_work);
out_unit:
zfcp_unit_add(port, lun);
put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
@ -158,6 +151,9 @@ static int __init zfcp_module_init(void)
fc_attach_transport(&zfcp_transport_functions);
if (!zfcp_data.scsi_transport_template)
goto out_transport;
scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
retval = misc_register(&zfcp_cfdc_misc);
if (retval) {
@ -210,30 +206,6 @@ static void __exit zfcp_module_exit(void)
module_exit(zfcp_module_exit);
/**
* zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
* @port: pointer to port to search for unit
* @fcp_lun: FCP LUN to search for
*
* Returns: pointer to zfcp_unit or NULL
*/
struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
{
unsigned long flags;
struct zfcp_unit *unit;
read_lock_irqsave(&port->unit_list_lock, flags);
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
if (!get_device(&unit->dev))
unit = NULL;
read_unlock_irqrestore(&port->unit_list_lock, flags);
return unit;
}
read_unlock_irqrestore(&port->unit_list_lock, flags);
return NULL;
}
/**
* zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
* @adapter: pointer to adapter to search for port
@ -259,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
return NULL;
}
/**
* zfcp_unit_release - dequeue unit
* @dev: pointer to device
*
* waits until all work is done on unit and removes it then from the unit->list
* of the associated port.
*/
static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
put_device(&unit->port->dev);
kfree(unit);
}
/**
* zfcp_unit_enqueue - enqueue unit to unit list of a port.
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be enqueued
* Returns: pointer to enqueued unit on success, ERR_PTR on error
*
* Sets up some unit internal structures and creates sysfs entry.
*/
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
int retval = -ENOMEM;
get_device(&port->dev);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
retval = -EEXIST;
goto err_out;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
goto err_out;
unit->port = port;
unit->fcp_lun = fcp_lun;
unit->dev.parent = &port->dev;
unit->dev.release = zfcp_unit_release;
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
goto err_out;
}
retval = -EINVAL;
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
spin_lock_init(&unit->latencies.lock);
unit->latencies.write.channel.min = 0xFFFFFFFF;
unit->latencies.write.fabric.min = 0xFFFFFFFF;
unit->latencies.read.channel.min = 0xFFFFFFFF;
unit->latencies.read.fabric.min = 0xFFFFFFFF;
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
if (device_register(&unit->dev)) {
put_device(&unit->dev);
goto err_out;
}
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
goto err_out_put;
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
return unit;
err_out_put:
device_unregister(&unit->dev);
err_out:
put_device(&port->dev);
return ERR_PTR(retval);
}
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
adapter->pool.erp_req =


@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
if (!adapter)
return 0;
zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccresu2", NULL);
zfcp_erp_wait(adapter);
@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccsonl2", NULL);
zfcp_erp_wait(adapter);
flush_work(&adapter->scan_work);
zfcp_ccw_activate(cdev);
zfcp_ccw_adapter_put(adapter);
return 0;
}
@ -224,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
break;
case CIO_OPER:
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"ccnoti4", NULL);
break;


@ -2,9 +2,10 @@
* zfcp device driver
*
* Userspace interface for accessing the
* Access Control Lists / Control File Data Channel
* Access Control Lists / Control File Data Channel;
* handling of response code and states for ports and LUNs.
*
* Copyright IBM Corporation 2008, 2009
* Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -261,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = {
.name = "zfcp_cfdc",
.fops = &zfcp_cfdc_fops,
};
/**
* zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
* @adapter: Adapter where the Access Control Table (ACT) changed
*
* After a change in the adapter ACT, check if access to any
* previously denied resources is now possible.
*/
void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
{
unsigned long flags;
struct zfcp_port *port;
struct scsi_device *sdev;
struct zfcp_scsi_dev *zfcp_sdev;
int status;
if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
return;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
status = atomic_read(&port->status);
if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
(status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
"cfaac_1", NULL);
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
shost_for_each_device(sdev, port->adapter->scsi_host) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
(status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
zfcp_erp_lun_reopen(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED,
"cfaac_2", NULL);
}
}
static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
{
u16 subtable = table >> 16;
u16 rule = table & 0xffff;
const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
if (subtable && subtable < ARRAY_SIZE(act_type))
dev_warn(&adapter->ccw_device->dev,
"Access denied according to ACT rule type %s, "
"rule %d\n", act_type[subtable], rule);
}
/**
* zfcp_cfdc_port_denied - Process "access denied" for port
* @port: The port where the access has been denied
* @qual: The FSF status qualifier for the access denied FSF status
*/
void zfcp_cfdc_port_denied(struct zfcp_port *port,
union fsf_status_qual *qual)
{
dev_warn(&port->adapter->ccw_device->dev,
"Access denied to port 0x%016Lx\n",
(unsigned long long)port->wwpn);
zfcp_act_eval_err(port->adapter, qual->halfword[0]);
zfcp_act_eval_err(port->adapter, qual->halfword[1]);
zfcp_erp_set_port_status(port,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
}
/**
* zfcp_cfdc_lun_denied - Process "access denied" for LUN
* @sdev: The SCSI device / LUN where the access has been denied
* @qual: The FSF status qualifier for the access denied FSF status
*/
void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
union fsf_status_qual *qual)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
"Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
zfcp_erp_set_lun_status(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
}
/**
* zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
* @sdev: The LUN / SCSI device where sharing violation occurred
* @qual: The FSF status qualifier from the LUN sharing violation
*/
void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
union fsf_status_qual *qual)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
if (qual->word[0])
dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
"LUN 0x%Lx on port 0x%Lx is already in "
"use by CSS%d, MIF Image ID %x\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn,
qual->fsf_queue_designator.cssid,
qual->fsf_queue_designator.hla);
else
zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
zfcp_erp_set_lun_status(sdev,
ZFCP_STATUS_COMMON_ERP_FAILED |
ZFCP_STATUS_COMMON_ACCESS_DENIED);
atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
}
/**
* zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
* @sdev: The SCSI device / LUN where to evaluate the status
* @bottom: The qtcb bottom with the status from the "open lun"
*
* Returns: 0 if LUN is usable, -EACCES if the access control table
* reports an unsupported configuration.
*/
int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
struct fsf_qtcb_bottom_support *bottom)
{
int shared, rw;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
!(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
zfcp_ccw_priv_sch(adapter))
return 0;
shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
if (shared)
atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
if (!rw) {
atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
"0x%016Lx on port 0x%016Lx opened read-only\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
}
if (!shared && !rw) {
dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
"not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
return -EACCES;
}
if (shared && rw) {
dev_err(&adapter->ccw_device->dev,
"Shared read-write access not supported "
"(LUN 0x%016Lx, port 0x%016Lx)\n",
zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
return -EACCES;
}
return 0;
}


@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
if (scsi_cmnd) {
response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
response->u.fcp.serial = scsi_cmnd->serial_number;
response->u.fcp.data_dir =
qtcb->bottom.io.data_direction;
}
@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p,
break;
zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
*p += sprintf(*p, "\n");
break;
@ -482,7 +480,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us);
zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
break;
case ZFCP_REC_DBF_ID_ACTION:
zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
@ -600,19 +598,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
}
/**
* zfcp_dbf_rec_unit - trace event for unit state change
* zfcp_dbf_rec_lun - trace event for LUN state change
* @id: identifier for trigger of state change
* @ref: additional reference (e.g. request)
* @unit: unit
* @sdev: SCSI device
*/
void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
{
struct zfcp_port *port = unit->port;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port = zfcp_sdev->port;
struct zfcp_dbf *dbf = port->adapter->dbf;
zfcp_dbf_rec_target(id, ref, dbf, &unit->status,
&unit->erp_counter, port->wwpn, port->d_id,
unit->fcp_lun);
zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status,
&zfcp_sdev->erp_counter, port->wwpn, port->d_id,
zfcp_scsi_dev_lun(sdev));
}
/**
@ -624,11 +623,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
* @action: address of error recovery action struct
* @adapter: adapter
* @port: port
* @unit: unit
* @sdev: SCSI device
*/
void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
struct zfcp_adapter *adapter, struct zfcp_port *port,
struct zfcp_unit *unit)
struct scsi_device *sdev)
{
struct zfcp_dbf *dbf = adapter->dbf;
struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
@ -647,9 +646,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
r->u.trigger.ps = atomic_read(&port->status);
r->u.trigger.wwpn = port->wwpn;
}
if (unit)
r->u.trigger.us = atomic_read(&unit->status);
r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
if (sdev)
r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
ZFCP_DBF_INVALID_LUN;
debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
@ -879,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
}
rec->scsi_result = scsi_cmnd->result;
rec->scsi_cmnd = (unsigned long)scsi_cmnd;
rec->scsi_serial = scsi_cmnd->serial_number;
memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
min((int)scsi_cmnd->cmd_len,
ZFCP_DBF_SCSI_OPCODE));
@ -948,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
0, ZFCP_DBF_SCSI_OPCODE);
zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);


@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger {
u8 need;
u32 as;
u32 ps;
u32 us;
u32 ls;
u64 ref;
u64 action;
u64 wwpn;
@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response {
union {
struct {
u64 cmnd;
u64 serial;
u32 data_dir;
} fcp;
struct {
@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record {
u32 scsi_lun;
u32 scsi_result;
u64 scsi_cmnd;
u64 scsi_serial;
#define ZFCP_DBF_SCSI_OPCODE 16
u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
u8 scsi_retries;
@ -350,16 +348,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
/**
* zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
* @tag: tag indicating success or failure of reset operation
* @scmnd: SCSI command which caused this error recovery
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
* @unit: unit that needs reset
* @scsi_cmnd: SCSI command which caused this error recovery
*/
static inline
void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
struct scsi_cmnd *scsi_cmnd)
void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
}
#endif /* ZFCP_DBF_H */


@ -85,8 +85,8 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
/* logical unit status */
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
#define ZFCP_STATUS_LUN_SHARED 0x00000004
#define ZFCP_STATUS_LUN_READONLY 0x00000008
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
@ -118,7 +118,7 @@ struct zfcp_erp_action {
int action; /* requested action code */
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct zfcp_unit *unit;
struct scsi_device *sdev;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
unsigned long fsf_req_id;
@ -219,20 +219,65 @@ struct zfcp_port {
unsigned int starget_id;
};
/**
* struct zfcp_unit - LUN configured via zfcp sysfs
* @dev: struct device for sysfs representation and reference counting
* @list: entry in LUN/unit list per zfcp_port
* @port: reference to zfcp_port where this LUN is configured
* @fcp_lun: 64 bit LUN value
* @scsi_work: for running scsi_scan_target
*
* This is the representation of a LUN that has been configured for
* usage. The main data here is the 64 bit LUN value, data for
* running I/O and recovery is in struct zfcp_scsi_dev.
*/
struct zfcp_unit {
struct device dev;
struct list_head list; /* list of logical units */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
u64 fcp_lun; /* own FCP_LUN */
u32 handle; /* handle assigned by FSF */
struct scsi_device *device; /* scsi device struct pointer */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
struct zfcp_latencies latencies;
struct device dev;
struct list_head list;
struct zfcp_port *port;
u64 fcp_lun;
struct work_struct scsi_work;
};
/**
* struct zfcp_scsi_dev - zfcp data per SCSI device
* @status: zfcp internal status flags
* @lun_handle: handle from "open lun" for issuing FSF requests
* @erp_action: zfcp erp data for opening and recovering this LUN
* @erp_counter: zfcp erp counter for this LUN
* @latencies: FSF channel and fabric latencies
* @port: zfcp_port where this LUN belongs to
*/
struct zfcp_scsi_dev {
atomic_t status;
u32 lun_handle;
struct zfcp_erp_action erp_action;
atomic_t erp_counter;
struct zfcp_latencies latencies;
struct zfcp_port *port;
};
/**
* sdev_to_zfcp - Access zfcp LUN data for SCSI device
* @sdev: scsi_device where to get the zfcp_scsi_dev pointer
*/
static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
{
return scsi_transport_device_data(sdev);
}
/**
* zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
* @sdev: SCSI device where to get the LUN from
*/
static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
{
u64 fcp_lun;
int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
return fcp_lun;
}
/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
@ -249,7 +294,6 @@ struct zfcp_unit {
* @erp_action: reference to erp action if request issued on behalf of ERP
* @pool: reference to memory pool if used for this request
* @issued: time when request was send (STCK)
* @unit: reference to unit if this request is a SCSI request
* @handler: handler which should be called to process response
*/
struct zfcp_fsf_req {
@ -267,7 +311,6 @@ struct zfcp_fsf_req {
struct zfcp_erp_action *erp_action;
mempool_t *pool;
unsigned long long issued;
struct zfcp_unit *unit;
void (*handler)(struct zfcp_fsf_req *);
};
@ -282,9 +325,4 @@ struct zfcp_data {
struct kmem_cache *adisc_cache;
};
/********************** ZFCP SPECIFIC DEFINES ********************************/
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
#endif /* ZFCP_DEF_H */
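
The struct zfcp_scsi_dev and sdev_to_zfcp() helpers above rely on the SCSI
transport class reserving driver-private space in every scsi_device (see the
scsi_transport_reserve_device() call added in zfcp_module_init() earlier). The
generic, illustrative sketch below shows that mechanism only; the demo_* names
are hypothetical and not zfcp identifiers.

/* Per-device private data via the SCSI transport class. */
#include <linux/types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

struct demo_scsi_dev {
        unsigned int status;    /* driver-private status flags */
        u32 handle;             /* e.g. a firmware handle for this LUN */
};

static inline struct demo_scsi_dev *demo_sdev(struct scsi_device *sdev)
{
        /* Returns the area reserved for this scsi_device. */
        return scsi_transport_device_data(sdev);
}

/*
 * During driver initialization, after allocating the transport template:
 *
 *     scsi_transport_reserve_device(template, sizeof(struct demo_scsi_dev));
 *
 * From then on slave_alloc() and the I/O path can use demo_sdev(sdev)
 * instead of maintaining a separate per-LUN list, which is exactly the
 * switch zfcp makes from struct zfcp_unit to struct zfcp_scsi_dev.
 */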

Diff for this file not shown because of its large size.


@ -15,12 +15,10 @@
#include "zfcp_fc.h"
/* zfcp_aux.c */
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_device_unregister(struct device *,
@ -36,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
/* zfcp_cfdc.c */
extern struct miscdevice zfcp_cfdc_misc;
extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
union fsf_status_qual *);
extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
struct fsf_qtcb_bottom_support *);
extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
/* zfcp_dbf.c */
extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
@ -44,10 +50,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *);
extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
struct zfcp_adapter *, struct zfcp_port *,
struct zfcp_unit *);
struct scsi_device *);
extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
struct zfcp_dbf *);
@ -65,34 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
unsigned long);
/* zfcp_erp.c */
extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
void *, u32, int);
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
void *);
extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *);
extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32,
int);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
void *);
extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *);
extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32,
int);
extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
void *);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
@ -118,8 +116,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_config *);
@ -135,12 +133,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
mempool_t *, unsigned int);
extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
struct scsi_cmnd *);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
struct zfcp_unit *);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
@ -163,8 +159,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_scan(struct zfcp_unit *);
extern void zfcp_scsi_scan_work(struct work_struct *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
@ -175,4 +169,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
extern int zfcp_unit_remove(struct zfcp_port *, u64);
extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
#endif /* ZFCP_EXT_H */


@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
}
if (!port->d_id) {
zfcp_erp_port_failed(port, "fcgpn_2", NULL);
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
goto out;
}

Diff for this file not shown because of its large size.


@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
unsigned long long now, span;
int used;
spin_lock(&qdio->stat_lock);
now = get_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
spin_unlock(&qdio->stat_lock);
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
spin_lock_irq(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock_irq(&qdio->stat_lock);
atomic_add(count, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}
@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
spin_lock_bh(&qdio->req_q_lock);
spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
spin_unlock_bh(&qdio->req_q_lock);
spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
spin_unlock_bh(&qdio->req_q_lock);
spin_unlock_irq(&qdio->req_q_lock);
ret = wait_event_interruptible_timeout(qdio->req_q_wq,
zfcp_qdio_sbal_check(qdio), 5 * HZ);
@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
}
spin_lock_bh(&qdio->req_q_lock);
spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
int retval;
u8 sbal_number = q_req->sbal_number;
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
q_req->sbal_first, sbal_number);
@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
spin_lock_bh(&qdio->req_q_lock);
spin_lock_irq(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_bh(&qdio->req_q_lock);
spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);


@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
return sdev->queue_depth;
}
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
unit->device = NULL;
put_device(&unit->dev);
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
void (*done) (struct scsi_cmnd *))
{
struct zfcp_unit *unit;
struct zfcp_adapter *adapter;
int status, scsi_result, ret;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
int status, scsi_result, ret;
/* reset the status for this request */
scpnt->result = 0;
scpnt->host_scribble = NULL;
scpnt->scsi_done = done;
/*
* figure out adapter and target device
* (stored there by zfcp_scsi_slave_alloc)
*/
adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
unit = scpnt->device->hostdata;
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return 0;
}
status = atomic_read(&unit->status);
status = atomic_read(&zfcp_sdev->status);
if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
!(atomic_read(&unit->port->status) &
!(atomic_read(&zfcp_sdev->port->status) &
ZFCP_STATUS_COMMON_ERP_FAILED)) {
/* only unit access denied, but port is good
/* only LUN access denied, but port is good
* not covered by FC transport, have to fail here */
zfcp_scsi_command_fail(scpnt, DID_ERROR);
return 0;
@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
/* This could be either
* open unit pending: this is temporary, will result in
* open unit or ERP_FAILED, so retry command
* open LUN pending: this is temporary, will result in
* open LUN or ERP_FAILED, so retry command
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return 0;
}
ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
ret = zfcp_fsf_fcp_cmnd(scpnt);
if (unlikely(ret == -EBUSY))
return SCSI_MLQUEUE_DEVICE_BUSY;
else if (unlikely(ret < 0))
@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
return ret;
}
static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
unsigned int id, u64 lun)
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
unsigned long flags;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sdev->host->hostdata[0];
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
struct zfcp_port *port;
struct zfcp_unit *unit = NULL;
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) {
if (!port->rport || (id != port->rport->scsi_target_id))
continue;
unit = zfcp_get_unit_by_lun(port, lun);
if (unit)
break;
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return unit;
}
static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
{
struct zfcp_adapter *adapter;
struct zfcp_unit *unit;
u64 lun;
adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
if (!adapter)
goto out;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
unit = zfcp_unit_lookup(adapter, sdp->id, lun);
if (unit) {
sdp->hostdata = unit;
unit->device = sdp;
return 0;
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
put_device(&port->dev);
return -ENXIO;
}
out:
return -ENXIO;
zfcp_sdev->port = port;
zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
spin_lock_init(&zfcp_sdev->latencies.lock);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
zfcp_erp_wait(port->adapter);
return 0;
}
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct Scsi_Host *scsi_host = scpnt->device->host;
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
write_unlock_irqrestore(&adapter->abort_lock, flags);
while (retry--) {
abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
if (abrt_req)
break;
@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
if (fsf_req)
break;
@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
return SUCCESS;
}
}
@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
} else
zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_fsf_req_free(fsf_req);
return retval;
@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
@ -319,8 +309,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
}
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 1;
adapter->scsi_host->max_lun = 1;
adapter->scsi_host->max_id = 511;
adapter->scsi_host->max_lun = 0xFFFFFFFF;
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
@ -534,20 +524,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
}
}
static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
{
struct zfcp_unit *unit;
read_lock_irq(&port->unit_list_lock);
list_for_each_entry(unit, &port->unit_list, list) {
get_device(&unit->dev);
if (scsi_queue_work(port->adapter->scsi_host,
&unit->scsi_work) <= 0)
put_device(&unit->dev);
}
read_unlock_irq(&port->unit_list_lock);
}
static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
@ -574,7 +550,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
port->rport = rport;
port->starget_id = rport->scsi_target_id;
zfcp_scsi_queue_unit_register(port);
zfcp_unit_queue_scsi_scan(port);
}
static void zfcp_scsi_rport_block(struct zfcp_port *port)
@ -637,29 +613,6 @@ void zfcp_scsi_rport_work(struct work_struct *work)
put_device(&port->dev);
}
/**
* zfcp_scsi_scan - Register LUN with SCSI midlayer
* @unit: The LUN/unit to register
*/
void zfcp_scsi_scan(struct zfcp_unit *unit)
{
struct fc_rport *rport = unit->port->rport;
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
}
void zfcp_scsi_scan_work(struct work_struct *work)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);
zfcp_scsi_scan(unit);
put_device(&unit->dev);
}
/**
* zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
* @adapter: The adapter where to configure DIF/DIX for the SCSI host
@ -735,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_port_type = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.disable_target_scan = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};


@ -68,63 +68,96 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
atomic_read(&unit->status));
zfcp_unit_sdev_status(unit));
ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
(atomic_read(&unit->status) &
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(atomic_read(&unit->status) &
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SHARED) != 0);
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_LUN_SHARED) != 0);
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_READONLY) != 0);
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_LUN_READONLY) != 0);
#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \
static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
else \
return sprintf(buf, "0\n"); \
} \
static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
unsigned long val; \
int retval = 0; \
\
if (!(_feat && get_device(&_feat->dev))) \
return -EBUSY; \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
retval = -EINVAL; \
goto out; \
} \
\
zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
put_device(&_feat->dev); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
zfcp_sysfs_##_feat##_failed_show, \
zfcp_sysfs_##_feat##_failed_store);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
return sprintf(buf, "1\n");
return sprintf(buf, "0\n");
}
static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
unsigned long val;
if (strict_strtoul(buf, 0, &val) || val != 0)
return -EINVAL;
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
NULL);
zfcp_erp_wait(port->adapter);
return count;
}
static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_port_failed_show,
zfcp_sysfs_port_failed_store);
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
struct scsi_device *sdev;
unsigned int status, failed = 1;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
status = atomic_read(&sdev_to_zfcp(sdev)->status);
failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
scsi_device_put(sdev);
}
return sprintf(buf, "%d\n", failed);
}
static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
unsigned long val;
struct scsi_device *sdev;
if (strict_strtoul(buf, 0, &val) || val != 0)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
"syufai2", NULL);
zfcp_erp_wait(unit->port->adapter);
} else
zfcp_unit_scsi_scan(unit);
return count;
}
static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
zfcp_sysfs_unit_failed_show,
zfcp_sysfs_unit_failed_store);
static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
struct device_attribute *attr,
@ -163,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
goto out;
}
zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"syafai2", NULL);
zfcp_erp_wait(adapter);
@ -257,28 +289,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
return -EINVAL;
unit = zfcp_unit_enqueue(port, fcp_lun);
if (IS_ERR(unit))
goto out;
else
retval = 0;
if (zfcp_unit_add(port, fcp_lun))
return -EINVAL;
zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
zfcp_erp_wait(unit->port->adapter);
zfcp_scsi_scan(unit);
out:
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
return count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@ -287,42 +306,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
struct scsi_device *sdev;
if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
return -EINVAL;
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (!unit)
goto out;
else
retval = 0;
if (zfcp_unit_remove(port, fcp_lun))
return -EINVAL;
sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
port->starget_id,
scsilun_to_int((struct scsi_lun *)&fcp_lun));
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
write_lock_irq(&port->unit_list_lock);
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
put_device(&unit->dev);
zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
out:
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
return count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
@ -363,9 +355,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) { \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
struct zfcp_latencies *lat = &unit->latencies; \
struct zfcp_adapter *adapter = unit->port->adapter; \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \
unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \
\
spin_lock_bh(&lat->lock); \
@ -394,8 +386,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \
const char *buf, size_t count) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
struct zfcp_latencies *lat = &unit->latencies; \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_latencies *lat = &zfcp_sdev->latencies; \
unsigned long flags; \
\
spin_lock_irqsave(&lat->lock, flags); \
@ -423,19 +415,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
struct device_attribute *attr,\
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_unit *unit = sdev->hostdata; \
struct scsi_device *sdev = to_scsi_device(dev); \
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \
struct zfcp_port *port = zfcp_sdev->port; \
\
return sprintf(buf, _format, _value); \
} \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
dev_name(&unit->port->adapter->ccw_device->dev));
dev_name(&port->adapter->ccw_device->dev));
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
(unsigned long long) unit->port->wwpn);
ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
(unsigned long long) unit->fcp_lun);
(unsigned long long) port->wwpn);
static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
}
static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,


@ -0,0 +1,244 @@
/*
* zfcp device driver
*
* Tracking of manually configured LUNs and helper functions to
* register the LUNs with the SCSI midlayer.
*
* Copyright IBM Corporation 2010
*/
#include "zfcp_def.h"
#include "zfcp_ext.h"
/**
* zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
* @unit: The zfcp LUN/unit to register
*
* When the SCSI midlayer is not allowed to automatically scan and
* attach SCSI devices, zfcp has to register the single devices with
* the SCSI midlayer.
*/
void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
{
struct fc_rport *rport = unit->port->rport;
unsigned int lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
}
static void zfcp_unit_scsi_scan_work(struct work_struct *work)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);
zfcp_unit_scsi_scan(unit);
put_device(&unit->dev);
}
/**
* zfcp_unit_queue_scsi_scan - Register configured units on port
* @port: The zfcp_port where to register units
*
* After opening a port, all units configured on this port have to be
* registered with the SCSI midlayer. This function should be called
* after calling fc_remote_port_add, so that the fc_rport is already
* ONLINE and the call to scsi_scan_target runs the same way as the
* call in the FC transport class.
*/
void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
{
struct zfcp_unit *unit;
read_lock_irq(&port->unit_list_lock);
list_for_each_entry(unit, &port->unit_list, list) {
get_device(&unit->dev);
if (scsi_queue_work(port->adapter->scsi_host,
&unit->scsi_work) <= 0)
put_device(&unit->dev);
}
read_unlock_irq(&port->unit_list_lock);
}
static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
get_device(&unit->dev);
return unit;
}
return NULL;
}
/**
* zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
* @port: zfcp_port where to look for the unit
* @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
*
* If zfcp_unit is found, a reference is acquired that has to be
* released later.
*
* Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
* with the specified FCP LUN.
*/
struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
read_lock_irq(&port->unit_list_lock);
unit = _zfcp_unit_find(port, fcp_lun);
read_unlock_irq(&port->unit_list_lock);
return unit;
}
/**
* zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
* @dev: pointer to device in zfcp_unit
*/
static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
put_device(&unit->port->dev);
kfree(unit);
}
/**
* zfcp_unit_add - add unit to the unit list of a port
* @port: pointer to port where unit is added
* @fcp_lun: FCP LUN of unit to be added
* Returns: 0 on success, -EEXIST, -ENOMEM or -EINVAL on error
*
* Sets up some unit internal structures and creates sysfs entry.
*/
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
return -EEXIST;
}
unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
if (!unit)
return -ENOMEM;
unit->port = port;
unit->fcp_lun = fcp_lun;
unit->dev.parent = &port->dev;
unit->dev.release = zfcp_unit_release;
INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
return -ENOMEM;
}
if (device_register(&unit->dev)) {
put_device(&unit->dev);
return -ENOMEM;
}
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->dev);
return -EINVAL;
}
get_device(&port->dev);
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
zfcp_unit_scsi_scan(unit);
return 0;
}
/**
* zfcp_unit_sdev - Return SCSI device for zfcp_unit
* @unit: The zfcp_unit where to get the SCSI device for
*
* Returns: scsi_device pointer on success, NULL if there is no SCSI
* device for this zfcp_unit
*
* On success, the caller also holds a reference to the SCSI device
* that must be released with scsi_device_put.
*/
struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
unsigned int lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
port = unit->port;
shost = port->adapter->scsi_host;
return scsi_device_lookup(shost, 0, port->starget_id, lun);
}
/**
* zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
* @unit: The unit to lookup the SCSI device for
*
* Returns the zfcp LUN status field of the SCSI device if the SCSI device
* for the zfcp_unit exists, 0 otherwise.
*/
unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
{
unsigned int status = 0;
struct scsi_device *sdev;
struct zfcp_scsi_dev *zfcp_sdev;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
scsi_device_put(sdev);
}
return status;
}
/**
* zfcp_unit_remove - Remove entry from list of configured units
* @port: The port where to remove the unit from the configuration
* @fcp_lun: The 64 bit LUN of the unit to remove
*
* Returns: -EINVAL if a unit with the specified LUN does not exist,
* 0 on success.
*/
int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
struct scsi_device *sdev;
write_lock_irq(&port->unit_list_lock);
unit = _zfcp_unit_find(port, fcp_lun);
if (unit)
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
if (!unit)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
put_device(&unit->dev);
zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
return 0;
}


@ -316,7 +316,8 @@ config SCSI_ISCSI_ATTRS
config SCSI_SAS_ATTRS
tristate "SAS Transport Attributes"
depends on SCSI && BLK_DEV_BSG
depends on SCSI
select BLK_DEV_BSG
help
If you wish to export transport-specific information about
each attached SAS device to sysfs, say Y.
@ -378,7 +379,7 @@ config ISCSI_BOOT_SYSFS
via sysfs to userspace. If you wish to export this information,
say Y. Otherwise, say N.
source "drivers/scsi/cxgb3i/Kconfig"
source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"


@ -133,7 +133,8 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o


@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Initialize the mutex used to wait for the next AIF.
*/
init_MUTEX_LOCKED(&fibctx->wait_sem);
sema_init(&fibctx->wait_sem, 0);
fibctx->wait = 0;
/*
* Initialize the fibs and set the count of fibs on


@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev)
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
init_MUTEX_LOCKED(&fibptr->event_wait);
sema_init(&fibptr->event_wait, 0);
spin_lock_init(&fibptr->event_lock);
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);


@ -878,8 +878,8 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
if (!error) {
if (acb->devstate[id][lun] == ARECA_RAID_GONE)
acb->devstate[id][lun] = ARECA_RAID_GOOD;
ccb->pcmd->result = DID_OK << 16;
arcmsr_ccb_complete(ccb);
ccb->pcmd->result = DID_OK << 16;
arcmsr_ccb_complete(ccb);
}else{
switch (ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {


@ -335,7 +335,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
if (ready)
break;
if (cnt > 6000000) {
if (cnt > 12000000) {
dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
return -EBUSY;
}


@ -522,7 +522,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
phba->params.cxns_per_ctrl * 2)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
goto free_ep;
}
@ -559,7 +558,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
" status = %d extd_status = %d\n",
status, extd_status);
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@ -574,7 +572,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_ep->cid_vld = 1;
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
}
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;


@ -2040,7 +2040,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
unsigned int num_sg, struct beiscsi_io_task *io_task)
{
struct iscsi_sge *psgl;
unsigned short sg_len, index;
unsigned int sg_len, index;
unsigned int sge_len = 0;
unsigned long long addr;
struct scatterlist *l_sg;


@ -1,15 +1,8 @@
obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
bfa-y += bfad_debugfs.o
bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
bfa-y += bfa_csdebug.o bfa_sm.o plog.o
bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
ccflags-y := -DBFA_PERF_BUILD

drivers/scsi/bfa/bfa.h (new file, 438 lines)

@ -0,0 +1,438 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_H__
#define __BFA_H__
#include "bfa_os_inc.h"
#include "bfa_cs.h"
#include "bfa_plog.h"
#include "bfa_defs_svc.h"
#include "bfi.h"
#include "bfa_ioc.h"
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
/**
* Interrupt message handlers
*/
void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
/**
* Request and response queue related defines
*/
#define BFA_REQQ_NELEMS_MIN (4)
#define BFA_RSPQ_NELEMS_MIN (4)
#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
#define bfa_reqq_ci(__bfa, __reqq) \
(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
#define bfa_reqq_full(__bfa, __reqq) \
(((bfa_reqq_pi(__bfa, __reqq) + 1) & \
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
bfa_reqq_ci(__bfa, __reqq))
#define bfa_reqq_next(__bfa, __reqq) \
(bfa_reqq_full(__bfa, __reqq) ? NULL : \
((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+ bfa_reqq_pi((__bfa), (__reqq)))))
#define bfa_reqq_produce(__bfa, __reqq) do { \
(__bfa)->iocfc.req_cq_pi[__reqq]++; \
(__bfa)->iocfc.req_cq_pi[__reqq] &= \
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
(__bfa)->iocfc.req_cq_pi[__reqq]); \
mmiowb(); \
} while (0)
#define bfa_rspq_pi(__bfa, __rspq) \
(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
#define bfa_rspq_elem(__bfa, __rspq, __ci) \
(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
#define CQ_INCR(__index, __size) do { \
(__index)++; \
(__index) &= ((__size) - 1); \
} while (0)
/**
* Queue element to wait for room in request queue. FIFO order is
 * maintained when fulfilling requests.
*/
struct bfa_reqq_wait_s {
struct list_head qe;
void (*qresume) (void *cbarg);
void *cbarg;
};
/**
* Circular queue usage assignments
*/
enum {
BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
BFA_REQQ_FCXP = 0, /* all FCXP messages */
BFA_REQQ_LPS = 0, /* all lport service msgs */
BFA_REQQ_PORT = 0, /* all port messages */
BFA_REQQ_FLASH = 0, /* for flash module */
BFA_REQQ_DIAG = 0, /* for diag module */
BFA_REQQ_RPORT = 0, /* all port messages */
BFA_REQQ_SBOOT = 0, /* all san boot messages */
BFA_REQQ_QOS_LO = 1, /* all low priority IO */
BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
BFA_REQQ_QOS_HI = 3, /* all high priority IO */
};
static inline void
bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
void *cbarg)
{
wqe->qresume = qresume;
wqe->cbarg = cbarg;
}
#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
/**
* static inline void
* bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
*/
#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
\
struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
\
bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
\
list_add_tail(&(__wqe)->qe, waitq); \
} while (0)
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
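/*
 * Editor's sketch, not part of the original header: the usual pattern for
 * sending a request when the queue may be full.  my_mod_s, my_send() and
 * my_qresume() are assumptions for illustration only.
 */
struct my_mod_s {
	struct bfa_s		*bfa;
	struct bfa_reqq_wait_s	reqq_wait;
};

static void my_qresume(void *cbarg);

static void
my_send(struct my_mod_s *mod)
{
	struct bfi_msg_s *m;

	m = bfa_reqq_next(mod->bfa, BFA_REQQ_FCXP);
	if (!m) {
		/* queue full: park a wait element, resumed in FIFO order */
		bfa_reqq_winit(&mod->reqq_wait, my_qresume, mod);
		bfa_reqq_wait(mod->bfa, BFA_REQQ_FCXP, &mod->reqq_wait);
		return;
	}

	/* ... build the request message in *m, then ring the doorbell ... */
	bfa_reqq_produce(mod->bfa, BFA_REQQ_FCXP);
}

static void
my_qresume(void *cbarg)
{
	my_send(cbarg);		/* retry now that queue space is available */
}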
/**
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
struct list_head qe;
bfa_cb_cbfn_t cbfn;
bfa_boolean_t once;
u32 rsvd;
void *cbarg;
};
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
} while (0)
#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
if (!(__hcb_qe)->once) { \
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
(__hcb_qe)->once = BFA_TRUE; \
} \
} while (0)
#define bfa_cb_queue_done(__hcb_qe) do { \
(__hcb_qe)->once = BFA_FALSE; \
} while (0)
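/*
 * Editor's sketch, not part of the original header: deferring a completion
 * through the generic callback queue.  my_req_s, my_done() and
 * my_req_finish() are assumptions for illustration; queued elements are run
 * later from the adapter's completion queue processing.
 */
struct my_req_s {
	struct bfa_cb_qe_s	hcb_qe;		/* embedded callback element */
};

static void
my_done(void *cbarg, bfa_boolean_t complete)
{
	struct my_req_s *req = cbarg;

	/* complete the request towards the caller here */
	(void) req;
}

static void
my_req_finish(struct bfa_s *bfa, struct my_req_s *req)
{
	bfa_cb_queue(bfa, &req->hcb_qe, my_done, req);
}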
/**
* PCI devices supported by the current BFA
*/
struct bfa_pciid_s {
u16 device_id;
u16 vendor_id;
};
extern char bfa_version[];
/**
* BFA memory resources
*/
enum bfa_mem_type {
BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */
BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
};
struct bfa_mem_elem_s {
enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
u32 mem_len; /* Total Length in Bytes */
u8 *kva; /* kernel virtual address */
u64 dma; /* dma address if DMA memory */
u8 *kva_curp; /* kva allocation cursor */
u64 dma_curp; /* dma allocation cursor */
};
struct bfa_meminfo_s {
struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
};
#define bfa_meminfo_kva(_m) \
((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
#define bfa_meminfo_dma_virt(_m) \
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
#define bfa_meminfo_dma_phys(_m) \
((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
struct bfa_iocfc_regs_s {
bfa_os_addr_t intr_status;
bfa_os_addr_t intr_mask;
bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
};
/**
* MSIX vector handlers
*/
#define BFA_MSIX_MAX_VECTORS 22
typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
struct bfa_msix_s {
int nvecs;
bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
};
/**
* Chip specific interfaces
*/
struct bfa_hwif_s {
void (*hw_reginit)(struct bfa_s *bfa);
void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
void (*hw_msix_install)(struct bfa_s *bfa);
void (*hw_msix_uninstall)(struct bfa_s *bfa);
void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
u32 *end);
};
typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
struct bfa_iocfc_s {
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
int action;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
struct bfa_cb_qe_s init_hcb_qe;
struct bfa_cb_qe_s stop_hcb_qe;
struct bfa_cb_qe_s dis_hcb_qe;
struct bfa_cb_qe_s stats_hcb_qe;
bfa_boolean_t cfgdone;
struct bfa_dma_s cfg_info;
struct bfi_iocfc_cfg_s *cfginfo;
struct bfa_dma_s cfgrsp_dma;
struct bfi_iocfc_cfgrsp_s *cfgrsp;
struct bfi_iocfc_cfg_reply_s *cfg_reply;
struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
struct bfa_hwif_s hwif;
bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
void *updateq_cbarg; /* bios callback arg */
u32 intr_mask;
};
#define bfa_lpuid(__bfa) \
bfa_ioc_portid(&(__bfa)->ioc)
#define bfa_msix_init(__bfa, __nvecs) \
((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
#define bfa_msix_install(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
#define bfa_msix_uninstall(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
#define bfa_isr_mode_set(__bfa, __msix) \
((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
__nvecs, __maxvec))
#define bfa_msix_get_rme_range(__bfa, __start, __end) \
((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
#define bfa_msix(__bfa, __vec) \
((__bfa)->msix.handler[__vec](__bfa, __vec))
/*
* FC specific IOC functions.
*/
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void bfa_iocfc_detach(struct bfa_s *bfa);
void bfa_iocfc_init(struct bfa_s *bfa);
void bfa_iocfc_start(struct bfa_s *bfa);
void bfa_iocfc_stop(struct bfa_s *bfa);
void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
void bfa_iocfc_reset_queues(struct bfa_s *bfa);
void bfa_msix_all(struct bfa_s *bfa, int vec);
void bfa_msix_reqq(struct bfa_s *bfa, int vec);
void bfa_msix_rspq(struct bfa_s *bfa, int vec);
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
void bfa_hwcb_reginit(struct bfa_s *bfa);
void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwcb_msix_install(struct bfa_s *bfa);
void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
u32 *maxvec);
void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
u32 *end);
void bfa_hwct_reginit(struct bfa_s *bfa);
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwct_msix_install(struct bfa_s *bfa);
void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
u32 *maxvec);
void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
u32 *end);
void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
struct bfa_boot_pbc_s *pbcfg);
int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
struct bfi_pbc_vport_s *pbc_vport);
/**
*----------------------------------------------------------------------
* BFA public interfaces
*----------------------------------------------------------------------
*/
#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++)
#define bfa_ioc_get_stats(__bfa, __ioc_stats) \
bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
#define bfa_ioc_clear_stats(__bfa) \
bfa_ioc_clr_stats(&(__bfa)->ioc)
#define bfa_get_nports(__bfa) \
bfa_ioc_get_nports(&(__bfa)->ioc)
#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
#define bfa_get_adapter_model(__bfa, __model) \
bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
#define bfa_get_ioc_state(__bfa) \
bfa_ioc_get_state(&(__bfa)->ioc)
#define bfa_get_type(__bfa) \
bfa_ioc_get_type(&(__bfa)->ioc)
#define bfa_get_mac(__bfa) \
bfa_ioc_get_mac(&(__bfa)->ioc)
#define bfa_get_mfg_mac(__bfa) \
bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
#define bfa_get_fw_clock_res(__bfa) \
((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo);
void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
void bfa_detach(struct bfa_s *bfa);
void bfa_init(struct bfa_s *bfa);
void bfa_start(struct bfa_s *bfa);
void bfa_stop(struct bfa_s *bfa);
void bfa_attach_fcs(struct bfa_s *bfa);
void bfa_cb_init(void *bfad, bfa_status_t status);
void bfa_cb_updateq(void *bfad, bfa_status_t status);
bfa_boolean_t bfa_intx(struct bfa_s *bfa);
void bfa_intx_disable(struct bfa_s *bfa);
void bfa_intx_enable(struct bfa_s *bfa);
void bfa_isr_enable(struct bfa_s *bfa);
void bfa_isr_disable(struct bfa_s *bfa);
void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
void bfa_adapter_get_attr(struct bfa_s *bfa,
struct bfa_adapter_attr_s *ad_attr);
u64 bfa_adapter_get_id(struct bfa_s *bfa);
bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
struct bfa_iocfc_intr_attr_s *attr);
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
void bfa_chip_reset(struct bfa_s *bfa);
void bfa_timer_tick(struct bfa_s *bfa);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
/*
* BFA debug API functions
*/
bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
u32 *offset, int *buflen);
void bfa_debug_fwsave_clear(struct bfa_s *bfa);
bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
#endif /* __BFA_H__ */


@ -1,57 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_CALLBACK_PRIV_H__
#define __BFA_CALLBACK_PRIV_H__
#include <cs/bfa_q.h>
typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
/**
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
struct list_head qe;
bfa_cb_cbfn_t cbfn;
bfa_boolean_t once;
u32 rsvd;
void *cbarg;
};
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
} while (0)
#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
if (!(__hcb_qe)->once) { \
list_add_tail((__hcb_qe), &(__bfa)->comp_q); \
(__hcb_qe)->once = BFA_TRUE; \
} \
} while (0)
#define bfa_cb_queue_done(__hcb_qe) do { \
(__hcb_qe)->once = BFA_FALSE; \
} while (0)
#endif /* __BFA_CALLBACK_PRIV_H__ */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,37 +15,25 @@
* General Public License for more details.
*/
/**
* bfa_cb_ioim_macros.h BFA IOIM driver interface macros.
*/
#ifndef __BFA_HCB_IOIM_MACROS_H__
#define __BFA_HCB_IOIM_MACROS_H__
#include <bfa_os_inc.h>
/*
* #include <linux/dma-mapping.h>
*
* #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
* <scsi/scsi_device.h> #include <scsi/scsi_host.h>
*/
#include "bfad_im_compat.h"
#ifndef __BFA_HCB_IOIM_H__
#define __BFA_HCB_IOIM_H__
#include "bfa_os_inc.h"
/*
* task attribute values in FCP-2 FCP_CMND IU
*/
#define SIMPLE_Q 0
#define HEAD_OF_Q 1
#define ORDERED_Q 2
#define ACA_Q 4
#define ACA_Q 4
#define UNTAGGED 5
static inline lun_t
bfad_int_to_lun(u32 luno)
{
union {
u16 scsi_lun[4];
lun_t bfa_lun;
u16 scsi_lun[4];
lun_t bfa_lun;
} lun;
lun.bfa_lun = 0;
@ -141,7 +129,7 @@ static inline u8
bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
{
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
u8 task_attr = UNTAGGED;
u8 task_attr = UNTAGGED;
if (cmnd->device->tagged_supported) {
switch (cmnd->tag) {
@ -178,4 +166,4 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
*/
#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
#endif /* __BFA_HCB_IOIM_MACROS_H__ */
#endif /* __BFA_HCB_IOIM_H__ */


@ -1,492 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <defs/bfa_defs_cee.h>
#include <cs/bfa_trc.h>
#include <cs/bfa_log.h>
#include <cs/bfa_debug.h>
#include <cee/bfa_cee.h>
#include <bfi/bfi_cee.h>
#include <bfi/bfi.h>
#include <bfa_ioc.h>
#include <cna/bfa_cna_trcmod.h>
BFA_TRC_FILE(CNA, CEE);
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
*dcbcx_stats);
static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
*lldp_stats);
static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
static void bfa_cee_format_cee_cfg(void *buffer);
static void bfa_cee_format_cee_stats(void *buffer);
static void
bfa_cee_format_cee_stats(void *buffer)
{
struct bfa_cee_stats_s *cee_stats = buffer;
bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
}
static void
bfa_cee_format_cee_cfg(void *buffer)
{
struct bfa_cee_attr_s *cee_cfg = buffer;
bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
}
static void
bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
{
dcbcx_stats->subtlvs_unrecognized =
bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
dcbcx_stats->negotiation_failed =
bfa_os_ntohl(dcbcx_stats->negotiation_failed);
dcbcx_stats->remote_cfg_changed =
bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
}
static void
bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
{
lldp_stats->frames_transmitted =
bfa_os_ntohl(lldp_stats->frames_transmitted);
lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
lldp_stats->frames_discarded =
bfa_os_ntohl(lldp_stats->frames_discarded);
lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
lldp_stats->tlvs_unrecognized =
bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
}
static void
bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
{
cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
cfg_stats->cee_hw_cfg_changed =
bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
cfg_stats->recvd_invalid_cfg =
bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
}
static void
bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
{
lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
lldp_cfg->enabled_system_cap =
bfa_os_ntohs(lldp_cfg->enabled_system_cap);
}
/**
* bfa_cee_attr_meminfo()
*
*
* @param[in] void
*
* @return Size of DMA region
*/
static u32
bfa_cee_attr_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
}
/**
* bfa_cee_stats_meminfo()
*
*
* @param[in] void
*
* @return Size of DMA region
*/
static u32
bfa_cee_stats_meminfo(void)
{
return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
}
/**
* bfa_cee_get_attr_isr()
*
*
* @param[in] cee - Pointer to the CEE module
* status - Return status from the f/w
*
* @return void
*/
static void
bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
cee->get_attr_status = status;
bfa_trc(cee, 0);
if (status == BFA_STATUS_OK) {
bfa_trc(cee, 0);
/*
* The requested data has been copied to the DMA area, *process
* it.
*/
memcpy(cee->attr, cee->attr_dma.kva,
sizeof(struct bfa_cee_attr_s));
bfa_cee_format_cee_cfg(cee->attr);
}
cee->get_attr_pending = BFA_FALSE;
if (cee->cbfn.get_attr_cbfn) {
bfa_trc(cee, 0);
cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
}
bfa_trc(cee, 0);
}
/**
* bfa_cee_get_attr_isr()
*
*
* @param[in] cee - Pointer to the CEE module
* status - Return status from the f/w
*
* @return void
*/
static void
bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
cee->get_stats_status = status;
bfa_trc(cee, 0);
if (status == BFA_STATUS_OK) {
bfa_trc(cee, 0);
/*
* The requested data has been copied to the DMA area, process
* it.
*/
memcpy(cee->stats, cee->stats_dma.kva,
sizeof(struct bfa_cee_stats_s));
bfa_cee_format_cee_stats(cee->stats);
}
cee->get_stats_pending = BFA_FALSE;
bfa_trc(cee, 0);
if (cee->cbfn.get_stats_cbfn) {
bfa_trc(cee, 0);
cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
}
bfa_trc(cee, 0);
}
/**
* bfa_cee_get_attr_isr()
*
*
* @param[in] cee - Pointer to the CEE module
* status - Return status from the f/w
*
* @return void
*/
static void
bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
cee->reset_stats_status = status;
cee->reset_stats_pending = BFA_FALSE;
if (cee->cbfn.reset_stats_cbfn)
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
* bfa_cee_meminfo()
*
*
* @param[in] void
*
* @return Size of DMA region
*/
u32
bfa_cee_meminfo(void)
{
return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
}
/**
* bfa_cee_mem_claim()
*
*
* @param[in] cee CEE module pointer
* dma_kva Kernel Virtual Address of CEE DMA Memory
* dma_pa Physical Address of CEE DMA Memory
*
* @return void
*/
void
bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
{
cee->attr_dma.kva = dma_kva;
cee->attr_dma.pa = dma_pa;
cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
cee->attr = (struct bfa_cee_attr_s *)dma_kva;
cee->stats =
(struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
}
/**
* bfa_cee_get_attr()
*
* Send the request to the f/w to fetch CEE attributes.
*
* @param[in] Pointer to the CEE module data structure.
*
* @return Status
*/
bfa_status_t
bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
struct bfi_cee_get_req_s *cmd;
bfa_assert((cee != NULL) && (cee->ioc != NULL));
bfa_trc(cee, 0);
if (!bfa_ioc_is_operational(cee->ioc)) {
bfa_trc(cee, 0);
return BFA_STATUS_IOC_FAILURE;
}
if (cee->get_attr_pending == BFA_TRUE) {
bfa_trc(cee, 0);
return BFA_STATUS_DEVBUSY;
}
cee->get_attr_pending = BFA_TRUE;
cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
cee->attr = attr;
cee->cbfn.get_attr_cbfn = cbfn;
cee->cbfn.get_attr_cbarg = cbarg;
bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
bfa_ioc_portid(cee->ioc));
bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
bfa_trc(cee, 0);
return BFA_STATUS_OK;
}
/**
* bfa_cee_get_stats()
*
* Send the request to the f/w to fetch CEE statistics.
*
* @param[in] Pointer to the CEE module data structure.
*
* @return Status
*/
bfa_status_t
bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
{
struct bfi_cee_get_req_s *cmd;
bfa_assert((cee != NULL) && (cee->ioc != NULL));
if (!bfa_ioc_is_operational(cee->ioc)) {
bfa_trc(cee, 0);
return BFA_STATUS_IOC_FAILURE;
}
if (cee->get_stats_pending == BFA_TRUE) {
bfa_trc(cee, 0);
return BFA_STATUS_DEVBUSY;
}
cee->get_stats_pending = BFA_TRUE;
cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
cee->stats = stats;
cee->cbfn.get_stats_cbfn = cbfn;
cee->cbfn.get_stats_cbarg = cbarg;
bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
bfa_ioc_portid(cee->ioc));
bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
bfa_trc(cee, 0);
return BFA_STATUS_OK;
}
/**
* bfa_cee_reset_stats()
*
*
* @param[in] Pointer to the CEE module data structure.
*
* @return Status
*/
bfa_status_t
bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
void *cbarg)
{
struct bfi_cee_reset_stats_s *cmd;
bfa_assert((cee != NULL) && (cee->ioc != NULL));
if (!bfa_ioc_is_operational(cee->ioc)) {
bfa_trc(cee, 0);
return BFA_STATUS_IOC_FAILURE;
}
if (cee->reset_stats_pending == BFA_TRUE) {
bfa_trc(cee, 0);
return BFA_STATUS_DEVBUSY;
}
cee->reset_stats_pending = BFA_TRUE;
cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
cee->cbfn.reset_stats_cbfn = cbfn;
cee->cbfn.reset_stats_cbarg = cbarg;
bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
bfa_ioc_portid(cee->ioc));
bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
bfa_trc(cee, 0);
return BFA_STATUS_OK;
}
/**
* bfa_cee_isrs()
*
*
* @param[in] Pointer to the CEE module data structure.
*
* @return void
*/
void
bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
union bfi_cee_i2h_msg_u *msg;
struct bfi_cee_get_rsp_s *get_rsp;
struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
msg = (union bfi_cee_i2h_msg_u *)m;
get_rsp = (struct bfi_cee_get_rsp_s *)m;
bfa_trc(cee, msg->mh.msg_id);
switch (msg->mh.msg_id) {
case BFI_CEE_I2H_GET_CFG_RSP:
bfa_trc(cee, get_rsp->cmd_status);
bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
break;
case BFI_CEE_I2H_GET_STATS_RSP:
bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
break;
case BFI_CEE_I2H_RESET_STATS_RSP:
bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
break;
default:
bfa_assert(0);
}
}
/**
* bfa_cee_hbfail()
*
*
* @param[in] Pointer to the CEE module data structure.
*
* @return void
*/
void
bfa_cee_hbfail(void *arg)
{
struct bfa_cee_s *cee;
cee = (struct bfa_cee_s *)arg;
if (cee->get_attr_pending == BFA_TRUE) {
cee->get_attr_status = BFA_STATUS_FAILED;
cee->get_attr_pending = BFA_FALSE;
if (cee->cbfn.get_attr_cbfn) {
cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
BFA_STATUS_FAILED);
}
}
if (cee->get_stats_pending == BFA_TRUE) {
cee->get_stats_status = BFA_STATUS_FAILED;
cee->get_stats_pending = BFA_FALSE;
if (cee->cbfn.get_stats_cbfn) {
cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
BFA_STATUS_FAILED);
}
}
if (cee->reset_stats_pending == BFA_TRUE) {
cee->reset_stats_status = BFA_STATUS_FAILED;
cee->reset_stats_pending = BFA_FALSE;
if (cee->cbfn.reset_stats_cbfn) {
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
BFA_STATUS_FAILED);
}
}
}
/**
* bfa_cee_attach()
*
*
* @param[in] cee - Pointer to the CEE module data structure
* ioc - Pointer to the ioc module data structure
* dev - Pointer to the device driver module data structure
* The device driver specific mbox ISR functions have
* this pointer as one of the parameters.
* trcmod -
* logmod -
*
* @return void
*/
void
bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
{
bfa_assert(cee != NULL);
cee->dev = dev;
cee->trcmod = trcmod;
cee->logmod = logmod;
cee->ioc = ioc;
bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
bfa_trc(cee, 0);
}
/**
* bfa_cee_detach()
*
*
* @param[in] cee - Pointer to the CEE module data structure
*
* @return void
*/
void
bfa_cee_detach(struct bfa_cee_s *cee)
{
/*
* For now, just check if there is some ioctl pending and mark that as
* failed?
*/
/* bfa_cee_hbfail(cee); */
}

The diff for this file is not shown because of its large size.

drivers/scsi/bfa/bfa_cs.h (new file, 364 lines)

@ -0,0 +1,364 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfa_cs.h BFA common services
*/
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "bfa_os_inc.h"
/**
* BFA TRC
*/
#ifndef BFA_TRC_MAX
#define BFA_TRC_MAX (4 * 1024)
#endif
#ifndef BFA_TRC_TS
#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++)
#endif
struct bfa_trc_s {
#ifdef __BIGENDIAN
u16 fileno;
u16 line;
#else
u16 line;
u16 fileno;
#endif
u32 timestamp;
union {
struct {
u32 rsvd;
u32 u32;
} u32;
u64 u64;
} data;
};
struct bfa_trc_mod_s {
u32 head;
u32 tail;
u32 ntrc;
u32 stopped;
u32 ticks;
u32 rsvd[3];
struct bfa_trc_s trc[BFA_TRC_MAX];
};
enum {
BFA_TRC_HAL = 1, /* BFA modules */
BFA_TRC_FCS = 2, /* BFA FCS modules */
BFA_TRC_LDRV = 3, /* Linux driver modules */
BFA_TRC_CNA = 4, /* Common modules */
};
#define BFA_TRC_MOD_SH 10
#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
/**
* Define a new tracing file (module). Module should match one defined above.
*/
#define BFA_TRC_FILE(__mod, __submod) \
static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
BFA_TRC_MOD(__mod))
#define bfa_trc32(_trcp, _data) \
__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
#define bfa_trc(_trcp, _data) \
__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
static inline void
bfa_trc_init(struct bfa_trc_mod_s *trcm)
{
trcm->head = trcm->tail = trcm->stopped = 0;
trcm->ntrc = BFA_TRC_MAX;
}
static inline void
bfa_trc_stop(struct bfa_trc_mod_s *trcm)
{
trcm->stopped = 1;
}
#ifdef FWTRC
extern void dc_flush(void *data);
#else
#define dc_flush(data)
#endif
static inline void
__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
{
int tail = trcm->tail;
struct bfa_trc_s *trc = &trcm->trc[tail];
if (trcm->stopped)
return;
trc->fileno = (u16) fileno;
trc->line = (u16) line;
trc->data.u64 = data;
trc->timestamp = BFA_TRC_TS(trcm);
dc_flush(trc);
trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
if (trcm->tail == trcm->head)
trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
dc_flush(trcm);
}
static inline void
__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
{
int tail = trcm->tail;
struct bfa_trc_s *trc = &trcm->trc[tail];
if (trcm->stopped)
return;
trc->fileno = (u16) fileno;
trc->line = (u16) line;
trc->data.u32.u32 = data;
trc->timestamp = BFA_TRC_TS(trcm);
dc_flush(trc);
trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
if (trcm->tail == trcm->head)
trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
dc_flush(trcm);
}
#ifndef BFA_PERF_BUILD
#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data)
#else
#define bfa_trc_fp(_trcp, _data)
#endif
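/*
 * Editor's sketch, not part of the original header: hooking a source file
 * into the trace facility.  BFA_TRC_HAL_CORE is assumed to exist in the
 * per-module trace id enum, and struct bfa_s is assumed to carry the
 * trcmod pointer used by bfa_trc().
 */
BFA_TRC_FILE(HAL, CORE);	/* one per .c file, names the trace source */

static void
my_trace_example(struct bfa_s *bfa, u32 rsp_status)
{
	bfa_trc(bfa, rsp_status);	/* records file id, line and value */
}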
/**
* @ BFA LOG interfaces
*/
#define bfa_assert(__cond) do { \
if (!(__cond)) { \
printk(KERN_ERR "assert(%s) failed at %s:%d\\n", \
#__cond, __FILE__, __LINE__); \
} \
} while (0)
#define bfa_sm_fault(__mod, __event) do { \
bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
printk(KERN_ERR "Assertion failure: %s:%d: %d", \
__FILE__, __LINE__, (__event)); \
} while (0)
#ifndef BFA_PERF_BUILD
#define bfa_assert_fp(__cond) bfa_assert(__cond)
#else
#define bfa_assert_fp(__cond)
#endif
/* BFA queue definitions */
#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
/*
* bfa_q_qe_init - to initialize a queue element
*/
#define bfa_q_qe_init(_qe) { \
bfa_q_next(_qe) = (struct list_head *) NULL; \
bfa_q_prev(_qe) = (struct list_head *) NULL; \
}
/*
* bfa_q_deq - dequeue an element from head of the queue
*/
#define bfa_q_deq(_q, _qe) { \
if (!list_empty(_q)) { \
(*((struct list_head **) (_qe))) = bfa_q_next(_q); \
bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
(struct list_head *) (_q); \
bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
} else { \
*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
} \
}
/*
* bfa_q_deq_tail - dequeue an element from tail of the queue
*/
#define bfa_q_deq_tail(_q, _qe) { \
if (!list_empty(_q)) { \
*((struct list_head **) (_qe)) = bfa_q_prev(_q); \
bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
(struct list_head *) (_q); \
bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \
} else { \
*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
} \
}
static inline int
bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
{
struct list_head *tqe;
tqe = bfa_q_next(q);
while (tqe != q) {
if (tqe == qe)
return 1;
tqe = bfa_q_next(tqe);
if (tqe == NULL)
break;
}
return 0;
}
/*
* #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
* consistent across modules)
*/
#ifndef BFA_PERF_BUILD
#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
#else
#define BFA_Q_DBG_INIT(_qe)
#endif
#define bfa_q_is_on_q(_q, _qe) \
bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
/**
* @ BFA state machine interfaces
*/
typedef void (*bfa_sm_t)(void *sm, int event);
/**
* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc_s
* etype - object type, eg. enum ioc_event
*/
#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
/**
* For converting from state machine function to state encoding.
*/
struct bfa_sm_table_s {
bfa_sm_t sm; /* state machine function */
int state; /* state machine encoding */
char *name; /* state name for display */
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
/**
* State machine with entry actions.
*/
typedef void (*bfa_fsm_t)(void *fsm, int event);
/**
* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc_s
* etype - object type, eg. enum ioc_event
*/
#define bfa_fsm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event); \
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
(_fsm)->fsm = (bfa_fsm_t)(_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
static inline int
bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
{
int i = 0;
while (smt[i].sm && smt[i].sm != sm)
i++;
return smt[i].state;
}
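/*
 * Editor's sketch, not part of the original header: a minimal two-state
 * machine built with the macros above.  my_obj_s, enum my_event and the
 * state handlers are assumptions for illustration only.
 */
enum my_event {
	MY_E_START = 1,
	MY_E_STOP  = 2,
};

struct my_obj_s {
	bfa_sm_t	sm;		/* current state handler */
};

bfa_sm_state_decl(my_obj, stopped, struct my_obj_s, enum my_event);
bfa_sm_state_decl(my_obj, running, struct my_obj_s, enum my_event);

static void
my_obj_sm_stopped(struct my_obj_s *obj, enum my_event event)
{
	if (event == MY_E_START)
		bfa_sm_set_state(obj, my_obj_sm_running);
}

static void
my_obj_sm_running(struct my_obj_s *obj, enum my_event event)
{
	if (event == MY_E_STOP)
		bfa_sm_set_state(obj, my_obj_sm_stopped);
}

/*
 * Driving it:  bfa_sm_set_state(obj, my_obj_sm_stopped);
 *              bfa_sm_send_event(obj, MY_E_START);
 *              bfa_sm_cmp_state(obj, my_obj_sm_running) now evaluates true.
 */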
/**
* @ Generic wait counter.
*/
typedef void (*bfa_wc_resume_t) (void *cbarg);
struct bfa_wc_s {
bfa_wc_resume_t wc_resume;
void *wc_cbarg;
int wc_count;
};
static inline void
bfa_wc_up(struct bfa_wc_s *wc)
{
wc->wc_count++;
}
static inline void
bfa_wc_down(struct bfa_wc_s *wc)
{
wc->wc_count--;
if (wc->wc_count == 0)
wc->wc_resume(wc->wc_cbarg);
}
/**
* Initialize a waiting counter.
*/
static inline void
bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
wc->wc_resume = wc_resume;
wc->wc_cbarg = wc_cbarg;
wc->wc_count = 0;
bfa_wc_up(wc);
}
/**
* Wait for counter to reach zero
*/
static inline void
bfa_wc_wait(struct bfa_wc_s *wc)
{
bfa_wc_down(wc);
}
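/*
 * Editor's sketch, not part of the original header: waiting for several
 * parallel sub-operations.  my_ctx_s, my_start(), my_all_done() and the
 * commented-out my_start_subop() are assumptions for illustration only.
 */
struct my_ctx_s {
	struct bfa_wc_s	wc;
};

static void
my_all_done(void *cbarg)
{
	/* runs once every outstanding sub-operation has called bfa_wc_down() */
}

static void
my_start(struct my_ctx_s *ctx, int nsubops)
{
	int i;

	bfa_wc_init(&ctx->wc, my_all_done, ctx);	/* holds one count */

	for (i = 0; i < nsubops; i++) {
		bfa_wc_up(&ctx->wc);	/* one count per sub-operation */
		/* my_start_subop(ctx, i); completion calls bfa_wc_down() */
	}

	bfa_wc_wait(&ctx->wc);	/* drop the initial count; my_all_done()
				 * fires when the count reaches zero */
}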
#endif /* __BFA_CS_H__ */


@ -1,58 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <cs/bfa_debug.h>
#include <bfa_os_inc.h>
#include <cs/bfa_q.h>
#include <log/bfa_log_hal.h>
/**
* cs_debug_api
*/
void
bfa_panic(int line, char *file, char *panicstr)
{
bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
bfa_os_panic();
}
void
bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
{
bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
bfa_os_panic();
}
int
bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
{
struct list_head *tqe;
tqe = bfa_q_next(q);
while (tqe != q) {
if (tqe == qe)
return 1;
tqe = bfa_q_next(tqe);
if (tqe == NULL)
break;
}
return 0;
}

drivers/scsi/bfa/bfa_defs.h (new file, 466 lines)

@ -0,0 +1,466 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_DEFS_H__
#define __BFA_DEFS_H__
#include "bfa_fc.h"
#include "bfa_os_inc.h"
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
/**
* Manufacturing card type
*/
enum {
BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */
BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */
BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */
BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */
BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
};
#pragma pack(1)
/**
* Check if Mezz card
*/
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
(type) == BFA_MFG_TYPE_ASTRA || \
(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
(type) == BFA_MFG_TYPE_LIGHTNING))
/**
* Check if the card having old wwn/mac handling
*/
#define bfa_mfg_is_old_wwn_mac_model(type) (( \
(type) == BFA_MFG_TYPE_FC8P2 || \
(type) == BFA_MFG_TYPE_FC8P1 || \
(type) == BFA_MFG_TYPE_FC4P2 || \
(type) == BFA_MFG_TYPE_FC4P1 || \
(type) == BFA_MFG_TYPE_CNA10P2 || \
(type) == BFA_MFG_TYPE_CNA10P1 || \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE))
#define bfa_mfg_increment_wwn_mac(m, i) \
do { \
u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
(u32)(m)[2]; \
t += (i); \
(m)[0] = (t >> 16) & 0xFF; \
(m)[1] = (t >> 8) & 0xFF; \
(m)[2] = t & 0xFF; \
} while (0)
/**
* VPD data length
*/
#define BFA_MFG_VPD_LEN 512
/**
* VPD vendor tag
*/
enum {
BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
BFA_MFG_VPD_IBM = 1, /* vendor IBM */
BFA_MFG_VPD_HP = 2, /* vendor HP */
BFA_MFG_VPD_DELL = 3, /* vendor DELL */
BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
};
/**
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_vpd_s {
u8 version; /* vpd data version */
u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
u8 chksum; /* u8 checksum */
u8 vendor; /* vendor */
u8 len; /* vpd data length excluding header */
u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
};
#pragma pack()
/**
* Status return values
*/
enum bfa_status {
BFA_STATUS_OK = 0, /* Success */
BFA_STATUS_FAILED = 1, /* Operation failed */
BFA_STATUS_EINVAL = 2, /* Invalid params Check input
* parameters */
BFA_STATUS_ENOMEM = 3, /* Out of resources */
BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
* contact support */
BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */
BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */
BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
* contact support */
BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
* configuration */
BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
* this adapter */
BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
* the adapter */
BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
BFA_STATUS_MAX_VAL /* Unknown error code */
};
#define bfa_status_t enum bfa_status
enum bfa_eproto_status {
BFA_EPROTO_BAD_ACCEPT = 0,
BFA_EPROTO_UNKNOWN_RSP = 1
};
#define bfa_eproto_status_t enum bfa_eproto_status
enum bfa_boolean {
BFA_FALSE = 0,
BFA_TRUE = 1
};
#define bfa_boolean_t enum bfa_boolean
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/**
* ---------------------- adapter definitions ------------
*/
/**
* BFA adapter level attributes.
*/
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
*!< adapter serial num length
*/
BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */
BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */
BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */
BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */
BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */
};
struct bfa_adapter_attr_s {
char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
u32 card_type;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
wwn_t pwwn;
char node_symname[FC_SYMNAME_MAX];
char hw_ver[BFA_VERSION_LEN];
char fw_ver[BFA_VERSION_LEN];
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd_s vpd;
struct mac_s mac;
u8 nports;
u8 max_speed;
u8 prototype;
char asic_rev;
u8 pcie_gen;
u8 pcie_lanes_orig;
u8 pcie_lanes;
u8 cna_capable;
u8 is_mezz;
u8 trunk_capable;
};
/**
* ---------------------- IOC definitions ------------
*/
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
/**
* Driver and firmware versions.
*/
struct bfa_ioc_driver_attr_s {
char driver[BFA_IOC_DRIVER_LEN]; /* driver name */
char driver_ver[BFA_VERSION_LEN]; /* driver version */
char fw_ver[BFA_VERSION_LEN]; /* firmware version */
char bios_ver[BFA_VERSION_LEN]; /* bios version */
char efi_ver[BFA_VERSION_LEN]; /* EFI version */
char ob_ver[BFA_VERSION_LEN]; /* openboot version */
};
/**
* IOC PCI device attributes
*/
struct bfa_ioc_pci_attr_s {
u16 vendor_id; /* PCI vendor ID */
u16 device_id; /* PCI device ID */
u16 ssid; /* subsystem ID */
u16 ssvid; /* subsystem vendor ID */
u32 pcifn; /* PCI device function */
u32 rsvd; /* padding */
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
};
/**
* IOC states
*/
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /* IOC is in uninit state */
BFA_IOC_RESET = 2, /* IOC is in reset state */
BFA_IOC_SEMWAIT = 3, /* Waiting for IOC h/w semaphore */
BFA_IOC_HWINIT = 4, /* IOC h/w is being initialized */
BFA_IOC_GETATTR = 5, /* IOC is being configured */
BFA_IOC_OPERATIONAL = 6, /* IOC is operational */
BFA_IOC_INITFAIL = 7, /* IOC hardware failure */
BFA_IOC_FAIL = 8, /* IOC heart-beat failure */
BFA_IOC_DISABLING = 9, /* IOC is being disabled */
BFA_IOC_DISABLED = 10, /* IOC is disabled */
BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */
BFA_IOC_ENABLING = 12, /* IOC is being enabled */
};
/**
* IOC firmware stats
*/
struct bfa_fw_ioc_stats_s {
u32 enable_reqs;
u32 disable_reqs;
u32 get_attr_reqs;
u32 dbg_sync;
u32 dbg_dump;
u32 unknown_reqs;
};
/**
* IOC driver stats
*/
struct bfa_ioc_drv_stats_s {
u32 ioc_isrs;
u32 ioc_enables;
u32 ioc_disables;
u32 ioc_hbfails;
u32 ioc_boots;
u32 stats_tmos;
u32 hb_count;
u32 disable_reqs;
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
};
/**
* IOC statistics
*/
struct bfa_ioc_stats_s {
struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */
struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */
};
enum bfa_ioc_type_e {
BFA_IOC_TYPE_FC = 1,
BFA_IOC_TYPE_FCoE = 2,
BFA_IOC_TYPE_LL = 3,
};
/**
* IOC attributes returned in queries
*/
struct bfa_ioc_attr_s {
enum bfa_ioc_type_e ioc_type;
enum bfa_ioc_state state; /* IOC state */
struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */
struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
struct bfa_ioc_pci_attr_s pci_attr;
u8 port_id; /* port number */
u8 rsvd[7]; /* 64bit align */
};
/**
* ---------------------- mfg definitions ------------
*/
/**
* Checksum size
*/
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
#define BFA_MFG_SUPPLIER_ID_SIZE 10
#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
#pragma pack(1)
/**
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block_s {
u8 version; /* manufacturing block version */
u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
u16 mfgsize; /* mfg block size */
u16 u16_chksum; /* old u16 checksum */
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
u8 mfg_day; /* manufacturing day */
u8 mfg_month; /* manufacturing month */
u16 mfg_year; /* manufacturing year */
wwn_t mfg_wwn; /* wwn base for this adapter */
u8 num_wwn; /* number of wwns assigned */
u8 mfg_speeds; /* speeds allowed for this adapter */
u8 rsv[2];
char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char
supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char
supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
mac_t mfg_mac; /* mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 mfg_type; /* card type */
u8 rsv3[108];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
/**
* ---------------------- pci definitions ------------
*/
/**
* PCI device and vendor ID information
*/
enum {
BFA_PCI_VENDOR_ID_BROCADE = 0x1657,
BFA_PCI_DEVICE_ID_FC_8G2P = 0x13,
BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
BFA_PCI_DEVICE_ID_CT = 0x14,
BFA_PCI_DEVICE_ID_CT_FC = 0x21,
};
#define bfa_asic_id_ct(devid) \
((devid) == BFA_PCI_DEVICE_ID_CT || \
(devid) == BFA_PCI_DEVICE_ID_CT_FC)
/**
* PCI sub-system device and vendor ID information
*/
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
};
/**
* Maximum number of device address ranges mapped through different BAR(s)
*/
#define BFA_PCI_ACCESS_RANGES 1
/*
* Port speed settings. Each specific speed is a bit field. Use multiple
* bits to specify speeds to be selected for auto-negotiation.
*/
enum bfa_port_speed {
BFA_PORT_SPEED_UNKNOWN = 0,
BFA_PORT_SPEED_1GBPS = 1,
BFA_PORT_SPEED_2GBPS = 2,
BFA_PORT_SPEED_4GBPS = 4,
BFA_PORT_SPEED_8GBPS = 8,
BFA_PORT_SPEED_10GBPS = 10,
BFA_PORT_SPEED_16GBPS = 16,
BFA_PORT_SPEED_AUTO =
(BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
};
#define bfa_port_speed_t enum bfa_port_speed
enum {
BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */
BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */
};
#define BOOT_CFG_REV1 1
#define BOOT_CFG_VLAN 1
/**
* Boot options setting. Boot options setting determines from where
* to get the boot lun information
*/
enum bfa_boot_bootopt {
BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */
BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */
BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */
BFA_BOOT_PBC = 3, /* Boot from pbc configured blun */
};
#pragma pack(1)
/**
* Boot lun information.
*/
struct bfa_boot_bootlun_s {
wwn_t pwwn; /* port wwn of target */
lun_t lun; /* 64-bit lun */
};
#pragma pack()
/**
* Pre-boot (PBC) boot configuration
*/
struct bfa_boot_pbc_s {
u8 enable; /* enable/disable SAN boot */
u8 speed; /* boot speed settings */
u8 topology; /* boot topology setting */
u8 rsvd1;
u32 nbluns; /* number of boot luns */
struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
};
#endif /* __BFA_DEFS_H__ */

View File

@ -0,0 +1,457 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_DEFS_FCS_H__
#define __BFA_DEFS_FCS_H__
#include "bfa_fc.h"
#include "bfa_defs_svc.h"
/**
* VF states
*/
enum bfa_vf_state {
BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
BFA_VF_LINK_DOWN = 1, /* link is down */
BFA_VF_FLOGI = 2, /* flogi is in progress */
BFA_VF_AUTH = 3, /* authentication in progress */
BFA_VF_NOFABRIC = 4, /* fabric is not present */
BFA_VF_ONLINE = 5, /* login to fabric is complete */
BFA_VF_EVFP = 6, /* EVFP is in progress */
BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
};
/**
* VF statistics
*/
struct bfa_vf_stats_s {
u32 flogi_sent; /* Num FLOGIs sent */
u32 flogi_rsp_err; /* FLOGI response errors */
u32 flogi_acc_err; /* FLOGI accept errors */
u32 flogi_accepts; /* FLOGI accepts received */
u32 flogi_rejects; /* FLOGI rejects received */
u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
u32 flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */
u32 flogi_rcvd; /* FLOGIs received */
u32 flogi_rejected; /* Incoming FLOGIs rejected */
u32 fabric_onlines; /* Internal fabric online notification sent
* to other modules */
u32 fabric_offlines; /* Internal fabric offline notification sent
* to other modules */
u32 resvd; /* padding for 64 bit alignment */
};
/**
* VF attributes returned in queries
*/
struct bfa_vf_attr_s {
enum bfa_vf_state state; /* VF state */
u32 rsvd;
wwn_t fabric_name; /* fabric name */
};
#define BFA_FCS_MAX_LPORTS 256
#define BFA_FCS_FABRIC_IPADDR_SZ 16
/**
* symbolic names for base port/virtual port
*/
#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
struct bfa_lport_symname_s {
char symname[BFA_SYMNAME_MAXLEN];
};
/**
* Roles of FCS port:
* - FCP IM and FCP TM roles cannot be enabled together for an FCS port
* - Create multiple ports if both IM and TM functions are required.
* - At least one role must be specified.
*/
enum bfa_lport_role {
BFA_LPORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
};
/**
* FCS port configuration.
*/
struct bfa_lport_cfg_s {
wwn_t pwwn; /* port wwn */
wwn_t nwwn; /* node wwn */
struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
bfa_boolean_t preboot_vp; /* vport created from PBC */
enum bfa_lport_role roles; /* FCS port roles */
u8 tag[16]; /* opaque tag from application */
};
/**
* FCS port states
*/
enum bfa_lport_state {
BFA_LPORT_UNINIT = 0, /* PORT is not yet initialized */
BFA_LPORT_FDISC = 1, /* FDISC is in progress */
BFA_LPORT_ONLINE = 2, /* login to fabric is complete */
BFA_LPORT_OFFLINE = 3, /* No login to fabric */
};
/**
* FCS port type.
*/
enum bfa_lport_type {
BFA_LPORT_TYPE_PHYSICAL = 0,
BFA_LPORT_TYPE_VIRTUAL,
};
/**
* FCS port offline reason.
*/
enum bfa_lport_offline_reason {
BFA_LPORT_OFFLINE_UNKNOWN = 0,
BFA_LPORT_OFFLINE_LINKDOWN,
BFA_LPORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
* fabric */
BFA_LPORT_OFFLINE_FAB_NORESOURCES,
BFA_LPORT_OFFLINE_FAB_LOGOUT,
};
/**
* FCS lport info.
*/
struct bfa_lport_info_s {
u8 port_type; /* bfa_lport_type_t : physical or
* virtual */
u8 port_state; /* one of bfa_lport_state values */
u8 offline_reason; /* one of bfa_lport_offline_reason_t
* values */
wwn_t port_wwn;
wwn_t node_wwn;
/*
* following 4 fields are valid for Physical Ports only
*/
u32 max_vports_supp; /* Max supported vports */
u32 num_vports_inuse; /* Num of in use vports */
u32 max_rports_supp; /* Max supported rports */
u32 num_rports_inuse; /* Num of discovered rports */
};
/**
* FCS port statistics
*/
struct bfa_lport_stats_s {
u32 ns_plogi_sent;
u32 ns_plogi_rsp_err;
u32 ns_plogi_acc_err;
u32 ns_plogi_accepts;
u32 ns_rejects; /* NS command rejects */
u32 ns_plogi_unknown_rsp;
u32 ns_plogi_alloc_wait;
u32 ns_retries; /* NS command retries */
u32 ns_timeouts; /* NS command timeouts */
u32 ns_rspnid_sent;
u32 ns_rspnid_accepts;
u32 ns_rspnid_rsp_err;
u32 ns_rspnid_rejects;
u32 ns_rspnid_alloc_wait;
u32 ns_rftid_sent;
u32 ns_rftid_accepts;
u32 ns_rftid_rsp_err;
u32 ns_rftid_rejects;
u32 ns_rftid_alloc_wait;
u32 ns_rffid_sent;
u32 ns_rffid_accepts;
u32 ns_rffid_rsp_err;
u32 ns_rffid_rejects;
u32 ns_rffid_alloc_wait;
u32 ns_gidft_sent;
u32 ns_gidft_accepts;
u32 ns_gidft_rsp_err;
u32 ns_gidft_rejects;
u32 ns_gidft_unknown_rsp;
u32 ns_gidft_alloc_wait;
/*
* Mgmt Server stats
*/
u32 ms_retries; /* MS command retries */
u32 ms_timeouts; /* MS command timeouts */
u32 ms_plogi_sent;
u32 ms_plogi_rsp_err;
u32 ms_plogi_acc_err;
u32 ms_plogi_accepts;
u32 ms_rejects; /* MS command rejects */
u32 ms_plogi_unknown_rsp;
u32 ms_plogi_alloc_wait;
u32 num_rscn; /* Num of RSCN received */
u32 num_portid_rscn;/* Num portid format RSCN
* received */
u32 uf_recvs; /* Unsolicited recv frames */
u32 uf_recv_drops; /* Dropped received frames */
u32 plogi_rcvd; /* Received plogi */
u32 prli_rcvd; /* Received prli */
u32 adisc_rcvd; /* Received adisc */
u32 prlo_rcvd; /* Received prlo */
u32 logo_rcvd; /* Received logo */
u32 rpsc_rcvd; /* Received rpsc */
u32 un_handled_els_rcvd; /* Received unhandled ELS */
u32 rport_plogi_timeouts; /* Rport plogi retry timeout count */
u32 rport_del_max_plogi_retry; /* Deleted rport
* (max retry of plogi) */
};
/**
* BFA port attribute returned in queries
*/
struct bfa_lport_attr_s {
enum bfa_lport_state state; /* port state */
u32 pid; /* port ID */
struct bfa_lport_cfg_s port_cfg; /* port configuration */
enum bfa_port_type port_type; /* current topology */
u32 loopback; /* cable is externally looped back */
wwn_t fabric_name; /* attached switch's nwwn */
u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
* fabric's ip addr */
mac_t fpma_mac; /* Lport's FPMA Mac address */
u16 authfail; /* auth failed state */
};
/**
* VPORT states
*/
enum bfa_vport_state {
BFA_FCS_VPORT_UNINIT = 0,
BFA_FCS_VPORT_CREATED = 1,
BFA_FCS_VPORT_OFFLINE = 1,
BFA_FCS_VPORT_FDISC_SEND = 2,
BFA_FCS_VPORT_FDISC = 3,
BFA_FCS_VPORT_FDISC_RETRY = 4,
BFA_FCS_VPORT_ONLINE = 5,
BFA_FCS_VPORT_DELETING = 6,
BFA_FCS_VPORT_CLEANUP = 6,
BFA_FCS_VPORT_LOGO_SEND = 7,
BFA_FCS_VPORT_LOGO = 8,
BFA_FCS_VPORT_ERROR = 9,
BFA_FCS_VPORT_MAX_STATE,
};
/**
* vport statistics
*/
struct bfa_vport_stats_s {
struct bfa_lport_stats_s port_stats; /* base class (port) stats */
/*
* TODO - remove
*/
u32 fdisc_sent; /* num fdisc sent */
u32 fdisc_accepts; /* fdisc accepts */
u32 fdisc_retries; /* fdisc retries */
u32 fdisc_timeouts; /* fdisc timeouts */
u32 fdisc_rsp_err; /* fdisc response error */
u32 fdisc_acc_bad; /* bad fdisc accepts */
u32 fdisc_rejects; /* fdisc rejects */
u32 fdisc_unknown_rsp; /* fdisc rsp unknown error */
u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */
u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */
u32 logo_sent; /* logo sent */
u32 logo_accepts; /* logo accepts */
u32 logo_rejects; /* logo rejects */
u32 logo_rsp_err; /* logo rsp errors */
u32 logo_unknown_rsp; /* logo rsp unknown errors */
u32 fab_no_npiv; /* fabric does not support npiv */
u32 fab_offline; /* offline events from fab SM */
u32 fab_online; /* online events from fab SM */
u32 fab_cleanup; /* cleanup request from fab SM */
u32 rsvd;
};
/**
* BFA vport attribute returned in queries
*/
struct bfa_vport_attr_s {
struct bfa_lport_attr_s port_attr; /* base class (port) attributes */
enum bfa_vport_state vport_state; /* vport state */
u32 rsvd;
};
/**
* FCS remote port states
*/
enum bfa_rport_state {
BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */
BFA_RPORT_OFFLINE = 1, /* rport is offline */
BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */
BFA_RPORT_ONLINE = 3, /* login to rport is complete */
BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */
BFA_RPORT_NSQUERY = 5, /* nameserver query */
BFA_RPORT_ADISC = 6, /* ADISC authentication */
BFA_RPORT_LOGO = 7, /* logging out with rport */
BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */
BFA_RPORT_NSDISC = 9, /* re-discover rport */
};
/**
* Rport Scsi Function : Initiator/Target.
*/
enum bfa_rport_function {
BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */
BFA_RPORT_TARGET = 0x02, /* SCSI Target */
};
/**
* port/node symbolic names for rport
*/
#define BFA_RPORT_SYMNAME_MAXLEN 255
struct bfa_rport_symname_s {
char symname[BFA_RPORT_SYMNAME_MAXLEN];
};
/**
* FCS remote port statistics
*/
struct bfa_rport_stats_s {
u32 offlines; /* remote port offline count */
u32 onlines; /* remote port online count */
u32 rscns; /* RSCN affecting rport */
u32 plogis; /* plogis sent */
u32 plogi_accs; /* plogi accepts */
u32 plogi_timeouts; /* plogi timeouts */
u32 plogi_rejects; /* rcvd plogi rejects */
u32 plogi_failed; /* local failure */
u32 plogi_rcvd; /* plogis rcvd */
u32 prli_rcvd; /* inbound PRLIs */
u32 adisc_rcvd; /* ADISCs received */
u32 adisc_rejects; /* recvd ADISC rejects */
u32 adisc_sent; /* ADISC requests sent */
u32 adisc_accs; /* ADISC accepted by rport */
u32 adisc_failed; /* ADISC failed (no response) */
u32 adisc_rejected; /* ADISC rejected by us */
u32 logos; /* logos sent */
u32 logo_accs; /* LOGO accepts from rport */
u32 logo_failed; /* LOGO failures */
u32 logo_rejected; /* LOGO rejects from rport */
u32 logo_rcvd; /* LOGO from remote port */
u32 rpsc_rcvd; /* RPSC received */
u32 rpsc_rejects; /* recvd RPSC rejects */
u32 rpsc_sent; /* RPSC requests sent */
u32 rpsc_accs; /* RPSC accepted by rport */
u32 rpsc_failed; /* RPSC failed (no response) */
u32 rpsc_rejected; /* RPSC rejected by us */
u32 rjt_insuff_res; /* LS RJT with insuff resources */
struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */
};
/**
* FCS remote port attributes returned in queries
*/
struct bfa_rport_attr_s {
wwn_t nwwn; /* node wwn */
wwn_t pwwn; /* port wwn */
enum fc_cos cos_supported; /* supported class of services */
u32 pid; /* port ID */
u32 df_sz; /* Max payload size */
enum bfa_rport_state state; /* Rport State machine state */
enum fc_cos fc_cos; /* FC classes of services */
bfa_boolean_t cisc; /* CISC capable device */
struct bfa_rport_symname_s symname; /* Symbolic Name */
enum bfa_rport_function scsi_function; /* Initiator/Target */
struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */
enum bfa_port_speed curr_speed; /* operating speed got from
* RPSC ELS. UNKNOWN, if RPSC
* is not supported */
bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */
enum bfa_port_speed assigned_speed; /* Speed assigned by the user.
* will be used if RPSC is not
* supported by the rport */
};
struct bfa_rport_remote_link_stats_s {
u32 lfc; /* Link Failure Count */
u32 lsyc; /* Loss of Synchronization Count */
u32 lsic; /* Loss of Signal Count */
u32 pspec; /* Primitive Sequence Protocol Error Count */
u32 itwc; /* Invalid Transmission Word Count */
u32 icc; /* Invalid CRC Count */
};
#define BFA_MAX_IO_INDEX 7
#define BFA_NO_IO_INDEX 9
/**
* FCS itnim states
*/
enum bfa_itnim_state {
BFA_ITNIM_OFFLINE = 0, /* offline */
BFA_ITNIM_PRLI_SEND = 1, /* prli send */
BFA_ITNIM_PRLI_SENT = 2, /* prli sent */
BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */
BFA_ITNIM_HCB_ONLINE = 4, /* online callback */
BFA_ITNIM_ONLINE = 5, /* online */
BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */
BFA_ITNIM_INITIATIOR = 7, /* initiator */
};
/**
* FCS remote port statistics
*/
struct bfa_itnim_stats_s {
u32 onlines; /* num rport online */
u32 offlines; /* num rport offline */
u32 prli_sent; /* num prli sent out */
u32 fcxp_alloc_wait;/* num fcxp alloc waits */
u32 prli_rsp_err; /* num prli rsp errors */
u32 prli_rsp_acc; /* num prli rsp accepts */
u32 initiator; /* rport is an initiator */
u32 prli_rsp_parse_err; /* prli rsp parsing errors */
u32 prli_rsp_rjt; /* num prli rsp rejects */
u32 timeout; /* num timeouts detected */
u32 sler; /* num sler notification from BFA */
u32 rsvd; /* padding for 64 bit alignment */
};
/**
* FCS itnim attributes returned in queries
*/
struct bfa_itnim_attr_s {
enum bfa_itnim_state state; /* FCS itnim state */
u8 retry; /* data retransmission support */
u8 task_retry_id; /* task retry ident support */
u8 rec_support; /* REC supported */
u8 conf_comp; /* confirmed completion supp */
};
#endif /* __BFA_DEFS_FCS_H__ */

Diff between files not shown because of its large size.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -14,10 +14,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <defs/bfa_defs_pci.h>
#include <cs/bfa_debug.h>
#include <bfa_iocfc.h>
#include "bfa_modules.h"
/**
* BFA module list terminated by NULL
@ -30,9 +28,6 @@ struct bfa_module_s *hal_mods[] = {
&hal_mod_uf,
&hal_mod_rport,
&hal_mod_fcpim,
#ifdef BFA_CFG_PBIND
&hal_mod_pbind,
#endif
NULL
};
@ -74,17 +69,39 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
bfa_isr_unhandled, /* --------- */
};
/**
* Message handlers for mailbox command classes
*/
bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
NULL,
NULL, /* BFI_MC_IOC */
NULL, /* BFI_MC_DIAG */
NULL, /* BFI_MC_IOC */
NULL, /* BFI_MC_DIAG */
NULL, /* BFI_MC_FLASH */
NULL, /* BFI_MC_CEE */
NULL, /* BFI_MC_PORT */
NULL, /* BFI_MC_CEE */
NULL, /* BFI_MC_PORT */
bfa_iocfc_isr, /* BFI_MC_IOCFC */
NULL,
};
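/*
 * bfa_com_port_attach() below claims the port sub-module's DMA area from
 * the meminfo cursor: it takes bfa_port_meminfo() bytes at the current
 * DMA kva/pa, hands them to bfa_port_mem_claim(), and then advances both
 * cursors so the next attach continues past this region.
 */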
void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
struct bfa_port_s *port = &bfa->modules.port;
u32 dm_len;
u8 *dm_kva;
u64 dm_pa;
dm_len = bfa_port_meminfo();
dm_kva = bfa_meminfo_dma_virt(mi);
dm_pa = bfa_meminfo_dma_phys(mi);
memset(port, 0, sizeof(struct bfa_port_s));
bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
bfa_port_mem_claim(port, dm_kva, dm_pa);
bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}

Diff between files not shown because of its large size.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -18,25 +18,25 @@
* fcbuild.c - FC link service frame building and parsing routines
*/
#include <bfa_os_inc.h>
#include "fcbuild.h"
#include "bfa_os_inc.h"
#include "bfa_fcbuild.h"
/*
* static build functions
*/
static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id);
static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id);
static struct fchs_s fc_els_req_tmpl;
static struct fchs_s fc_els_rsp_tmpl;
static struct fchs_s fc_bls_req_tmpl;
static struct fchs_s fc_bls_rsp_tmpl;
static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id);
static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id);
static struct fchs_s fc_els_req_tmpl;
static struct fchs_s fc_els_rsp_tmpl;
static struct fchs_s fc_bls_req_tmpl;
static struct fchs_s fc_bls_rsp_tmpl;
static struct fc_ba_acc_s ba_acc_tmpl;
static struct fc_logi_s plogi_tmpl;
static struct fc_prli_s prli_tmpl;
static struct fc_rrq_s rrq_tmpl;
static struct fchs_s fcp_fchs_tmpl;
static struct fchs_s fcp_fchs_tmpl;
void
fcbuild_init(void)
@ -123,7 +123,7 @@ fcbuild_init(void)
rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
/*
* fcp_fchs_tmpl
* fcp_struct fchs_s mpl
*/
fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
@ -135,8 +135,7 @@ fcbuild_init(void)
}
static void
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u32 ox_id)
fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
{
bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
@ -158,8 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
}
void
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id)
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
fchs->d_id = (d_id);
@ -168,8 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
}
static void
fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id)
fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
@ -180,8 +177,8 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
enum fc_parse_status
fc_els_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
len = len;
@ -199,8 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
}
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id)
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
{
bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
fchs->d_id = d_id;
@ -213,7 +209,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size, u8 els_code)
{
struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
@ -233,12 +229,11 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16
fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size, u8 set_npiv, u8 set_auth,
u16 local_bb_credits)
u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
u8 set_npiv, u8 set_auth, u16 local_bb_credits)
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
u32 *vvl_info;
u32 *vvl_info;
bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
@ -292,8 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u16
fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size)
u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT);
@ -330,9 +324,9 @@ fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
enum fc_parse_status
fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_logi_s *plogi;
struct fc_ls_rjt_s *ls_rjt;
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_logi_s *plogi;
struct fc_ls_rjt_s *ls_rjt;
switch (els_cmd->els_code) {
case FC_ELS_LS_RJT:
@ -364,7 +358,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
enum fc_parse_status
fc_plogi_parse(struct fchs_s *fchs)
{
struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
if (plogi->class3.class_valid != 1)
return FC_PARSE_FAILURE;
@ -381,7 +375,7 @@ u16
fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id)
{
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
fc_els_req_build(fchs, d_id, s_id, ox_id);
bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
@ -398,19 +392,16 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16
fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id, enum bfa_port_role role)
u16 ox_id, enum bfa_lport_role role)
{
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
prli->command = FC_ELS_ACC;
if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM)
prli->parampage.servparams.target = 1;
else
prli->parampage.servparams.initiator = 1;
prli->parampage.servparams.initiator = 1;
prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
@ -452,12 +443,12 @@ fc_prli_parse(struct fc_prli_s *prli)
}
u16
fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name)
fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name)
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
memset(logo, '\0', sizeof(struct fc_logo_s));
bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
logo->els_cmd.els_code = FC_ELS_LOGO;
logo->nport_id = (s_id);
logo->orig_port_name = port_name;
@ -470,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name, u8 els_code)
{
memset(adisc, '\0', sizeof(struct fc_adisc_s));
bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
adisc->els_cmd.els_code = els_code;
@ -489,8 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
u16
fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name)
u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
{
return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
node_name, FC_ELS_ADISC);
@ -523,10 +513,10 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
}
enum fc_parse_status
fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
wwn_t node_name, wwn_t port_name)
fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
wwn_t port_name)
{
struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
if (adisc->els_cmd.els_code != FC_ELS_ACC)
return FC_PARSE_FAILURE;
@ -542,13 +532,13 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
enum fc_parse_status
fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
{
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
if (pdisc->class3.class_valid != 1)
return FC_PARSE_FAILURE;
if ((bfa_os_ntohs(pdisc->class3.rxsz) <
(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
|| (pdisc->class3.rxsz == 0))
return FC_PARSE_FAILURE;
@ -584,8 +574,8 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
}
u16
fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid)
fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
u16 ox_id, u16 rrq_oxid)
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
@ -604,11 +594,11 @@ u16
fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id)
{
struct fc_els_cmd_s *acc = pld;
struct fc_els_cmd_s *acc = pld;
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
memset(acc, 0, sizeof(struct fc_els_cmd_s));
bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
acc->els_code = FC_ELS_ACC;
return sizeof(struct fc_els_cmd_s);
@ -620,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
u8 reason_code_expl)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
ls_rjt->reason_code = reason_code;
@ -647,11 +637,11 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
}
u16
fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
u32 d_id, u32 s_id, u16 ox_id)
fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
u32 s_id, u16 ox_id)
{
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
els_cmd->els_code = FC_ELS_ACC;
return sizeof(struct fc_els_cmd_s);
@ -661,8 +651,8 @@ int
fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
{
int num_pages = 0;
struct fc_prlo_s *prlo;
struct fc_tprlo_s *tprlo;
struct fc_prlo_s *prlo;
struct fc_tprlo_s *tprlo;
if (els_code == FC_ELS_PRLO) {
prlo = (struct fc_prlo_s *) (fc_frame + 1);
@ -676,14 +666,13 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
u16
fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u32 d_id, u32 s_id, u16 ox_id,
int num_pages)
u32 d_id, u32 s_id, u16 ox_id, int num_pages)
{
int page;
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
memset(tprlo_acc, 0, (num_pages * 16) + 4);
bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
tprlo_acc->command = FC_ELS_ACC;
tprlo_acc->page_len = 0x10;
@ -700,15 +689,14 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
}
u16
fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, u16 ox_id,
int num_pages)
fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
u32 s_id, u16 ox_id, int num_pages)
{
int page;
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
memset(prlo_acc, 0, (num_pages * 16) + 4);
bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
prlo_acc->command = FC_ELS_ACC;
prlo_acc->page_len = 0x10;
prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
@ -726,11 +714,11 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u16
fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
u32 s_id, u16 ox_id, u32 data_format)
u32 s_id, u16 ox_id, u32 data_format)
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
rnid->els_cmd.els_code = FC_ELS_RNID;
rnid->node_id_data_format = data_format;
@ -739,13 +727,12 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
}
u16
fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
u32 d_id, u32 s_id, u16 ox_id,
u32 data_format,
struct fc_rnid_common_id_data_s *common_id_data,
struct fc_rnid_general_topology_data_s *gen_topo_data)
fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
u32 s_id, u16 ox_id, u32 data_format,
struct fc_rnid_common_id_data_s *common_id_data,
struct fc_rnid_general_topology_data_s *gen_topo_data)
{
memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@ -769,27 +756,26 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
u16
fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
u32 s_id, u16 ox_id)
u32 s_id, u16 ox_id)
{
fc_els_req_build(fchs, d_id, s_id, ox_id);
memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
rpsc->els_cmd.els_code = FC_ELS_RPSC;
return sizeof(struct fc_rpsc_cmd_s);
}
u16
fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
u32 d_id, u32 s_id, u32 *pid_list,
u16 npids)
fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
u32 s_id, u32 *pid_list, u16 npids)
{
u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
int i = 0;
fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
rpsc2->els_cmd.els_code = FC_ELS_RPSC;
rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
@ -797,16 +783,15 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
for (i = 0; i < npids; i++)
rpsc2->pid_list[i].pid = pid_list[i];
return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
(sizeof(u32)));
return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
}
u16
fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
u32 d_id, u32 s_id, u16 ox_id,
struct fc_rpsc_speed_info_s *oper_speed)
u32 d_id, u32 s_id, u16 ox_id,
struct fc_rpsc_speed_info_s *oper_speed)
{
memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@ -820,7 +805,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
bfa_os_htons(oper_speed->port_op_speed);
return sizeof(struct fc_rpsc_acc_s);
}
/*
@ -831,7 +815,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
u16
fc_logo_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
len = len;
if (els_cmd->els_code != FC_ELS_ACC)
@ -841,11 +825,10 @@ fc_logo_rsp_parse(struct fchs_s *fchs, int len)
}
u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size)
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name, u16 pdu_size)
{
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
@ -862,7 +845,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16
fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
{
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
if (len < sizeof(struct fc_logi_s))
return FC_PARSE_LEN_INVAL;
@ -886,11 +869,11 @@ u16
fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages)
{
struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
int page;
fc_els_req_build(fchs, d_id, s_id, ox_id);
memset(prlo, 0, (num_pages * 16) + 4);
bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
prlo->command = FC_ELS_PRLO;
prlo->page_len = 0x10;
prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@ -909,7 +892,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
u16
fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
int num_pages = 0;
int page = 0;
@ -941,15 +924,14 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
}
u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages,
enum fc_tprlo_type tprlo_type, u32 tpr_id)
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
{
struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
int page;
fc_els_req_build(fchs, d_id, s_id, ox_id);
memset(tprlo, 0, (num_pages * 16) + 4);
bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
tprlo->command = FC_ELS_TPRLO;
tprlo->page_len = 0x10;
tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@ -1003,7 +985,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
enum fc_parse_status
fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
len = len;
if (els_cmd->els_code != FC_ELS_ACC)
@ -1013,11 +995,10 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
}
u16
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, u32 reason_code,
u32 reason_expl)
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
u32 reason_code, u32 reason_expl)
{
struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
@ -1062,10 +1043,8 @@ u16
fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
wwn_t port_name)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_gidpn_req_s *gidpn =
(struct fcgs_gidpn_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@ -1080,8 +1059,7 @@ u16
fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u32 port_id)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
@ -1097,8 +1075,7 @@ u16
fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u32 port_id)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
@ -1124,8 +1101,8 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
}
u16
fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
u32 s_id, u16 ox_id)
fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
u8 set_br_reg, u32 s_id, u16 ox_id)
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
@ -1141,8 +1118,8 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
}
u16
fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
u16 ox_id)
fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
u32 s_id, u16 ox_id)
{
u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
u16 payldlen;
@ -1162,11 +1139,10 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
u16
fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
enum bfa_port_role roles)
enum bfa_lport_role roles)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rftid_req_s *rftid =
(struct fcgs_rftid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
u8 index;
@ -1182,23 +1158,15 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
type_value = 1 << (FC_TYPE_FCP % 32);
rftid->fc4_type[index] = bfa_os_htonl(type_value);
if (roles & BFA_PORT_ROLE_FCP_IPFC) {
index = FC_TYPE_IP >> 5;
type_value = 1 << (FC_TYPE_IP % 32);
rftid->fc4_type[index] |= bfa_os_htonl(type_value);
}
return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
}
u16
fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 ox_id, u8 *fc4_bitmap,
u32 bitmap_size)
fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u8 *fc4_bitmap, u32 bitmap_size)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rftid_req_s *rftid =
(struct fcgs_rftid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@ -1208,7 +1176,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
rftid->dap = s_id;
bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
(bitmap_size < 32 ? bitmap_size : 32));
(bitmap_size < 32 ? bitmap_size : 32));
return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
}
@ -1217,9 +1185,8 @@ u16
fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u8 fc4_type, u8 fc4_ftrs)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rffid_req_s *rffid =
(struct fcgs_rffid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@ -1227,9 +1194,9 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
rffid->dap = s_id;
rffid->dap = s_id;
rffid->fc4ftr_bits = fc4_ftrs;
rffid->fc4_type = fc4_type;
rffid->fc4_type = fc4_type;
return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
}
@ -1239,9 +1206,9 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u8 *name)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rspnid_req_s *rspnid =
(struct fcgs_rspnid_req_s *) (cthdr + 1);
(struct fcgs_rspnid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@ -1257,13 +1224,11 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
}
u16
fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u8 fc4_type)
fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_gidft_req_s *gidft =
(struct fcgs_gidft_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1282,9 +1247,8 @@ u16
fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
wwn_t port_name)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rpnid_req_s *rpnid =
(struct fcgs_rpnid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1301,9 +1265,8 @@ u16
fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
wwn_t node_name)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rnnid_req_s *rnnid =
(struct fcgs_rnnid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1320,7 +1283,7 @@ u16
fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
u32 cos)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rcsid_req_s *rcsid =
(struct fcgs_rcsid_req_s *) (cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
@ -1339,9 +1302,8 @@ u16
fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
u8 port_type)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rptid_req_s *rptid =
(struct fcgs_rptid_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1357,9 +1319,8 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
u16
fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_ganxt_req_s *ganxt =
(struct fcgs_ganxt_req_s *) (cthdr + 1);
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_NAME_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1379,7 +1340,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 cmd_code)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@ -1409,12 +1370,12 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
}
/*
* GMAL Request
* GMAL Request
*/
u16
fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);
@ -1434,7 +1395,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
u16
fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
{
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER);

View File

@ -0,0 +1,316 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* fcbuild.h - FC link service frame building and parsing routines
*/
#ifndef __FCBUILD_H__
#define __FCBUILD_H__
#include "bfa_os_inc.h"
#include "bfa_fc.h"
#include "bfa_defs_fcs.h"
/*
* Utility Macros/functions
*/
#define wwn_is_equal(_wwn1, _wwn2) \
(memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
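/*
 * Note on fc_roundup(): the add-and-mask form rounds _l up to the next
 * multiple of _s and relies on _s being a power of two -- e.g.
 * fc_roundup(payload_len, 4) (illustrative) pads a length up to a
 * 4-byte boundary.
 */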
/*
* Given the fc response length, this routine will return
* the length of the actual payload bytes following the CT header.
*
* Assumes the input response length does not include the crc, eof, etc.
*/
static inline u32
fc_get_ctresp_pyld_len(u32 resp_len)
{
return resp_len - sizeof(struct ct_hdr_s);
}
/*
* Convert bfa speed to rpsc speed value.
*/
static inline enum bfa_port_speed
fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
{
switch (speed) {
case RPSC_OP_SPEED_1G:
return BFA_PORT_SPEED_1GBPS;
case RPSC_OP_SPEED_2G:
return BFA_PORT_SPEED_2GBPS;
case RPSC_OP_SPEED_4G:
return BFA_PORT_SPEED_4GBPS;
case RPSC_OP_SPEED_8G:
return BFA_PORT_SPEED_8GBPS;
case RPSC_OP_SPEED_10G:
return BFA_PORT_SPEED_10GBPS;
default:
return BFA_PORT_SPEED_UNKNOWN;
}
}
/*
* Convert RPSC speed to bfa speed value.
*/
static inline enum fc_rpsc_op_speed
fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
{
switch (op_speed) {
case BFA_PORT_SPEED_1GBPS:
return RPSC_OP_SPEED_1G;
case BFA_PORT_SPEED_2GBPS:
return RPSC_OP_SPEED_2G;
case BFA_PORT_SPEED_4GBPS:
return RPSC_OP_SPEED_4G;
case BFA_PORT_SPEED_8GBPS:
return RPSC_OP_SPEED_8G;
case BFA_PORT_SPEED_10GBPS:
return RPSC_OP_SPEED_10G;
default:
return RPSC_OP_SPEED_NOT_EST;
}
}
enum fc_parse_status {
FC_PARSE_OK = 0,
FC_PARSE_FAILURE = 1,
FC_PARSE_BUSY = 2,
FC_PARSE_LEN_INVAL,
FC_PARSE_ACC_INVAL,
FC_PARSE_PWWN_NOT_EQUAL,
FC_PARSE_NWWN_NOT_EQUAL,
FC_PARSE_RXSZ_INVAL,
FC_PARSE_NOT_FCP,
FC_PARSE_OPAFLAG_INVAL,
FC_PARSE_RPAFLAG_INVAL,
FC_PARSE_OPA_INVAL,
FC_PARSE_RPA_INVAL,
};
struct fc_templates_s {
struct fchs_s fc_els_req;
struct fchs_s fc_bls_req;
struct fc_logi_s plogi;
struct fc_rrq_s rrq;
};
void fcbuild_init(void);
u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size, u8 set_npiv, u8 set_auth,
u16 local_bb_credits);
u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size);
u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name,
u16 pdu_size,
u16 local_bb_credits);
u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name, u16 pdu_size);
enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
u16 ox_id);
enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid);
enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, enum bfa_lport_role role);
u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 ox_id, u8 *fc4_bitmap,
u32 bitmap_size);
u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 ox_id, wwn_t port_name);
u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u32 port_id);
u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
u8 set_br_reg, u32 s_id, u16 ox_id);
u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name,
u16 pdu_size);
u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
wwn_t node_name);
enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
u32 host_dap, wwn_t node_name, wwn_t port_name);
enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
wwn_t port_name, wwn_t node_name);
u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name);
u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
u32 d_id, u32 s_id, u16 ox_id,
u8 reason_code, u8 reason_code_expl);
u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
u32 d_id, u32 s_id, u16 ox_id);
u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id);
enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id,
enum bfa_lport_role role);
u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
u32 d_id, u32 s_id, u16 ox_id,
u32 data_format);
u16 fc_rnid_acc_build(struct fchs_s *fchs,
struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
u16 ox_id, u32 data_format,
struct fc_rnid_common_id_data_s *common_id_data,
struct fc_rnid_general_topology_data_s *gen_topo_data);
u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
u32 d_id, u32 s_id, u16 ox_id);
u16 fc_rpsc_acc_build(struct fchs_s *fchs,
struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
u8 fc4_type);
u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u32 port_id, wwn_t port_name);
u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u32 port_id, wwn_t node_name);
u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u32 port_id, u32 cos);
u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u32 port_id, u8 port_type);
u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u32 port_id);
u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name);
u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id);
u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 cmd_code);
u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id);
enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
wwn_t port_name);
enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli);
enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
wwn_t port_name);
u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
u32 s_id, u16 ox_id, u16 rx_id);
int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u32 d_id, u32 s_id, u16 ox_id, int num_pages);
u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, u16 ox_id, int num_pages);
u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size);
u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages);
u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
u32 tpr_id);
u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, u32 reason_code, u32 reason_expl);
u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
u32 port_id);
u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
u16 ox_id);
#endif

Diff between files not shown because of its large size.

View File

@ -0,0 +1,401 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_FCPIM_H__
#define __BFA_FCPIM_H__
#include "bfa.h"
#include "bfa_svc.h"
#include "bfi_ms.h"
#include "bfa_defs_svc.h"
#include "bfa_cs.h"
#define BFA_ITNIM_MIN 32
#define BFA_ITNIM_MAX 1024
#define BFA_IOIM_MIN 8
#define BFA_IOIM_MAX 2000
#define BFA_TSKIM_MIN 4
#define BFA_TSKIM_MAX 512
#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
#define bfa_itnim_ioprofile_update(__itnim, __index) \
(__itnim->ioprofile.iocomps[__index]++)
#define BFA_IOIM_RETRY_TAG_OFFSET 11
#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
#define BFA_IOIM_RETRY_MAX 7
/* Buckets are 512 bytes to 2MB */
static inline u32
bfa_ioim_get_index(u32 n) {
int pos = 0;
if (n >= (1UL)<<22)
return BFA_IOBUCKET_MAX - 1;
n >>= 8;
if (n >= (1UL)<<16)
n >>= 16; pos += 16;
if (n >= 1 << 8)
n >>= 8; pos += 8;
if (n >= 1 << 4)
n >>= 4; pos += 4;
if (n >= 1 << 2)
n >>= 2; pos += 2;
if (n >= 1 << 1)
pos += 1;
return (n == 0) ? (0) : pos;
}
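/*
 * The helper above maps an I/O size in bytes to one of the power-of-two
 * profiling buckets (512 bytes up to 2MB); the index it returns is the
 * kind of value fed to bfa_itnim_ioprofile_update() when per-itnim I/O
 * size statistics are collected.  Illustrative call, with 'xfer_len'
 * standing in for the I/O transfer length:
 *
 *	bfa_itnim_ioprofile_update(itnim, bfa_ioim_get_index(xfer_len));
 */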
/*
* forward declarations
*/
struct bfa_ioim_s;
struct bfa_tskim_s;
struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_mod_s {
struct bfa_s *bfa;
struct bfa_itnim_s *itnim_arr;
struct bfa_ioim_s *ioim_arr;
struct bfa_ioim_sp_s *ioim_sp_arr;
struct bfa_tskim_s *tskim_arr;
struct bfa_dma_s snsbase;
int num_itnims;
int num_ioim_reqs;
int num_tskim_reqs;
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
u8 rsvd;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_free_q; /* free IO resources */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
struct list_head tskim_free_q;
u32 ios_active; /* current active IOs */
u32 delay_comp;
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
u32 io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
/**
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* BFA ioim state machine */
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_ioim_s *dio; /* driver IO handle */
u16 iotag; /* FWI IO tag */
u16 abort_tag; /* unique abort request tag */
u16 nsges; /* number of SG elements */
u16 nsgpgs; /* number of SG pages */
struct bfa_sgpg_s *sgpg; /* first SG page */
struct list_head sgpg_q; /* allocated SG pages */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
u8 reqq; /* Request queue for I/O */
u64 start_time; /* IO's Profile start val */
};
struct bfa_ioim_sp_s {
struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
u8 *snsinfo; /* sense info for this IO */
struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
bfa_boolean_t abort_explicit; /* aborted by OS */
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};
/**
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
struct list_head qe;
bfa_sm_t sm;
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
bfa_boolean_t notify; /* notify itnim on TM comp */
lun_t lun; /* lun if applicable */
enum fcp_tm_cmnd tm_cmnd; /* task management command */
u16 tsk_tag; /* FWI IO tag */
u8 tsecs; /* timeout in seconds */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct list_head io_q; /* queue of affected IOs */
struct bfa_wc_s wc; /* waiting counter */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
enum bfi_tskim_status tsk_status; /* TM status */
};
/**
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* i-t-n im BFA state machine */
struct bfa_s *bfa; /* bfa instance */
struct bfa_rport_s *rport; /* bfa rport */
void *ditn; /* driver i-t-n structure */
struct bfi_mhdr_s mhdr; /* pre-built mhdr */
u8 msg_no; /* itnim/rport firmware handle */
u8 reqq; /* CQ for requests */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
struct list_head pending_q; /* queue of pending IO requests */
struct list_head io_q; /* queue of active IO requests */
struct list_head io_cleanup_q; /* IO being cleaned up */
struct list_head tsk_q; /* queue of active TM commands */
struct list_head delay_comp_q; /* queue of failed inflight cmds */
bfa_boolean_t seq_rec; /* SQER supported */
bfa_boolean_t is_online; /* itnim is ONLINE for IO */
bfa_boolean_t iotov_active; /* IO TOV timer is active */
struct bfa_wc_s wc; /* waiting counter */
struct bfa_timer_s timer; /* pending IO TOV */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
struct bfa_itnim_iostats_s stats;
struct bfa_itnim_ioprofile_s ioprofile;
};
#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
(&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
#define bfa_io_profile_start_time(_bfa) \
(_bfa->modules.fcpim_mod.io_profile_start_time)
#define bfa_fcpim_get_io_profile(_bfa) \
(_bfa->modules.fcpim_mod.io_profile)
static inline bfa_boolean_t
bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
{
u16 k = ioim->iotag;
k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
if (k > BFA_IOIM_RETRY_MAX)
return BFA_FALSE;
ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
return BFA_TRUE;
}
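/*
 * Tag layout implied by the defines above: the low 11 bits
 * (BFA_IOIM_RETRY_TAG_MASK) select the ioim array slot, while the bits
 * from BFA_IOIM_RETRY_TAG_OFFSET upward carry a retry count.
 * bfa_ioim_get_iotag() bumps that count and reports failure once it
 * would exceed BFA_IOIM_RETRY_MAX; the rebuilt tag still resolves to the
 * same ioim through BFA_IOIM_FROM_TAG(), which masks the retry bits off.
 */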
/*
* function prototypes
*/
void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
struct bfi_msg_s *msg);
void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
struct bfa_tskim_s *tskim);
void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
void bfa_ioim_tov(struct bfa_ioim_s *ioim);
void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
/*
* bfa fcpim module API functions
*/
void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *modstats);
bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
struct bfa_fcpim_del_itn_stats_s *modstats);
bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
void bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
bfa_boolean_t state);
void bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
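/* Round-robin selection of the next request queue (CQ) for an I/O. */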
#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
{ \
struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
__fcpim->reqq++; \
__fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
*(__qid) = __fcpim->reqq; \
}
#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \
*(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
/*
* bfa itnim API functions
*/
struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
struct bfa_rport_s *rport, void *itnim);
void bfa_itnim_delete(struct bfa_itnim_s *itnim);
void bfa_itnim_online(struct bfa_itnim_s *itnim,
bfa_boolean_t seq_rec);
void bfa_itnim_offline(struct bfa_itnim_s *itnim);
void bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
struct bfa_itnim_iostats_s *stats);
void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
struct bfa_itnim_ioprofile_s *ioprofile);
#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
/**
* BFA completion callback for bfa_itnim_online().
*
* @param[in] itnim FCS or driver itnim instance
*
* return None
*/
void bfa_cb_itnim_online(void *itnim);
/**
* BFA completion callback for bfa_itnim_offline().
*
* @param[in] itnim FCS or driver itnim instance
*
* return None
*/
void bfa_cb_itnim_offline(void *itnim);
void bfa_cb_itnim_tov_begin(void *itnim);
void bfa_cb_itnim_tov(void *itnim);
/**
* BFA notification to FCS/driver for second level error recovery.
*
 * At least one I/O request has timed out and the target is unresponsive to
* repeated abort requests. Second level error recovery should be initiated
* by starting implicit logout and recovery procedures.
*
* @param[in] itnim FCS or driver itnim instance
*
* return None
*/
void bfa_cb_itnim_sler(void *itnim);
/*
* bfa ioim API functions
*/
struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa,
struct bfad_ioim_s *dio,
struct bfa_itnim_s *itnim,
u16 nsgles);
void bfa_ioim_free(struct bfa_ioim_s *ioim);
void bfa_ioim_start(struct bfa_ioim_s *ioim);
bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim);
void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
bfa_boolean_t iotov);
/**
* I/O completion notification.
*
* @param[in] dio driver IO structure
* @param[in] io_status IO completion status
* @param[in] scsi_status SCSI status returned by target
* @param[in] sns_len SCSI sense length, 0 if none
* @param[in] sns_info SCSI sense data, if any
* @param[in] residue Residual length
*
* @return None
*/
void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
enum bfi_ioim_status io_status,
u8 scsi_status, int sns_len,
u8 *sns_info, s32 residue);
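/*
 * Illustrative sketch only -- this is not the in-tree bfad completion
 * handler.  It shows how a driver-side bfa_cb_ioim_done() could consume
 * the arguments documented above: derive a SCSI result, copy the sense
 * data and record the residual before completing the midlayer command.
 * Casting dio to struct scsi_cmnd and the simple OK/ERROR mapping are
 * assumptions made for the example; the real driver handles the full
 * set of bfi_ioim_status values.  Assumes <scsi/scsi.h> and
 * <scsi/scsi_cmnd.h> are available.
 */
static void example_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
				 enum bfi_ioim_status io_status,
				 u8 scsi_status, int sns_len,
				 u8 *sns_info, s32 residue)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) dio;
	int host_status = (io_status == BFI_IOIM_STS_OK) ? DID_OK : DID_ERROR;

	cmnd->result = (host_status << 16) | scsi_status;

	if (sns_len > 0 && sns_info)
		memcpy(cmnd->sense_buffer, sns_info,
		       sns_len < SCSI_SENSE_BUFFERSIZE ?
		       sns_len : SCSI_SENSE_BUFFERSIZE);

	scsi_set_resid(cmnd, residue);
	cmnd->scsi_done(cmnd);
}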
/**
* I/O good completion notification.
*
* @param[in] dio driver IO structure
*
* @return None
*/
void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
/**
* I/O abort completion notification
*
* @param[in] dio driver IO that was aborted
*
* @return None
*/
void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
/*
* bfa tskim API functions
*/
struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
struct bfad_tskim_s *dtsk);
void bfa_tskim_free(struct bfa_tskim_s *tskim);
void bfa_tskim_start(struct bfa_tskim_s *tskim,
struct bfa_itnim_s *itnim, lun_t lun,
enum fcp_tm_cmnd tm, u8 t_secs);
void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
enum bfi_tskim_status tsk_status);
#endif /* __BFA_FCPIM_H__ */

View file

@ -1,192 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_FCPIM_PRIV_H__
#define __BFA_FCPIM_PRIV_H__
#include <bfa_fcpim.h>
#include <defs/bfa_defs_fcpim.h>
#include <cs/bfa_wc.h>
#include "bfa_sgpg_priv.h"
#define BFA_ITNIM_MIN 32
#define BFA_ITNIM_MAX 1024
#define BFA_IOIM_MIN 8
#define BFA_IOIM_MAX 2000
#define BFA_TSKIM_MIN 4
#define BFA_TSKIM_MAX 512
#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */
#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */
#define bfa_fcpim_stats(__fcpim, __stats) \
((__fcpim)->stats.__stats++)
struct bfa_fcpim_mod_s {
struct bfa_s *bfa;
struct bfa_itnim_s *itnim_arr;
struct bfa_ioim_s *ioim_arr;
struct bfa_ioim_sp_s *ioim_sp_arr;
struct bfa_tskim_s *tskim_arr;
struct bfa_dma_s snsbase;
int num_itnims;
int num_ioim_reqs;
int num_tskim_reqs;
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
u8 rsvd;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_free_q; /* free IO resources */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
struct list_head tskim_free_q;
u32 ios_active; /* current active IOs */
u32 delay_comp;
struct bfa_fcpim_stats_s stats;
bfa_boolean_t ioredirect;
};
struct bfa_ioim_s;
struct bfa_tskim_s;
/**
* BFA IO (initiator mode)
*/
struct bfa_ioim_s {
struct list_head qe; /* queue elememt */
bfa_sm_t sm; /* BFA ioim state machine */
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_ioim_s *dio; /* driver IO handle */
u16 iotag; /* FWI IO tag */
u16 abort_tag; /* unqiue abort request tag */
u16 nsges; /* number of SG elements */
u16 nsgpgs; /* number of SG pages */
struct bfa_sgpg_s *sgpg; /* first SG page */
struct list_head sgpg_q; /* allocated SG pages */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
u8 reqq; /* Request queue for I/O */
};
struct bfa_ioim_sp_s {
struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
u8 *snsinfo; /* sense info for this IO */
struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
bfa_boolean_t abort_explicit; /* aborted by OS */
struct bfa_tskim_s *tskim; /* Relevant TM cmd */
};
/**
* BFA Task management command (initiator mode)
*/
struct bfa_tskim_s {
struct list_head qe;
bfa_sm_t sm;
struct bfa_s *bfa; /* BFA module */
struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
bfa_boolean_t notify; /* notify itnim on TM comp */
lun_t lun; /* lun if applicable */
enum fcp_tm_cmnd tm_cmnd; /* task management command */
u16 tsk_tag; /* FWI IO tag */
u8 tsecs; /* timeout in seconds */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct list_head io_q; /* queue of affected IOs */
struct bfa_wc_s wc; /* waiting counter */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
enum bfi_tskim_status tsk_status; /* TM status */
};
/**
* BFA i-t-n (initiator mode)
*/
struct bfa_itnim_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* i-t-n im BFA state machine */
struct bfa_s *bfa; /* bfa instance */
struct bfa_rport_s *rport; /* bfa rport */
void *ditn; /* driver i-t-n structure */
struct bfi_mhdr_s mhdr; /* pre-built mhdr */
u8 msg_no; /* itnim/rport firmware handle */
u8 reqq; /* CQ for requests */
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
struct list_head pending_q; /* queue of pending IO requests*/
struct list_head io_q; /* queue of active IO requests */
struct list_head io_cleanup_q; /* IO being cleaned up */
struct list_head tsk_q; /* queue of active TM commands */
struct list_head delay_comp_q;/* queue of failed inflight cmds */
bfa_boolean_t seq_rec; /* SQER supported */
bfa_boolean_t is_online; /* itnim is ONLINE for IO */
bfa_boolean_t iotov_active; /* IO TOV timer is active */
struct bfa_wc_s wc; /* waiting counter */
struct bfa_timer_s timer; /* pending IO TOV */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
struct bfa_itnim_hal_stats_s stats;
struct bfa_itnim_latency_s io_latency;
};
#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
(&fcpim->ioim_arr[_iotag])
#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \
(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
/*
* function prototypes
*/
void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
struct bfi_msg_s *msg);
void bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
struct bfa_tskim_s *tskim);
void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
void bfa_ioim_tov(struct bfa_ioim_s *ioim);
void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
struct bfa_meminfo_s *minfo);
void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
void bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
#endif /* __BFA_FCPIM_PRIV_H__ */

The diff for this file is not shown because it is too large.

The diff for this file is not shown because it is too large.

779
drivers/scsi/bfa/bfa_fcs.h Normal file
View file

@ -0,0 +1,779 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_FCS_H__
#define __BFA_FCS_H__
#include "bfa_cs.h"
#include "bfa_defs.h"
#include "bfa_defs_fcs.h"
#include "bfa_modules.h"
#include "bfa_fc.h"
#define BFA_FCS_OS_STR_LEN 64
/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
enum {
BFA_TRC_FCS_FCS = 1,
BFA_TRC_FCS_PORT = 2,
BFA_TRC_FCS_RPORT = 3,
BFA_TRC_FCS_FCPIM = 4,
};
struct bfa_fcs_s;
#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg)
void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
#define BFA_FCS_BRCD_SWITCH_OUI 0x051e
#define N2N_LOCAL_PID 0x010000
#define N2N_REMOTE_PID 0x020000
#define BFA_FCS_RETRY_TIMEOUT 2000
#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
struct bfa_fcs_lport_ns_s {
bfa_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
};
struct bfa_fcs_lport_scn_s {
bfa_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
};
struct bfa_fcs_lport_fdmi_s {
bfa_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_ms_s *ms; /* parent ms */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
u8 retry_cnt; /* retry count */
u8 rsvd[3];
};
struct bfa_fcs_lport_ms_s {
bfa_sm_t sm; /* state machine */
struct bfa_timer_s timer;
struct bfa_fcs_lport_s *port; /* parent port */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */
u8 retry_cnt; /* retry count */
u8 rsvd[3];
};
struct bfa_fcs_lport_fab_s {
struct bfa_fcs_lport_ns_s ns; /* NS component of port */
struct bfa_fcs_lport_scn_s scn; /* scn component of port */
struct bfa_fcs_lport_ms_s ms; /* MS component of port */
};
#define MAX_ALPA_COUNT 127
struct bfa_fcs_lport_loop_s {
u8 num_alpa; /* Num of ALPA entries in the map */
u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional
*Map */
struct bfa_fcs_lport_s *port; /* parent port */
};
struct bfa_fcs_lport_n2n_s {
u32 rsvd;
u16 reply_oxid; /* ox_id from the req flogi to be
*used in flogi acc */
wwn_t rem_port_wwn; /* Attached port's wwn */
};
union bfa_fcs_lport_topo_u {
struct bfa_fcs_lport_fab_s pfab;
struct bfa_fcs_lport_loop_s ploop;
struct bfa_fcs_lport_n2n_s pn2n;
};
struct bfa_fcs_lport_s {
struct list_head qe; /* used by port/vport */
bfa_sm_t sm; /* state machine */
struct bfa_fcs_fabric_s *fabric; /* parent fabric */
struct bfa_lport_cfg_s port_cfg; /* port configuration */
struct bfa_timer_s link_timer; /* timer for link offline */
u32 pid:24; /* FC address */
u8 lp_tag; /* lport tag */
u16 num_rports; /* Num of r-ports */
struct list_head rport_q; /* queue of discovered r-ports */
struct bfa_fcs_s *fcs; /* FCS instance */
union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */
struct bfad_port_s *bfad_port; /* driver peer instance */
struct bfa_fcs_vport_s *vport; /* NULL for base ports */
struct bfa_fcxp_s *fcxp;
struct bfa_fcxp_wqe_s fcxp_wqe;
struct bfa_lport_stats_s stats;
struct bfa_wc_s wc; /* waiting counter for events */
};
#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa)
#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns)
#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn)
#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms)
#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi)
#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
(port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
/*
* forward declaration
*/
struct bfad_vf_s;
enum bfa_fcs_fabric_type {
BFA_FCS_FABRIC_UNKNOWN = 0,
BFA_FCS_FABRIC_SWITCHED = 1,
BFA_FCS_FABRIC_N2N = 2,
};
struct bfa_fcs_fabric_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* state machine */
struct bfa_fcs_s *fcs; /* FCS instance */
struct bfa_fcs_lport_s bport; /* base logical port */
enum bfa_fcs_fabric_type fab_type; /* fabric type */
enum bfa_port_type oper_type; /* current link topology */
u8 is_vf; /* is virtual fabric? */
u8 is_npiv; /* is NPIV supported ? */
u8 is_auth; /* is Security/Auth supported ? */
u16 bb_credit; /* BB credit from fabric */
u16 vf_id; /* virtual fabric ID */
u16 num_vports; /* num vports */
u16 rsvd;
struct list_head vport_q; /* queue of virtual ports */
struct list_head vf_q; /* queue of virtual fabrics */
struct bfad_vf_s *vf_drv; /* driver vf structure */
struct bfa_timer_s link_timer; /* Link Failure timer. Vport */
wwn_t fabric_name; /* attached fabric name */
bfa_boolean_t auth_reqd; /* authentication required */
struct bfa_timer_s delay_timer; /* delay timer */
union {
u16 swp_vfid;/* switch port VF id */
} event_arg;
struct bfa_wc_s wc; /* wait counter for delete */
struct bfa_vf_stats_s stats; /* fabric/vf stats */
struct bfa_lps_s *lps; /* lport login services */
u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
/* attached fabric's ip addr */
};
#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
#define bfa_fcs_fabric_is_switched(__f) \
((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
/**
* The design calls for a single implementation of base fabric and vf.
*/
#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
struct bfa_vf_event_s {
u32 undefined;
};
struct bfa_fcs_s;
struct bfa_fcs_fabric_s;
/*
* @todo : need to move to a global config file.
* Maximum Rports supported per port (physical/logical).
*/
#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
#define bfa_fcs_lport_t struct bfa_fcs_lport_s
/**
* Symbolic Name related defines
* Total bytes 255.
* Physical Port's symbolic name 128 bytes.
* For Vports, Vport's symbolic name is appended to the Physical port's
* Symbolic Name.
*
* Physical Port's symbolic name Format : (Total 128 bytes)
* Adapter Model number/name : 12 bytes
* Driver Version : 10 bytes
* Host Machine Name : 30 bytes
* Host OS Info : 48 bytes
* Host OS PATCH Info : 16 bytes
* ( remaining 12 bytes reserved to be used for separator)
*/
#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | "
#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12
#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10
#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30
#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
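/*
 * Sketch (not part of the driver sources): one way the physical port
 * symbolic name could be composed from the fixed-size fields documented
 * above, joined by BFA_FCS_PORT_SYMBNAME_SEPARATOR.  The input strings
 * are hypothetical; the driver assembles the real name elsewhere (see
 * bfa_fcs_fabric_psymb_init(), declared later in this header).
 */
static void example_build_psym_name(char *buf, size_t buflen,
				    const char *model, const char *drv_ver,
				    const char *host, const char *os_info,
				    const char *os_patch)
{
	/* Each field is clamped to its documented maximum width. */
	snprintf(buf, buflen, "%.*s%s%.*s%s%.*s%s%.*s%s%.*s",
		 BFA_FCS_PORT_SYMBNAME_MODEL_SZ, model,
		 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		 BFA_FCS_PORT_SYMBNAME_VERSION_SZ, drv_ver,
		 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ, host,
		 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ, os_info,
		 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ, os_patch);
}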
/**
* Get FC port ID for a logical port.
*/
#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
#define bfa_fcs_lport_is_initiator(_lport) \
((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
#define bfa_fcs_lport_get_nrports(_lport) \
((_lport) ? (_lport)->num_rports : 0)
static inline struct bfad_port_s *
bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
{
return port->bfad_port;
}
#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type)
#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name)
#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \
((_lport)->fabric->fabric_ip_addr)
/**
* bfa fcs port public functions
*/
bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
wwn_t rport_wwns[], int *nrports);
wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
int index, int nrports, bfa_boolean_t bwwn);
struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
u16 vf_id, wwn_t lpwwn);
void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
struct bfa_lport_info_s *port_info);
void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
struct bfa_lport_attr_s *port_attr);
void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
struct bfa_lport_stats_s *port_stats);
void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
struct bfa_fcs_lport_s *port);
/* MS FCS routines */
void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
/* FDMI FCS routines */
void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
u16 len);
void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_fcs_vport_s *vport);
void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
struct bfa_lport_cfg_s *port_cfg);
void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
struct bfa_fcs_lport_s *port, u32 pid);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
struct bfa_fcs_lport_s *port, wwn_t pwwn);
struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t nwwn);
void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport);
void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport);
void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_frame, u32 len);
struct bfa_fcs_vport_s {
struct list_head qe; /* queue elem */
bfa_sm_t sm; /* state machine */
bfa_fcs_lport_t lport; /* logical port */
struct bfa_timer_s timer;
struct bfad_vport_s *vport_drv; /* Driver private */
struct bfa_vport_stats_s vport_stats; /* vport statistics */
struct bfa_lps_s *lps; /* Lport login service*/
int fdisc_retries;
};
#define bfa_fcs_vport_get_port(vport) \
((struct bfa_fcs_lport_s *)(&vport->port))
/**
* bfa fcs vport public functions
*/
bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
struct bfa_fcs_s *fcs, u16 vf_id,
struct bfa_lport_cfg_s *port_cfg,
struct bfad_vport_s *vport_drv);
bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
struct bfa_fcs_s *fcs, u16 vf_id,
struct bfa_lport_cfg_s *port_cfg,
struct bfad_vport_s *vport_drv);
bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
struct bfa_vport_attr_s *vport_attr);
void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
struct bfa_vport_stats_s *vport_stats);
void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
u16 vf_id, wwn_t vpwwn);
void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
#define BFA_FCS_RPORT_MAX_RETRIES (5)
/*
* forward declarations
*/
struct bfad_rport_s;
struct bfa_fcs_itnim_s;
struct bfa_fcs_tin_s;
struct bfa_fcs_iprp_s;
/* Rport Features (RPF) */
struct bfa_fcs_rpf_s {
bfa_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent rport */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
int rpsc_retries; /* max RPSC retry attempts */
enum bfa_port_speed rpsc_speed;
	/* Current Speed from RPSC. 0 if RPSC fails */
enum bfa_port_speed assigned_speed;
/**
* Speed assigned by the user. will be used if RPSC is
* not supported by the rport.
*/
};
struct bfa_fcs_rport_s {
struct list_head qe; /* used by port/vport */
struct bfa_fcs_lport_s *port; /* parent FCS port */
struct bfa_fcs_s *fcs; /* fcs instance */
struct bfad_rport_s *rp_drv; /* driver peer instance */
u32 pid; /* port ID of rport */
u16 maxfrsize; /* maximum frame size */
u16 reply_oxid; /* OX_ID of inbound requests */
enum fc_cos fc_cos; /* FC classes of service supp */
bfa_boolean_t cisc; /* CISC capable device */
bfa_boolean_t prlo; /* processing prlo or LOGO */
wwn_t pwwn; /* port wwn of rport */
wwn_t nwwn; /* node wwn of rport */
struct bfa_rport_symname_s psym_name; /* port symbolic name */
bfa_sm_t sm; /* state machine */
struct bfa_timer_s timer; /* general purpose timer */
struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */
struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */
struct bfa_fcs_iprp_s *iprp; /* IP/FC role */
struct bfa_rport_s *bfa_rport; /* BFA Rport */
struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */
int plogi_retries; /* max plogi retry attempts */
int ns_retries; /* max NS query retry attempts */
struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */
struct bfa_rport_stats_s stats; /* rport stats */
enum bfa_rport_function scsi_function; /* Initiator/Target */
struct bfa_fcs_rpf_s rpf; /* Rport features module */
};
static inline struct bfa_rport_s *
bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
{
return rport->bfa_rport;
}
/**
* bfa fcs rport API functions
*/
bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
struct bfa_fcs_rport_s *rport,
struct bfad_rport_s *rport_drv);
bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
struct bfa_rport_attr_s *attr);
void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
struct bfa_rport_stats_s *stats);
void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
wwn_t rpwwn);
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
struct bfa_fcs_lport_s *port, wwn_t rnwwn);
void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
enum bfa_port_speed speed);
void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
struct fchs_s *fchs, u16 len);
void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
u32 pid);
void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
struct fc_logi_s *plogi_rsp);
void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs,
struct fc_logi_s *plogi);
void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
struct fc_logi_s *plogi);
void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
struct bfa_fcs_lport_s *port, wwn_t wwn);
void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
/*
* forward declarations
*/
struct bfad_itnim_s;
struct bfa_fcs_itnim_s {
bfa_sm_t sm; /* state machine */
struct bfa_fcs_rport_s *rport; /* parent remote rport */
struct bfad_itnim_s *itnim_drv; /* driver peer instance */
struct bfa_fcs_s *fcs; /* fcs instance */
struct bfa_timer_s timer; /* timer functions */
struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */
u32 prli_retries; /* max prli retry attempts */
bfa_boolean_t seq_rec; /* seq recovery support */
bfa_boolean_t rec_support; /* REC supported */
bfa_boolean_t conf_comp; /* FCP_CONF support */
bfa_boolean_t task_retry_id; /* task retry id supp */
struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */
struct bfa_fcxp_s *fcxp; /* FCXP in use */
struct bfa_itnim_stats_s stats; /* itn statistics */
};
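/*
 * Convenience wrappers used by FCS to allocate an FCXP (or wait for one)
 * with no caller-supplied scatter-gather elements.
 */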
#define bfa_fcs_fcxp_alloc(__fcs) \
bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
NULL, 0, 0, NULL, NULL, NULL, NULL)
static inline struct bfad_port_s *
bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->port->bfad_port;
}
static inline struct bfa_fcs_lport_s *
bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->port;
}
static inline wwn_t
bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->nwwn;
}
static inline wwn_t
bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->pwwn;
}
static inline u32
bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->pid;
}
static inline u32
bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->maxfrsize;
}
static inline enum fc_cos
bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
{
return itnim->rport->fc_cos;
}
static inline struct bfad_itnim_s *
bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
{
return itnim->itnim_drv;
}
static inline struct bfa_itnim_s *
bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
{
return itnim->bfa_itnim;
}
/**
* bfa fcs FCP Initiator mode API functions
*/
void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
struct bfa_itnim_attr_s *attr);
void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
struct bfa_itnim_stats_s *stats);
struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
wwn_t rpwwn);
bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
struct bfa_itnim_attr_s *attr);
bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
struct bfa_itnim_stats_s *stats);
bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
wwn_t rpwwn);
struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
struct fchs_s *fchs, u16 len);
#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
FDMI_TRANS_SPEED_2G | \
FDMI_TRANS_SPEED_4G | \
FDMI_TRANS_SPEED_8G)
/*
 * HBA Attribute Block : BFA internal representation.  Note: some field
 * sizes have been trimmed to suit BFA.  For example, Model will be "Brocade",
 * so its size has been reduced to 16 bytes from the standard's 64 bytes.
*/
struct bfa_fcs_fdmi_hba_attr_s {
wwn_t node_name;
u8 manufacturer[64];
u8 serial_num[64];
u8 model[16];
u8 model_desc[256];
u8 hw_version[8];
u8 driver_version[8];
u8 option_rom_ver[BFA_VERSION_LEN];
u8 fw_version[8];
u8 os_name[256];
u32 max_ct_pyld;
};
/*
* Port Attribute Block
*/
struct bfa_fcs_fdmi_port_attr_s {
u8 supp_fc4_types[32]; /* supported FC4 types */
u32 supp_speed; /* supported speed */
u32 curr_speed; /* current Speed */
u32 max_frm_size; /* max frame size */
u8 os_device_name[256]; /* OS device Name */
u8 host_name[256]; /* host name */
};
struct bfa_fcs_stats_s {
struct {
u32 untagged; /* untagged receive frames */
u32 tagged; /* tagged receive frames */
u32 vfid_unknown; /* VF id is unknown */
} uf;
};
struct bfa_fcs_driver_info_s {
u8 version[BFA_VERSION_LEN]; /* Driver Version */
u8 host_machine_name[BFA_FCS_OS_STR_LEN];
u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
};
struct bfa_fcs_s {
struct bfa_s *bfa; /* corresponding BFA bfa instance */
struct bfad_s *bfad; /* corresponding BDA driver instance */
struct bfa_trc_mod_s *trcmod; /* tracing module */
bfa_boolean_t vf_enabled; /* VF mode is enabled */
bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
u16 port_vfid; /* port default VF ID */
struct bfa_fcs_driver_info_s driver_info;
struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
struct bfa_fcs_stats_s stats; /* FCS statistics */
struct bfa_wc_s wc; /* waiting counter */
};
/*
* bfa fcs API functions
*/
void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
struct bfad_s *bfad,
bfa_boolean_t min_cfg);
void bfa_fcs_init(struct bfa_fcs_s *fcs);
void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
struct bfa_fcs_driver_info_s *driver_info);
void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
void bfa_fcs_exit(struct bfa_fcs_s *fcs);
void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
void bfa_fcs_start(struct bfa_fcs_s *fcs);
/**
* bfa fcs vf public functions
*/
bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
struct bfad_vf_s *vf_drv);
bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
struct bfa_vf_stats_s *vf_stats);
void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
/*
* fabric protected interface functions
*/
void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport);
void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
struct bfa_fcs_vport_s *vport);
int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
struct bfad_vf_s *vf_drv);
void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
wwn_t fabric_name);
u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
/**
* BFA FCS callback interfaces
*/
/**
* fcb Main fcs callbacks
*/
struct bfad_port_s;
struct bfad_vf_s;
struct bfad_vport_s;
struct bfad_rport_s;
/**
* lport callbacks
*/
struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
struct bfa_fcs_lport_s *port,
enum bfa_lport_role roles,
struct bfad_vf_s *vf_drv,
struct bfad_vport_s *vp_drv);
void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
struct bfad_vf_s *vf_drv,
struct bfad_vport_s *vp_drv);
/**
* vport callbacks
*/
void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
/**
* rport callbacks
*/
bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
struct bfa_fcs_rport_s **rport,
struct bfad_rport_s **rport_drv);
/**
* itnim callbacks
*/
void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
struct bfad_itnim_s **itnim_drv);
void bfa_fcb_itnim_free(struct bfad_s *bfad,
struct bfad_itnim_s *itnim_drv);
void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
#endif /* __BFA_FCS_H__ */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -19,36 +19,24 @@
* fcpim.c - FCP initiator mode i-t nexus state machine
*/
#include <bfa.h>
#include <bfa_svc.h>
#include "fcs_fcpim.h"
#include "fcs_rport.h"
#include "fcs_lport.h"
#include "fcs_trcmod.h"
#include "fcs_fcxp.h"
#include "fcs.h"
#include <fcs/bfa_fcs_fcpim.h>
#include <fcb/bfa_fcb_fcpim.h>
#include <aen/bfa_aen_itnim.h>
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfad_drv.h"
#include "bfad_im.h"
BFA_TRC_FILE(FCS, FCPIM);
/*
* forward declarations
*/
static void bfa_fcs_itnim_timeout(void *arg);
static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
static void bfa_fcs_itnim_timeout(void *arg);
static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
static void bfa_fcs_itnim_send_prli(void *itnim_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_itnim_prli_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
enum bfa_itnim_aen_event event);
static void bfa_fcs_itnim_prli_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
/**
* fcs_itnim_sm FCS itnim state machine events
@ -61,28 +49,28 @@ enum bfa_fcs_itnim_event {
BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
};
static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event);
static struct bfa_sm_table_s itnim_sm_table[] = {
@ -102,7 +90,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
static void
bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
enum bfa_fcs_itnim_event event)
{
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -134,7 +122,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
enum bfa_fcs_itnim_event event)
{
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -168,7 +156,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
static void
bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
enum bfa_fcs_itnim_event event)
{
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -233,6 +221,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
}
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_timer_stop(&itnim->timer);
@ -259,6 +248,10 @@ static void
bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
{
struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
char rpwwn_buf[BFA_STRING_32];
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -266,7 +259,11 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
case BFA_FCS_ITNIM_SM_HCB_ONLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
bfa_fcb_itnim_online(itnim->itnim_drv);
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
wwn2str(rpwwn_buf, itnim->rport->pwwn);
BFA_LOG(KERN_INFO, bfad, log_level,
"Target (WWN = %s) is online for initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
@ -287,8 +284,12 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
static void
bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
enum bfa_fcs_itnim_event event)
{
struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
char rpwwn_buf[BFA_STRING_32];
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -297,10 +298,16 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
bfa_fcb_itnim_offline(itnim->itnim_drv);
bfa_itnim_offline(itnim->bfa_itnim);
if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
wwn2str(rpwwn_buf, itnim->rport->pwwn);
if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
BFA_LOG(KERN_ERR, bfad, log_level,
"Target (WWN = %s) connectivity lost for "
"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
else
bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
BFA_LOG(KERN_INFO, bfad, log_level,
"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
break;
case BFA_FCS_ITNIM_SM_DELETE:
@ -343,7 +350,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
*/
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
enum bfa_fcs_itnim_event event)
enum bfa_fcs_itnim_event event)
{
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_trc(itnim->fcs, event);
@ -369,71 +376,34 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
}
}
/**
* itnim_private FCS ITNIM private interfaces
*/
static void
bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
enum bfa_itnim_aen_event event)
{
struct bfa_fcs_rport_s *rport = itnim->rport;
union bfa_aen_data_u aen_data;
struct bfa_log_mod_s *logmod = rport->fcs->logm;
wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port);
wwn_t rpwwn = rport->pwwn;
char lpwwn_ptr[BFA_STRING_32];
char rpwwn_ptr[BFA_STRING_32];
/*
* Don't post events for well known addresses
*/
if (BFA_FCS_PID_IS_WKA(rport->pid))
return;
wwn2str(lpwwn_ptr, lpwwn);
wwn2str(rpwwn_ptr, rpwwn);
bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
rpwwn_ptr, lpwwn_ptr);
aen_data.itnim.vf_id = rport->port->fabric->vf_id;
aen_data.itnim.ppwwn =
bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs));
aen_data.itnim.lpwwn = lpwwn;
aen_data.itnim.rpwwn = rpwwn;
}
static void
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
struct bfa_fcs_rport_s *rport = itnim->rport;
struct bfa_fcs_port_s *port = rport->port;
struct fchs_s fchs;
struct bfa_fcs_lport_s *port = rport->port;
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
int len;
int len;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
itnim->stats.fcxp_alloc_wait++;
bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
bfa_fcs_itnim_send_prli, itnim);
return;
}
itnim->fcxp = fcxp;
len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid,
bfa_fcs_port_get_fcid(port), 0);
len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs,
bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ,
FC_ELS_TOV);
bfa_fcs_itnim_prli_response, (void *)itnim,
FC_MAX_PDUSZ, FC_ELS_TOV);
itnim->stats.prli_sent++;
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
@ -444,10 +414,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;
struct fc_els_cmd_s *els_cmd;
struct fc_prli_s *prli_resp;
struct fc_ls_rjt_s *ls_rjt;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
struct fc_els_cmd_s *els_cmd;
struct fc_prli_s *prli_resp;
struct fc_ls_rjt_s *ls_rjt;
struct fc_prli_params_s *sparams;
bfa_trc(itnim->fcs, req_status);
@ -475,7 +445,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
if (prli_resp->parampage.servparams.initiator) {
bfa_trc(itnim->fcs, prli_resp->parampage.type);
itnim->rport->scsi_function =
BFA_RPORT_INITIATOR;
BFA_RPORT_INITIATOR;
itnim->stats.prli_rsp_acc++;
bfa_sm_send_event(itnim,
BFA_FCS_ITNIM_SM_RSP_OK);
@ -488,10 +458,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
itnim->rport->scsi_function = BFA_RPORT_TARGET;
sparams = &prli_resp->parampage.servparams;
itnim->seq_rec = sparams->retry;
itnim->rec_support = sparams->rec_support;
itnim->seq_rec = sparams->retry;
itnim->rec_support = sparams->rec_support;
itnim->task_retry_id = sparams->task_retry_id;
itnim->conf_comp = sparams->confirm;
itnim->conf_comp = sparams->confirm;
itnim->stats.prli_rsp_acc++;
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
@ -509,7 +479,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
static void
bfa_fcs_itnim_timeout(void *arg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
itnim->stats.timeout++;
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
@ -529,16 +499,16 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
*/
/**
* Called by rport when a new rport is created.
* Called by rport when a new rport is created.
*
* @param[in] rport - remote port.
*/
struct bfa_fcs_itnim_s *
bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_port_s *port = rport->port;
struct bfa_fcs_lport_s *port = rport->port;
struct bfa_fcs_itnim_s *itnim;
struct bfad_itnim_s *itnim_drv;
struct bfad_itnim_s *itnim_drv;
struct bfa_itnim_s *bfa_itnim;
/*
@ -560,7 +530,8 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
/*
* call BFA to create the itnim
*/
bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
bfa_itnim =
bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
if (bfa_itnim == NULL) {
bfa_trc(port->fcs, rport->pwwn);
@ -569,10 +540,10 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
return NULL;
}
itnim->bfa_itnim = bfa_itnim;
itnim->seq_rec = BFA_FALSE;
itnim->rec_support = BFA_FALSE;
itnim->conf_comp = BFA_FALSE;
itnim->bfa_itnim = bfa_itnim;
itnim->seq_rec = BFA_FALSE;
itnim->rec_support = BFA_FALSE;
itnim->conf_comp = BFA_FALSE;
itnim->task_retry_id = BFA_FALSE;
/*
@ -584,7 +555,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
}
/**
* Called by rport to delete the instance of FCPIM.
* Called by rport to delete the instance of FCPIM.
*
* @param[in] rport - remote port.
*/
@ -607,8 +578,8 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
} else {
/*
* For well known addresses, we set the itnim to initiator
* state
* For well known addresses, we set the itnim to initiator
* state
*/
itnim->stats.initiator++;
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
@ -651,7 +622,6 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
default:
return BFA_STATUS_NO_FCPIM_NEXUS;
}
}
@ -661,7 +631,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
void
bfa_cb_itnim_online(void *cbarg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
@ -673,7 +643,7 @@ bfa_cb_itnim_online(void *cbarg)
void
bfa_cb_itnim_offline(void *cb_arg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
@ -686,7 +656,7 @@ bfa_cb_itnim_offline(void *cb_arg)
void
bfa_cb_itnim_tov_begin(void *cb_arg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
}
@ -697,14 +667,15 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
void
bfa_cb_itnim_tov(void *cb_arg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
bfa_fcb_itnim_tov(itnim->itnim_drv);
itnim_drv->state = ITNIM_STATE_TIMEOUT;
}
/**
* BFA notification to FCS/driver for second level error recovery.
* BFA notification to FCS/driver for second level error recovery.
*
* Atleast one I/O request has timedout and target is unresponsive to
* repeated abort requests. Second level error recovery should be initiated
@ -713,7 +684,7 @@ bfa_cb_itnim_tov(void *cb_arg)
void
bfa_cb_itnim_sler(void *cb_arg)
{
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
itnim->stats.sler++;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
@ -721,7 +692,7 @@ bfa_cb_itnim_sler(void *cb_arg)
}
struct bfa_fcs_itnim_s *
bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
{
struct bfa_fcs_rport_s *rport;
rport = bfa_fcs_rport_lookup(port, rpwwn);
@ -734,7 +705,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
}
bfa_status_t
bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
struct bfa_itnim_attr_s *attr)
{
struct bfa_fcs_itnim_s *itnim = NULL;
@ -744,18 +715,16 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
if (itnim == NULL)
return BFA_STATUS_NO_FCPIM_NEXUS;
attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
attr->retry = itnim->seq_rec;
attr->rec_support = itnim->rec_support;
attr->conf_comp = itnim->conf_comp;
attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
attr->retry = itnim->seq_rec;
attr->rec_support = itnim->rec_support;
attr->conf_comp = itnim->conf_comp;
attr->task_retry_id = itnim->task_retry_id;
bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));
return BFA_STATUS_OK;
}
bfa_status_t
bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
struct bfa_itnim_stats_s *stats)
{
struct bfa_fcs_itnim_s *itnim = NULL;
@ -773,7 +742,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
}
bfa_status_t
bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
{
struct bfa_fcs_itnim_s *itnim = NULL;
@ -789,10 +758,10 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
}
void
bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
u16 len)
bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
struct fchs_s *fchs, u16 len)
{
struct fc_els_cmd_s *els_cmd;
struct fc_els_cmd_s *els_cmd;
bfa_trc(itnim->fcs, fchs->type);
@ -812,13 +781,3 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
bfa_assert(0);
}
}
void
bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim)
{
}
void
bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
{
}

The diff for this file is not shown because it is too large.

View file

@ -1,61 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfa_fcs_pport.c BFA FCS PPORT ( physical port)
*/
#include <fcs/bfa_fcs.h>
#include <bfa_svc.h>
#include <fcs/bfa_fcs_fabric.h>
#include "fcs_trcmod.h"
#include "fcs.h"
#include "fcs_fabric.h"
#include "fcs_port.h"
BFA_TRC_FILE(FCS, PPORT);
static void
bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
{
struct bfa_fcs_s *fcs = cbarg;
bfa_trc(fcs, event);
switch (event) {
case BFA_PPORT_LINKUP:
bfa_fcs_fabric_link_up(&fcs->fabric);
break;
case BFA_PPORT_LINKDOWN:
bfa_fcs_fabric_link_down(&fcs->fabric);
break;
case BFA_PPORT_TRUNK_LINKDOWN:
bfa_assert(0);
break;
default:
bfa_assert(0);
}
}
void
bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
{
bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
}

File diff not shown because it is too large.

View file

@ -1,99 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames)
*/
#include <fcs/bfa_fcs.h>
#include <bfa_svc.h>
#include <fcs/bfa_fcs_fabric.h>
#include "fcs.h"
#include "fcs_trcmod.h"
#include "fcs_fabric.h"
#include "fcs_uf.h"
BFA_TRC_FILE(FCS, UF);
/**
* BFA callback for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
* @param[in] uf unsolicited frame descriptor
*
* @return None
*/
static void
bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg;
struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
u16 len = bfa_uf_get_frmlen(uf);
struct fc_vft_s *vft;
struct bfa_fcs_fabric_s *fabric;
/**
* check for VFT header
*/
if (fchs->routing == FC_RTG_EXT_HDR &&
fchs->cat_info == FC_CAT_VFT_HDR) {
bfa_stats(fcs, uf.tagged);
vft = bfa_uf_get_frmbuf(uf);
if (fcs->port_vfid == vft->vf_id)
fabric = &fcs->fabric;
else
fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
/**
* drop frame if vfid is unknown
*/
if (!fabric) {
bfa_assert(0);
bfa_stats(fcs, uf.vfid_unknown);
bfa_uf_free(uf);
return;
}
/**
* skip vft header
*/
fchs = (struct fchs_s *) (vft + 1);
len -= sizeof(struct fc_vft_s);
bfa_trc(fcs, vft->vf_id);
} else {
bfa_stats(fcs, uf.untagged);
fabric = &fcs->fabric;
}
bfa_trc(fcs, ((u32 *) fchs)[0]);
bfa_trc(fcs, ((u32 *) fchs)[1]);
bfa_trc(fcs, ((u32 *) fchs)[2]);
bfa_trc(fcs, ((u32 *) fchs)[3]);
bfa_trc(fcs, ((u32 *) fchs)[4]);
bfa_trc(fcs, ((u32 *) fchs)[5]);
bfa_trc(fcs, len);
bfa_fcs_fabric_uf_recv(fabric, fchs, len);
bfa_uf_free(uf);
}
void
bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
{
bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
}

View file

@ -1,774 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfi/bfi_uf.h>
#include <cs/bfa_debug.h>
BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcxp);
/**
* forward declarations
*/
static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void bfa_fcxp_qresume(void *cbarg);
static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_req_s *send_req);
/**
* fcxp_pvt BFA FCXP private functions
*/
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
u8 *dm_kva = NULL;
u64 dm_pa;
u32 buf_pool_sz;
dm_kva = bfa_meminfo_dma_virt(mi);
dm_pa = bfa_meminfo_dma_phys(mi);
buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
/*
* Initialize the fcxp req payload list
*/
mod->req_pld_list_kva = dm_kva;
mod->req_pld_list_pa = dm_pa;
dm_kva += buf_pool_sz;
dm_pa += buf_pool_sz;
bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
/*
* Initialize the fcxp rsp payload list
*/
buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
mod->rsp_pld_list_kva = dm_kva;
mod->rsp_pld_list_pa = dm_pa;
dm_kva += buf_pool_sz;
dm_pa += buf_pool_sz;
bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
bfa_meminfo_dma_virt(mi) = dm_kva;
bfa_meminfo_dma_phys(mi) = dm_pa;
}
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
u16 i;
struct bfa_fcxp_s *fcxp;
fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
INIT_LIST_HEAD(&mod->fcxp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
mod->fcxp_list = fcxp;
for (i = 0; i < mod->num_fcxps; i++) {
fcxp->fcxp_mod = mod;
fcxp->fcxp_tag = i;
list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
fcxp->reqq_waiting = BFA_FALSE;
fcxp = fcxp + 1;
}
bfa_meminfo_kva(mi) = (void *)fcxp;
}
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
u32 *dm_len)
{
u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
if (num_fcxp_reqs == 0)
return;
/*
* Account for req/rsp payload
*/
*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
if (cfg->drvcfg.min_cfg)
*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
else
*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
/*
* Account for fcxp structs
*/
*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
mod->bfa = bfa;
mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
/**
* Initialize FCXP request and response payload sizes.
*/
mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
if (!cfg->drvcfg.min_cfg)
mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
INIT_LIST_HEAD(&mod->wait_q);
claim_fcxp_req_rsp_mem(mod, meminfo);
claim_fcxps_mem(mod, meminfo);
}
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
struct bfa_fcxp_s *fcxp;
struct list_head *qe, *qen;
list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
fcxp = (struct bfa_fcxp_s *) qe;
if (fcxp->caller == NULL) {
fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
bfa_fcxp_free(fcxp);
} else {
fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
bfa_cb_queue(bfa, &fcxp->hcb_qe,
__bfa_fcxp_send_cbfn, fcxp);
}
}
}
static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
{
struct bfa_fcxp_s *fcxp;
bfa_q_deq(&fm->fcxp_free_q, &fcxp);
if (fcxp)
list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
return fcxp;
}
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
struct bfa_fcxp_wqe_s *wqe;
bfa_q_deq(&mod->wait_q, &wqe);
if (wqe) {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
return;
}
bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
list_del(&fcxp->qe);
list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
/* discarded fcxp completion */
}
static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
struct bfa_fcxp_s *fcxp = cbarg;
if (complete) {
fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
fcxp->rsp_status, fcxp->rsp_len,
fcxp->residue_len, &fcxp->rsp_fchs);
} else {
bfa_fcxp_free(fcxp);
}
}
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
struct bfa_fcxp_s *fcxp;
u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
bfa_trc(bfa, fcxp_tag);
fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
/**
* @todo f/w should not set residue to non-0 when everything
* is received.
*/
if (fcxp_rsp->req_status == BFA_STATUS_OK)
fcxp_rsp->residue_len = 0;
else
fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
bfa_assert(fcxp->send_cbfn != NULL);
hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
if (fcxp->send_cbfn != NULL) {
if (fcxp->caller == NULL) {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
fcxp_rsp->req_status, fcxp_rsp->rsp_len,
fcxp_rsp->residue_len, &fcxp_rsp->fchs);
/*
* fcxp automatically freed on return from the callback
*/
bfa_fcxp_free(fcxp);
} else {
bfa_trc(mod->bfa, fcxp->fcxp_tag);
fcxp->rsp_status = fcxp_rsp->req_status;
fcxp->rsp_len = fcxp_rsp->rsp_len;
fcxp->residue_len = fcxp_rsp->residue_len;
fcxp->rsp_fchs = fcxp_rsp->fchs;
bfa_cb_queue(bfa, &fcxp->hcb_qe,
__bfa_fcxp_send_cbfn, fcxp);
}
} else {
bfa_trc(bfa, fcxp_tag);
}
}
static void
hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
{
union bfi_addr_u sga_zero = { {0} };
sge->sg_len = reqlen;
sge->flags = BFI_SGE_DATA_LAST;
bfa_dma_addr_set(sge[0].sga, req_pa);
bfa_sge_to_be(sge);
sge++;
sge->sga = sga_zero;
sge->sg_len = reqlen;
sge->flags = BFI_SGE_PGDLEN;
bfa_sge_to_be(sge);
}
static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
struct fchs_s *fchs)
{
/*
* TODO: TX ox_id
*/
if (reqlen > 0) {
if (fcxp->use_ireqbuf) {
u32 pld_w0 =
*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
BFA_PL_EID_TX,
reqlen + sizeof(struct fchs_s), fchs, pld_w0);
} else {
bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
fchs);
}
} else {
bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
reqlen + sizeof(struct fchs_s), fchs);
}
}
static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
if (fcxp_rsp->rsp_len > 0) {
if (fcxp->use_irspbuf) {
u32 pld_w0 =
*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
BFA_PL_EID_RX,
(u16) fcxp_rsp->rsp_len,
&fcxp_rsp->fchs, pld_w0);
} else {
bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
BFA_PL_EID_RX,
(u16) fcxp_rsp->rsp_len,
&fcxp_rsp->fchs);
}
} else {
bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
(u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
}
}
/**
* Handler to resume sending fcxp when space is available in the CPE queue.
*/
static void
bfa_fcxp_qresume(void *cbarg)
{
struct bfa_fcxp_s *fcxp = cbarg;
struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
struct bfi_fcxp_send_req_s *send_req;
fcxp->reqq_waiting = BFA_FALSE;
send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
bfa_fcxp_queue(fcxp, send_req);
}
/**
* Queue fcxp send request to firmware.
*/
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
struct bfa_rport_s *rport = reqi->bfa_rport;
bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
bfa_lpuid(bfa));
send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
if (rport) {
send_req->rport_fw_hndl = rport->fw_handle;
send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
if (send_req->max_frmsz == 0)
send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
} else {
send_req->rport_fw_hndl = 0;
send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
}
send_req->vf_id = bfa_os_htons(reqi->vf_id);
send_req->lp_tag = reqi->lp_tag;
send_req->class = reqi->class;
send_req->rsp_timeout = rspi->rsp_timeout;
send_req->cts = reqi->cts;
send_req->fchs = reqi->fchs;
send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
/*
* setup req sgles
*/
if (fcxp->use_ireqbuf == 1) {
hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
BFA_FCXP_REQ_PLD_PA(fcxp));
} else {
if (fcxp->nreq_sgles > 0) {
bfa_assert(fcxp->nreq_sgles == 1);
hal_fcxp_set_local_sges(send_req->req_sge,
reqi->req_tot_len,
fcxp->req_sga_cbfn(fcxp->caller,
0));
} else {
bfa_assert(reqi->req_tot_len == 0);
hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
}
}
/*
* setup rsp sgles
*/
if (fcxp->use_irspbuf == 1) {
bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
BFA_FCXP_RSP_PLD_PA(fcxp));
} else {
if (fcxp->nrsp_sgles > 0) {
bfa_assert(fcxp->nrsp_sgles == 1);
hal_fcxp_set_local_sges(send_req->rsp_sge,
rspi->rsp_maxlen,
fcxp->rsp_sga_cbfn(fcxp->caller,
0));
} else {
bfa_assert(rspi->rsp_maxlen == 0);
hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
}
}
hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
/**
* hal_fcxp_api BFA FCXP API
*/
/**
* Allocate an FCXP instance to send a response or to send a request
* that has a response. Request/response buffers are allocated by caller.
*
* @param[in] bfa BFA bfa instance
* @param[in] nreq_sgles Number of SG elements required for request
* buffer. 0, if fcxp internal buffers are used.
* Use bfa_fcxp_get_reqbuf() to get the
* internal req buffer.
* @param[in] req_sgles SG elements describing request buffer. Will be
* copied in by BFA and hence can be freed on
* return from this function.
* @param[in] get_req_sga function ptr to be called to get a request SG
* Address (given the sge index).
* @param[in] get_req_sglen function ptr to be called to get a request SG
* len (given the sge index).
* @param[in] get_rsp_sga function ptr to be called to get a response SG
* Address (given the sge index).
* @param[in] get_rsp_sglen function ptr to be called to get a response SG
* len (given the sge index).
*
* @return FCXP instance. NULL on failure.
*/
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
bfa_fcxp_get_sglen_t req_sglen_cbfn,
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
struct bfa_fcxp_s *fcxp = NULL;
u32 nreq_sgpg, nrsp_sgpg;
bfa_assert(bfa != NULL);
fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
if (fcxp == NULL)
return NULL;
bfa_trc(bfa, fcxp->fcxp_tag);
fcxp->caller = caller;
if (nreq_sgles == 0) {
fcxp->use_ireqbuf = 1;
} else {
bfa_assert(req_sga_cbfn != NULL);
bfa_assert(req_sglen_cbfn != NULL);
fcxp->use_ireqbuf = 0;
fcxp->req_sga_cbfn = req_sga_cbfn;
fcxp->req_sglen_cbfn = req_sglen_cbfn;
fcxp->nreq_sgles = nreq_sgles;
/*
* alloc required sgpgs
*/
if (nreq_sgles > BFI_SGE_INLINE) {
nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
!= BFA_STATUS_OK) {
/*
* TODO
*/
}
}
}
if (nrsp_sgles == 0) {
fcxp->use_irspbuf = 1;
} else {
bfa_assert(rsp_sga_cbfn != NULL);
bfa_assert(rsp_sglen_cbfn != NULL);
fcxp->use_irspbuf = 0;
fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
fcxp->nrsp_sgles = nrsp_sgles;
/*
* alloc required sgpgs
*/
if (nrsp_sgles > BFI_SGE_INLINE) {
nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
if (bfa_sgpg_malloc
(bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
!= BFA_STATUS_OK) {
/* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
nrsp_sgpg); */
/*
* TODO
*/
}
}
}
return fcxp;
}
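For illustration only (a hypothetical sketch, not part of this patch): allocating an FCXP that uses the driver's internal request/response buffers, i.e. with nreq_sgles and nrsp_sgles both zero so no SG callbacks are required. The example_* name is invented.

static struct bfa_fcxp_s *
example_fcxp_alloc(void *caller_ctx, struct bfa_s *bfa)
{
        struct bfa_fcxp_s *fcxp;

        /* internal buffers: no SG element callbacks needed */
        fcxp = bfa_fcxp_alloc(caller_ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
        if (fcxp == NULL)
                return NULL;    /* pool exhausted; see bfa_fcxp_alloc_wait() */

        /* build the request payload in bfa_fcxp_get_reqbuf(fcxp) */
        return fcxp;
}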
/**
* Get the internal request buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
*
* @return pointer to the internal request buffer
*/
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
void *reqbuf;
bfa_assert(fcxp->use_ireqbuf == 1);
reqbuf = ((u8 *)mod->req_pld_list_kva) +
fcxp->fcxp_tag * mod->req_pld_sz;
return reqbuf;
}
u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
return mod->req_pld_sz;
}
/**
* Get the internal response buffer pointer
*
* @param[in] fcxp BFA fcxp pointer
*
* @return pointer to the internal response buffer
*/
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
void *rspbuf;
bfa_assert(fcxp->use_irspbuf == 1);
rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
fcxp->fcxp_tag * mod->rsp_pld_sz;
return rspbuf;
}
/**
* Free the BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
*
* @return void
*/
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
bfa_assert(fcxp != NULL);
bfa_trc(mod->bfa, fcxp->fcxp_tag);
bfa_fcxp_put(fcxp);
}
/**
* Send a FCXP request
*
* @param[in] fcxp BFA fcxp pointer
* @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
* @param[in] vf_id virtual Fabric ID
* @param[in] lp_tag lport tag
* @param[in] cts use Continuous sequence
* @param[in] cos fc Class of Service
* @param[in] reqlen request length, does not include FCHS length
* @param[in] fchs fc Header Pointer. The header content will be copied
* in by BFA.
*
* @param[in] cbfn call back function to be called on receiving
* the response
* @param[in] cbarg arg for cbfn
* @param[in] rsp_timeout
* response timeout
*
* @return bfa_status_t
*/
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
struct bfi_fcxp_send_req_s *send_req;
bfa_trc(bfa, fcxp->fcxp_tag);
/**
* setup request/response info
*/
reqi->bfa_rport = rport;
reqi->vf_id = vf_id;
reqi->lp_tag = lp_tag;
reqi->class = cos;
rspi->rsp_timeout = rsp_timeout;
reqi->cts = cts;
reqi->fchs = *fchs;
reqi->req_tot_len = reqlen;
rspi->rsp_maxlen = rsp_maxlen;
fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
fcxp->send_cbarg = cbarg;
/**
* If no room in CPE queue, wait for space in request queue
*/
send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
if (!send_req) {
bfa_trc(bfa, fcxp->fcxp_tag);
fcxp->reqq_waiting = BFA_TRUE;
bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
return;
}
bfa_fcxp_queue(fcxp, send_req);
}
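For illustration only (a hypothetical sketch, not part of this patch): issuing a request on an already prepared FCXP with a completion callback. The example_* names are invented; the FC header, payload, class of service, expected response length and the 10 s timeout are assumed to be supplied or chosen by the caller.

static void
example_send_done(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
                  bfa_status_t req_status, u32 rsp_len,
                  u32 resid_len, struct fchs_s *rsp_fchs)
{
        /* inspect req_status and, on success, the response payload */
}

static void
example_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
                  struct fchs_s *fchs, u32 reqlen, enum fc_cos cos,
                  u32 rsp_maxlen)
{
        /* vf_id 0, lp_tag 0: base fabric; cts off; 10 second timeout */
        bfa_fcxp_send(fcxp, rport, 0, 0, BFA_FALSE, cos, reqlen, fchs,
                      example_send_done, NULL, rsp_maxlen, 10);
}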
/**
* Abort a BFA FCXP
*
* @param[in] fcxp BFA fcxp pointer
*
* @return void
*/
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
bfa_assert(0);
return BFA_STATUS_OK;
}
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
bfa_assert(list_empty(&mod->fcxp_free_q));
wqe->alloc_cbfn = alloc_cbfn;
wqe->alloc_cbarg = alloc_cbarg;
list_add_tail(&wqe->qe, &mod->wait_q);
}
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
list_del(&wqe->qe);
}
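For illustration only (a hypothetical sketch, not part of this patch): the allocate-or-wait pattern these two functions support. When the free pool is empty the caller parks a wait-queue element and is called back from bfa_fcxp_put() once an FCXP is released. The example_* names are invented and the callback signature is inferred from the wait-queue code above.

static void
example_fcxp_avail(void *cbarg, struct bfa_fcxp_s *fcxp)
{
        /* resume the deferred operation with the granted fcxp */
}

static void
example_alloc_or_wait(void *ctx, struct bfa_s *bfa,
                      struct bfa_fcxp_wqe_s *wqe)
{
        struct bfa_fcxp_s *fcxp;

        fcxp = bfa_fcxp_alloc(ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
        if (fcxp == NULL) {
                bfa_fcxp_alloc_wait(bfa, wqe, example_fcxp_avail, ctx);
                return;
        }
        /* use fcxp immediately; cancel a pending wait with
         * bfa_fcxp_walloc_cancel() if it is no longer needed */
}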
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
/**
* If waiting for room in request queue, cancel reqq wait
* and free fcxp.
*/
if (fcxp->reqq_waiting) {
fcxp->reqq_waiting = BFA_FALSE;
bfa_reqq_wcancel(&fcxp->reqq_wqe);
bfa_fcxp_free(fcxp);
return;
}
fcxp->send_cbfn = bfa_fcxp_null_comp;
}
/**
* hal_fcxp_public BFA FCXP public functions
*/
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
switch (msg->mhdr.msg_id) {
case BFI_FCXP_I2H_SEND_RSP:
hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
break;
default:
bfa_trc(bfa, msg->mhdr.msg_id);
bfa_assert(0);
}
}
u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
return mod->rsp_pld_sz;
}

View file

@ -1,138 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_FCXP_PRIV_H__
#define __BFA_FCXP_PRIV_H__
#include <cs/bfa_sm.h>
#include <protocol/fc.h>
#include <bfa_svc.h>
#include <bfi/bfi_fcxp.h>
#define BFA_FCXP_MIN (1)
#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
struct list_head fcxp_free_q; /* free FCXPs */
struct list_head fcxp_active_q; /* active FCXPs */
void *req_pld_list_kva; /* list of FCXP req pld */
u64 req_pld_list_pa; /* list of FCXP req pld */
void *rsp_pld_list_kva; /* list of FCXP resp pld */
u64 rsp_pld_list_pa; /* list of FCXP resp pld */
struct list_head wait_q; /* wait queue for free fcxp */
u32 req_pld_sz;
u32 rsp_pld_sz;
};
#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
void *cb_arg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs);
/**
* Information needed for a FCXP request
*/
struct bfa_fcxp_req_info_s {
struct bfa_rport_s *bfa_rport; /* Pointer to the bfa rport that was
 * returned from bfa_rport_create().
 * This could be left NULL for WKA or for
 * FCXP interactions before the rport
 * nexus is established
 */
struct fchs_s fchs; /* request FC header structure */
u8 cts; /* continuous sequence */
u8 class; /* FC class for the request/response */
u16 max_frmsz; /* max send frame size */
u16 vf_id; /* vsan tag if applicable */
u8 lp_tag; /* lport tag */
u32 req_tot_len; /* request payload total length */
};
struct bfa_fcxp_rsp_info_s {
struct fchs_s rsp_fchs; /* Response frame's FC header will
* be sent back in this field */
u8 rsp_timeout; /* timeout in seconds, 0-no response
*/
u8 rsvd2[3];
u32 rsp_maxlen; /* max response length expected */
};
struct bfa_fcxp_s {
struct list_head qe; /* fcxp queue element */
bfa_sm_t sm; /* state machine */
void *caller; /* driver or fcs */
struct bfa_fcxp_mod_s *fcxp_mod;
/* back pointer to fcxp mod */
u16 fcxp_tag; /* internal tag */
struct bfa_fcxp_req_info_s req_info;
/* request info */
struct bfa_fcxp_rsp_info_s rsp_info;
/* response info */
u8 use_ireqbuf; /* use internal req buf */
u8 use_irspbuf; /* use internal rsp buf */
u32 nreq_sgles; /* num request SGLEs */
u32 nrsp_sgles; /* num response SGLEs */
struct list_head req_sgpg_q; /* SG pages for request buf */
struct list_head req_sgpg_wqe; /* wait queue for req SG page */
struct list_head rsp_sgpg_q; /* SG pages for response buf */
struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
bfa_fcxp_get_sgaddr_t req_sga_cbfn;
/* SG elem addr user function */
bfa_fcxp_get_sglen_t req_sglen_cbfn;
/* SG elem len user function */
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
/* SG elem addr user function */
bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
/* SG elem len user function */
bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
void *send_cbarg; /* callback arg */
struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
/* req SG elems */
struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
/* rsp SG elems */
u8 rsp_status; /* comp: rsp status */
u32 rsp_len; /* comp: actual response len */
u32 residue_len; /* comp: residual rsp length */
struct fchs_s rsp_fchs; /* comp: response fchs */
struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
struct bfa_reqq_wait_s reqq_wqe;
bfa_boolean_t reqq_waiting;
};
#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->req_pld_list_pa + \
((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_FCXP_PRIV_H__ */

View file

@ -1,44 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_FWIMG_PRIV_H__
#define __BFA_FWIMG_PRIV_H__
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
/**
* BFI FW image type
*/
enum {
BFI_IMAGE_CB_FC,
BFI_IMAGE_CT_FC,
BFI_IMAGE_CT_CNA,
BFI_IMAGE_MAX,
};
extern u32 *bfi_image_get_chunk(int type, uint32_t off);
extern u32 bfi_image_get_size(int type);
extern u32 bfi_image_ct_fc_size;
extern u32 bfi_image_ct_cna_size;
extern u32 bfi_image_cb_fc_size;
extern u32 *bfi_image_ct_fc;
extern u32 *bfi_image_ct_cna;
extern u32 *bfi_image_cb_fc;
#endif /* __BFA_FWIMG_PRIV_H__ */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,15 +15,15 @@
* General Public License for more details.
*/
#include <bfa_priv.h>
#include <bfi/bfi_cbreg.h>
#include "bfa_modules.h"
#include "bfi_cbreg.h"
void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,9 +15,8 @@
* General Public License for more details.
*/
#include <bfa_priv.h>
#include <bfi/bfi_ctreg.h>
#include <bfa_ioc.h>
#include "bfa_modules.h"
#include "bfi_ctreg.h"
BFA_TRC_FILE(HAL, IOCFC_CT);
@ -53,7 +52,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@ -87,7 +86,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
void
bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
{
u32 r32;
u32 r32;
r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);

View file

@ -1,270 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfi/bfi_ctreg.h>
#include <bfa_port_priv.h>
#include <bfa_intr_priv.h>
#include <cs/bfa_debug.h>
BFA_TRC_FILE(HAL, INTR);
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
bfa_ioc_error_isr(&bfa->ioc);
}
static void
bfa_msix_lpu(struct bfa_s *bfa)
{
bfa_ioc_mbox_isr(&bfa->ioc);
}
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
struct list_head *waitq, *qe, *qen;
struct bfa_reqq_wait_s *wqe;
waitq = bfa_reqq(bfa, qid);
list_for_each_safe(qe, qen, waitq) {
/**
* Callback only as long as there is room in request queue
*/
if (bfa_reqq_full(bfa, qid))
break;
list_del(qe);
wqe = (struct bfa_reqq_wait_s *) qe;
wqe->qresume(wqe->cbarg);
}
}
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
bfa_intx(bfa);
}
/**
* hal_intr_api
*/
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
u32 intr, qintr;
int queue;
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
if (!intr)
return BFA_FALSE;
/**
* RME completion queue interrupt
*/
qintr = intr & __HFN_INT_RME_MASK;
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_RME_Q0 << queue))
bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
}
intr &= ~qintr;
if (!intr)
return BFA_TRUE;
/**
* CPE completion queue interrupt
*/
qintr = intr & __HFN_INT_CPE_MASK;
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_CPE_Q0 << queue))
bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
}
intr &= ~qintr;
if (!intr)
return BFA_TRUE;
bfa_msix_lpu_err(bfa, intr);
return BFA_TRUE;
}
void
bfa_isr_enable(struct bfa_s *bfa)
{
u32 intr_unmask;
int pci_func = bfa_ioc_pcifn(&bfa->ioc);
bfa_trc(bfa, pci_func);
bfa_msix_install(bfa);
intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
__HFN_INT_LL_HALT);
if (pci_func == 0)
intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
__HFN_INT_MBOX_LPU0);
else
intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
__HFN_INT_MBOX_LPU1);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
bfa->iocfc.intr_mask = ~intr_unmask;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
void
bfa_isr_disable(struct bfa_s *bfa)
{
bfa_isr_mode_set(bfa, BFA_FALSE);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
bfa_msix_uninstall(bfa);
}
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
struct list_head *waitq;
qid &= (BFI_IOC_MAX_CQS - 1);
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
/**
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
}
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
bfa_trc(bfa, m->mhdr.msg_class);
bfa_trc(bfa, m->mhdr.msg_id);
bfa_trc(bfa, m->mhdr.mtag.i2htok);
bfa_assert(0);
bfa_trc_stop(bfa->trcmod);
}
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
u32 pi, ci;
struct list_head *waitq;
bfa_trc_fp(bfa, qid);
qid &= (BFI_IOC_MAX_CQS - 1);
bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
ci = bfa_rspq_ci(bfa, qid);
pi = bfa_rspq_pi(bfa, qid);
bfa_trc_fp(bfa, ci);
bfa_trc_fp(bfa, pi);
if (bfa->rme_process) {
while (ci != pi) {
m = bfa_rspq_elem(bfa, qid, ci);
bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
bfa_isrs[m->mhdr.msg_class] (bfa, m);
CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
}
}
/**
* update CI
*/
bfa_rspq_ci(bfa, qid) = pi;
bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
bfa_os_mmiowb();
/**
* Resume any pending requests in the corresponding reqq.
*/
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
}
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
u32 intr, curr_value;
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
bfa_msix_lpu(bfa);
intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
if (intr) {
if (intr & __HFN_INT_LL_HALT) {
/**
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
* Status Register will be cleared.
*/
curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
curr_value &= ~__FW_INIT_HALT_P;
bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
}
if (intr & __HFN_INT_ERR_PSS) {
/**
* ERR_PSS bit needs to be cleared as well in case
* interrupts are shared, so the driver's interrupt handler is
* still called even though it is already masked out.
*/
curr_value = bfa_reg_read(
bfa->ioc.ioc_regs.pss_err_status_reg);
curr_value &= __PSS_ERR_STATUS_SET;
bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
curr_value);
}
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
bfa_msix_errint(bfa, intr);
}
}
void
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
{
bfa_isrs[mc] = isr_func;
}

View file

@ -1,117 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_INTR_PRIV_H__
#define __BFA_INTR_PRIV_H__
/**
* Message handler
*/
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq])
#define bfa_reqq_ci(__bfa, __reqq) \
(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
#define bfa_reqq_full(__bfa, __reqq) \
(((bfa_reqq_pi(__bfa, __reqq) + 1) & \
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \
bfa_reqq_ci(__bfa, __reqq))
#define bfa_reqq_next(__bfa, __reqq) \
(bfa_reqq_full(__bfa, __reqq) ? NULL : \
((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+ bfa_reqq_pi((__bfa), (__reqq)))))
#define bfa_reqq_produce(__bfa, __reqq) do { \
(__bfa)->iocfc.req_cq_pi[__reqq]++; \
(__bfa)->iocfc.req_cq_pi[__reqq] &= \
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
(__bfa)->iocfc.req_cq_pi[__reqq]); \
bfa_os_mmiowb(); \
} while (0)
#define bfa_rspq_pi(__bfa, __rspq) \
(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq])
#define bfa_rspq_elem(__bfa, __rspq, __ci) \
(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
#define CQ_INCR(__index, __size) do { \
(__index)++; \
(__index) &= ((__size) - 1); \
} while (0)
/**
* Queue element to wait for room in request queue. FIFO order is
* maintained when fulfilling requests.
*/
struct bfa_reqq_wait_s {
struct list_head qe;
void (*qresume) (void *cbarg);
void *cbarg;
};
/**
* Circular queue usage assignments
*/
enum {
BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */
BFA_REQQ_FCXP = 0, /* all FCXP messages */
BFA_REQQ_LPS = 0, /* all lport service msgs */
BFA_REQQ_PORT = 0, /* all port messages */
BFA_REQQ_FLASH = 0, /* for flash module */
BFA_REQQ_DIAG = 0, /* for diag module */
BFA_REQQ_RPORT = 0, /* all port messages */
BFA_REQQ_SBOOT = 0, /* all san boot messages */
BFA_REQQ_QOS_LO = 1, /* all low priority IO */
BFA_REQQ_QOS_MD = 2, /* all medium priority IO */
BFA_REQQ_QOS_HI = 3, /* all high priority IO */
};
static inline void
bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
void *cbarg)
{
wqe->qresume = qresume;
wqe->cbarg = cbarg;
}
#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq])
/**
* static inline void
* bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
*/
#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \
\
struct list_head *waitq = bfa_reqq(__bfa, __reqq); \
\
bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \
bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \
\
list_add_tail(&(__wqe)->qe, waitq); \
} while (0)
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
#endif /* __BFA_INTR_PRIV_H__ */
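For illustration only (a hypothetical sketch, not part of this patch): the request-queue idiom these macros support, namely reserve the next CPE element, fill it in, then bump the producer index. The example_* name is invented and the message setup is elided.

static bfa_boolean_t
example_post_msg(struct bfa_s *bfa, int reqq)
{
        struct bfi_msg_s *msg = bfa_reqq_next(bfa, reqq);

        if (msg == NULL)
                return BFA_FALSE;       /* queue full; park a bfa_reqq_wait_s
                                         * with bfa_reqq_wait() instead */

        /* ... build the request message in *msg here ... */

        bfa_reqq_produce(bfa, reqq);    /* advance PI and write it to the
                                         * CPE producer-index register */
        return BFA_TRUE;
}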

File diff not shown because it is too large.

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -18,18 +18,74 @@
#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__
#include <cs/bfa_sm.h>
#include <bfi/bfi.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_boot.h>
#include <bfa_timer.h>
#include "bfa_os_inc.h"
#include "bfa_cs.h"
#include "bfi.h"
/**
* BFA timer declarations
*/
typedef void (*bfa_timer_cbfn_t)(void *);
/**
* BFA timer data structure
*/
struct bfa_timer_s {
struct list_head qe;
bfa_timer_cbfn_t timercb;
void *arg;
int timeout; /**< in millisecs. */
};
/**
* Timer module structure
*/
struct bfa_timer_mod_s {
struct list_head timer_q;
};
#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
void bfa_timer_beat(struct bfa_timer_mod_s *mod);
void bfa_timer_init(struct bfa_timer_mod_s *mod);
void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
bfa_timer_cbfn_t timercb, void *arg,
unsigned int timeout);
void bfa_timer_stop(struct bfa_timer_s *timer);
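For illustration only (a hypothetical sketch, not part of this patch): arming a timer through the API declared above. The example_* names and the 2000 ms timeout are invented.

static void
example_timeout(void *arg)
{
        /* timer expired; re-arm with bfa_timer_begin() if the caller
         * wants periodic behaviour */
}

static void
example_start_timer(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
                    void *arg)
{
        bfa_timer_begin(mod, timer, example_timeout, arg, 2000);
        /* cancel early with bfa_timer_stop(timer) if needed */
}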
/**
* Generic Scatter Gather Element used by driver
*/
struct bfa_sge_s {
u32 sg_len;
void *sg_addr;
};
#define bfa_sge_word_swap(__sge) do { \
((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \
((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \
((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \
} while (0)
#define bfa_swap_words(_x) ( \
((_x) << 32) | ((_x) >> 32))
#ifdef __BIGENDIAN
#define bfa_sge_to_be(_x)
#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x)
#define bfa_sgaddr_le(_x) bfa_swap_words(_x)
#else
#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x)
#define bfa_sge_to_le(_x)
#define bfa_sgaddr_le(_x) (_x)
#endif
/**
* PCI device information required by IOC
*/
struct bfa_pcidev_s {
int pci_slot;
u8 pci_func;
int pci_slot;
u8 pci_func;
u16 device_id;
bfa_os_addr_t pci_bar_kva;
};
@ -39,13 +95,18 @@ struct bfa_pcidev_s {
* Address
*/
struct bfa_dma_s {
void *kva; /*! Kernel virtual address */
u64 pa; /*! Physical address */
void *kva; /* ! Kernel virtual address */
u64 pa; /* ! Physical address */
};
#define BFA_DMA_ALIGN_SZ 256
#define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
/**
* smem size for Crossbow and Catapult
*/
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
#define bfa_dma_addr_set(dma_addr, pa) \
@ -101,7 +162,7 @@ struct bfa_ioc_regs_s {
* IOC Mailbox structures
*/
struct bfa_mbox_cmd_s {
struct list_head qe;
struct list_head qe;
u32 msg[BFI_IOC_MSGSZ];
};
@ -110,8 +171,8 @@ struct bfa_mbox_cmd_s {
*/
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
struct bfa_ioc_mbox_mod_s {
struct list_head cmd_q; /* pending mbox queue */
int nmclass; /* number of handlers */
struct list_head cmd_q; /* pending mbox queue */
int nmclass; /* number of handlers */
struct {
bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */
void *cbarg;
@ -149,49 +210,54 @@ struct bfa_ioc_hbfail_notify_s {
(__notify)->cbarg = (__cbarg); \
} while (0)
struct bfa_iocpf_s {
bfa_fsm_t fsm;
struct bfa_ioc_s *ioc;
u32 retry_count;
bfa_boolean_t auto_recover;
};
struct bfa_ioc_s {
bfa_fsm_t fsm;
struct bfa_s *bfa;
struct bfa_pcidev_s pcidev;
struct bfa_timer_mod_s *timer_mod;
struct bfa_timer_s ioc_timer;
struct bfa_timer_s sem_timer;
struct bfa_timer_mod_s *timer_mod;
struct bfa_timer_s ioc_timer;
struct bfa_timer_s sem_timer;
struct bfa_timer_s hb_timer;
u32 hb_count;
u32 retry_count;
struct list_head hb_notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
bfa_boolean_t dbg_fwsave_once;
enum bfi_mclass ioc_mc;
struct bfa_ioc_regs_s ioc_regs;
struct bfa_ioc_regs_s ioc_regs;
struct bfa_trc_mod_s *trcmod;
struct bfa_aen_s *aen;
struct bfa_log_mod_s *logm;
struct bfa_ioc_drv_stats_s stats;
bfa_boolean_t auto_recover;
bfa_boolean_t fcmode;
bfa_boolean_t ctdev;
bfa_boolean_t cna;
bfa_boolean_t pllinit;
bfa_boolean_t stats_busy; /* outstanding stats */
u8 port_id;
struct bfa_dma_s attr_dma;
struct bfi_ioc_attr_s *attr;
struct bfa_ioc_cbfn_s *cbfn;
struct bfa_ioc_mbox_mod_s mbox_mod;
struct bfa_ioc_hwif_s *ioc_hwif;
struct bfa_ioc_hwif_s *ioc_hwif;
struct bfa_iocpf_s iocpf;
};
struct bfa_ioc_hwif_s {
bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
void (*ioc_map_port) (struct bfa_ioc_s *ioc);
void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
bfa_boolean_t msix);
void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
void (*ioc_map_port) (struct bfa_ioc_s *ioc);
void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
bfa_boolean_t msix);
void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@ -206,18 +272,19 @@ struct bfa_ioc_hwif_s {
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
#define bfa_ioc_get_nports(__ioc) \
BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_FWIMG_TYPE(__ioc) \
(((__ioc)->ctdev) ? \
(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_FWIMG_TYPE(__ioc) \
(((__ioc)->ctdev) ? \
(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
BFI_IMAGE_CB_FC)
#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
(((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
/**
@ -235,18 +302,28 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
/**
* IOC interfaces
*/
#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
#define bfa_ioc_isr_mode_set(__ioc, __msix) \
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->fcmode))
bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
#define bfa_ioc_isr_mode_set(__ioc, __msix) \
((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
#define bfa_ioc_ownership_reset(__ioc) \
#define bfa_ioc_ownership_reset(__ioc) \
((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
struct bfa_trc_mod_s *trcmod,
struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
void bfa_ioc_detach(struct bfa_ioc_s *ioc);
void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
enum bfi_mclass mc);
@ -256,21 +333,22 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);
void bfa_ioc_disable(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
u32 boot_param);
void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
char *manufacturer);
char *manufacturer);
void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
@ -284,6 +362,8 @@ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
int *trclen);
bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen);
u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
@ -297,7 +377,8 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
/*
* bfa mfg wwn API functions
@ -310,5 +391,68 @@ wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
#endif /* __BFA_IOC_H__ */
/*
* F/W Image Size & Chunk
*/
extern u32 bfi_image_ct_fc_size;
extern u32 bfi_image_ct_cna_size;
extern u32 bfi_image_cb_fc_size;
extern u32 *bfi_image_ct_fc;
extern u32 *bfi_image_ct_cna;
extern u32 *bfi_image_cb_fc;
static inline u32 *
bfi_image_ct_fc_get_chunk(u32 off)
{ return (u32 *)(bfi_image_ct_fc + off); }
static inline u32 *
bfi_image_ct_cna_get_chunk(u32 off)
{ return (u32 *)(bfi_image_ct_cna + off); }
static inline u32 *
bfi_image_cb_fc_get_chunk(u32 off)
{ return (u32 *)(bfi_image_cb_fc + off); }
static inline u32*
bfa_cb_image_get_chunk(int type, u32 off)
{
switch (type) {
case BFI_IMAGE_CT_FC:
return bfi_image_ct_fc_get_chunk(off); break;
case BFI_IMAGE_CT_CNA:
return bfi_image_ct_cna_get_chunk(off); break;
case BFI_IMAGE_CB_FC:
return bfi_image_cb_fc_get_chunk(off); break;
default: return 0;
}
}
static inline u32
bfa_cb_image_get_size(int type)
{
switch (type) {
case BFI_IMAGE_CT_FC:
return bfi_image_ct_fc_size; break;
case BFI_IMAGE_CT_CNA:
return bfi_image_ct_cna_size; break;
case BFI_IMAGE_CB_FC:
return bfi_image_cb_fc_size; break;
default: return 0;
}
}
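For illustration only (a hypothetical sketch, not part of this patch): walking a firmware image chunk by chunk with the accessors above. It assumes, as the flash-chunk macros earlier in this header suggest, that the size is counted in 32-bit words and that BFA_IOC_FLASH_CHUNK_ADDR() is available; the example_* name and the first-word checksum are invented stand-ins for a real copy to adapter smem.

static u32
example_sum_fwimg(int img_type)
{
        u32 size = bfa_cb_image_get_size(img_type);     /* in words (assumed) */
        u32 chunkno, sum = 0;
        u32 *chunk;

        for (chunkno = 0; BFA_IOC_FLASH_CHUNK_ADDR(chunkno) < size; chunkno++) {
                chunk = bfa_cb_image_get_chunk(img_type,
                                BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                sum += chunk[0];        /* stand-in for copying the chunk
                                         * to adapter smem */
        }
        return sum;
}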
/**
* CNA TRCMOD declaration
*/
/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
enum {
BFA_TRC_CNA_PORT = 1,
BFA_TRC_CNA_IOC = 2,
BFA_TRC_CNA_IOC_CB = 3,
BFA_TRC_CNA_IOC_CT = 4,
};
#endif /* __BFA_IOC_H__ */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,22 +15,15 @@
* General Public License for more details.
*/
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_cbreg.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_ioc.h"
#include "bfi_cbreg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CB);
/*
* forward declarations
*/
static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
@ -95,6 +88,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
* Host <-> LPU mailbox command/status registers
*/
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
};
@ -154,6 +148,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
/**
* Initialize IOC to port mapping.
*/
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
@ -161,6 +156,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
* For crossbow, port id is same as pci function.
*/
ioc->port_id = bfa_ioc_pcifn(ioc);
bfa_trc(ioc, ioc->port_id);
}
@ -172,75 +168,6 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
}
static bfa_status_t
bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
{
bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
u32 pll_sclk, pll_fclk;
/*
* Hold semaphore so that nobody can access the chip during init.
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
__APP_PLL_212_P0_1(3U) |
__APP_PLL_212_JITLMT0_1(3U) |
__APP_PLL_212_CNTLMT0_1(3U);
pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
__APP_PLL_400_JITLMT0_1(3U) |
__APP_PLL_400_CNTLMT0_1(3U);
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
__APP_PLL_212_BYPASS |
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
__APP_PLL_400_BYPASS |
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_os_udelay(2);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
/**
* Wait for PLLs to lock.
*/
bfa_os_udelay(2000);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
/*
* release semaphore.
*/
bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
}
/**
* Cleanup hw semaphore and usecnt registers
*/
@ -256,3 +183,54 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
bfa_ioc_hw_sem_release(ioc);
}
bfa_status_t
bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
{
u32 pll_sclk, pll_fclk;
pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
__APP_PLL_212_P0_1(3U) |
__APP_PLL_212_JITLMT0_1(3U) |
__APP_PLL_212_CNTLMT0_1(3U);
pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
__APP_PLL_400_JITLMT0_1(3U) |
__APP_PLL_400_CNTLMT0_1(3U);
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
__APP_PLL_212_BYPASS |
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
__APP_PLL_400_BYPASS |
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_os_udelay(2);
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
__APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
__APP_PLL_400_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_212_CTL_REG,
pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_400_CTL_REG,
pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
bfa_os_udelay(2000);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
return BFA_STATUS_OK;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,22 +15,15 @@
* General Public License for more details.
*/
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);
/*
* forward declarations
*/
static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
@ -78,7 +71,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
/**
* If bios boot (flash based) -- do not increment usage count
*/
if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return BFA_TRUE;
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@ -136,7 +130,8 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
/**
* If bios boot (flash based) -- do not decrement usage count
*/
if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
/**
@ -308,111 +303,6 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
bfa_reg_write(rb + FNC_PERS_REG, r32);
}
static bfa_status_t
bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
{
bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
u32 pll_sclk, pll_fclk, r32;
/*
* Hold semaphore so that nobody can access the chip during init.
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
__APP_PLL_312_JITLMT0_1(3U) |
__APP_PLL_312_CNTLMT0_1(1U);
pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
__APP_PLL_425_JITLMT0_1(3U) |
__APP_PLL_425_CNTLMT0_1(1U);
/**
* For catapult, choose operational mode FC/FCoE
*/
if (ioc->fcmode) {
bfa_reg_write((rb + OP_MODE), 0);
bfa_reg_write((rb + ETH_MAC_SER_REG),
__APP_EMS_CMLCKSEL |
__APP_EMS_REFCKBUFEN2 |
__APP_EMS_CHANNEL_SEL);
} else {
ioc->pllinit = BFA_TRUE;
bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
bfa_reg_write((rb + ETH_MAC_SER_REG),
__APP_EMS_REFCKBUFEN1);
}
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
__APP_PLL_312_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
__APP_PLL_425_LOGIC_SOFT_RESET);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
/**
* Wait for PLLs to lock.
*/
bfa_reg_read(rb + HOSTFN0_INT_MSK);
bfa_os_udelay(2000);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
__APP_PLL_312_ENABLE);
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
__APP_PLL_425_ENABLE);
/**
* PSS memory reset is asserted at power-on-reset. Need to clear
* this before running EDRAM BISTR
*/
if (ioc->cna) {
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
}
r32 = bfa_reg_read((rb + PSS_CTL_REG));
r32 &= ~__PSS_LMEM_RESET;
bfa_reg_write((rb + PSS_CTL_REG), r32);
bfa_os_udelay(1000);
if (ioc->cna) {
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
}
bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
bfa_os_udelay(1000);
r32 = bfa_reg_read((rb + MBIST_STAT_REG));
bfa_trc(ioc, r32);
/**
* Clear BISTR
*/
bfa_reg_write((rb + MBIST_CTL_REG), 0);
/*
* release semaphore.
*/
bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
}
/**
* Cleanup hw semaphore and usecnt registers
*/
@ -434,3 +324,86 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
bfa_ioc_hw_sem_release(ioc);
}
/*
* Check the firmware state to know if pll_init has been completed already
*/
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
{
if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
(bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
return BFA_TRUE;
return BFA_FALSE;
}
bfa_status_t
bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
{
u32 pll_sclk, pll_fclk, r32;
pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
__APP_PLL_312_JITLMT0_1(3U) |
__APP_PLL_312_CNTLMT0_1(1U);
pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
__APP_PLL_425_JITLMT0_1(3U) |
__APP_PLL_425_CNTLMT0_1(1U);
if (fcmode) {
bfa_reg_write((rb + OP_MODE), 0);
bfa_reg_write((rb + ETH_MAC_SER_REG),
__APP_EMS_CMLCKSEL |
__APP_EMS_REFCKBUFEN2 |
__APP_EMS_CHANNEL_SEL);
} else {
bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
bfa_reg_write((rb + ETH_MAC_SER_REG),
__APP_EMS_REFCKBUFEN1);
}
bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
__APP_PLL_312_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
__APP_PLL_425_LOGIC_SOFT_RESET);
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
bfa_reg_read(rb + HOSTFN0_INT_MSK);
bfa_os_udelay(2000);
bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
__APP_PLL_312_ENABLE);
bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
__APP_PLL_425_ENABLE);
if (!fcmode) {
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
}
r32 = bfa_reg_read((rb + PSS_CTL_REG));
r32 &= ~__PSS_LMEM_RESET;
bfa_reg_write((rb + PSS_CTL_REG), r32);
bfa_os_udelay(1000);
if (!fcmode) {
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
}
bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
bfa_os_udelay(1000);
r32 = bfa_reg_read((rb + MBIST_STAT_REG));
bfa_reg_write((rb + MBIST_CTL_REG), 0);
return BFA_STATUS_OK;
}
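The hunk above replaces the ioc-based bfa_ioc_ct_pll_init(struct bfa_ioc_s *) with a variant that takes only the register base and an fcmode flag, and adds bfa_ioc_ct_pll_init_complete() so callers can detect an already-initialized PLL from the firmware state registers. A minimal caller sketch, assuming only the two signatures shown in this hunk (the example_ function name is hypothetical):
/*
 * Hypothetical caller sketch -- not part of this patch.  It relies only on
 * the two functions introduced above: bfa_ioc_ct_pll_init_complete(rb) and
 * bfa_ioc_ct_pll_init(rb, fcmode).
 */
static bfa_status_t
example_ct_hw_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
{
	/* Firmware already operational -- the PLLs are locked, skip init. */
	if (bfa_ioc_ct_pll_init_complete(rb))
		return BFA_STATUS_OK;

	/* Otherwise run the full PLL, LMEM and BIST bring-up sequence. */
	return bfa_ioc_ct_pll_init(rb, fcmode);
}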


@ -1,927 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <cs/bfa_debug.h>
#include <bfa_priv.h>
#include <log/bfa_log_hal.h>
#include <bfi/bfi_boot.h>
#include <bfi/bfi_cbreg.h>
#include <aen/bfa_aen_ioc.h>
#include <defs/bfa_defs_iocfc.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_callback_priv.h"
#include "bfad_drv.h"
BFA_TRC_FILE(HAL, IOCFC);
/**
* IOC local definitions
*/
#define BFA_IOCFC_TOV 5000 /* msecs */
enum {
BFA_IOCFC_ACT_NONE = 0,
BFA_IOCFC_ACT_INIT = 1,
BFA_IOCFC_ACT_STOP = 2,
BFA_IOCFC_ACT_DISABLE = 3,
};
/*
* forward declarations
*/
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static void bfa_iocfc_stats_clear(void *bfa_arg);
static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
struct bfa_fw_stats_s *s);
static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_timeout(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
/**
* bfa_ioc_pvt BFA IOC private functions
*/
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
int i, per_reqq_sz, per_rspq_sz;
per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
BFA_DMA_ALIGN_SZ);
per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
BFA_DMA_ALIGN_SZ);
/*
* Calculate CQ size
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
*dm_len = *dm_len + per_reqq_sz;
*dm_len = *dm_len + per_rspq_sz;
}
/*
* Calculate Shadow CI/PI size
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++)
*dm_len += (2 * BFA_CACHELINE_SZ);
}
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
*dm_len +=
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
*dm_len +=
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
BFA_CACHELINE_SZ);
*dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
}
/**
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
*/
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfg_req_s cfg_req;
struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
int i;
bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
bfa_trc(bfa, cfg->fwcfg.num_cqs);
bfa_iocfc_reset_queues(bfa);
/**
* initialize IOC configuration info
*/
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);
/**
* dma map REQ and RSP circular queues and shadow pointers
*/
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
iocfc->req_cq_ba[i].pa);
bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
iocfc->req_cq_shadow_ci[i].pa);
cfg_info->req_cq_elems[i] =
bfa_os_htons(cfg->drvcfg.num_reqq_elems);
bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
iocfc->rsp_cq_ba[i].pa);
bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
iocfc->rsp_cq_shadow_pi[i].pa);
cfg_info->rsp_cq_elems[i] =
bfa_os_htons(cfg->drvcfg.num_rspq_elems);
}
/**
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
*/
if (!iocfc->cfgdone)
cfg_info->intr_attr.coalesce = BFA_TRUE;
iocfc->cfgdone = BFA_FALSE;
/**
* dma map IOC configuration itself
*/
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
bfa_lpuid(bfa));
bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
sizeof(struct bfi_iocfc_cfg_req_s));
}
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
bfa->bfad = bfad;
iocfc->bfa = bfa;
iocfc->action = BFA_IOCFC_ACT_NONE;
bfa_os_assign(iocfc->cfg, *cfg);
/**
* Initialize chip specific handlers.
*/
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
iocfc->hwif.hw_reginit = bfa_hwct_reginit;
iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
} else {
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
}
iocfc->hwif.hw_reginit(bfa);
bfa->msix.nvecs = 0;
}
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo)
{
u8 *dm_kva;
u64 dm_pa;
int i, per_reqq_sz, per_rspq_sz;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
int dbgsz;
dm_kva = bfa_meminfo_dma_virt(meminfo);
dm_pa = bfa_meminfo_dma_phys(meminfo);
/*
* First allocate dma memory for IOC.
*/
bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
dm_kva += bfa_ioc_meminfo();
dm_pa += bfa_ioc_meminfo();
/*
* Claim DMA-able memory for the request/response queues and for shadow
* ci/pi registers
*/
per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
BFA_DMA_ALIGN_SZ);
per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
BFA_DMA_ALIGN_SZ);
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_ba[i].kva = dm_kva;
iocfc->req_cq_ba[i].pa = dm_pa;
bfa_os_memset(dm_kva, 0, per_reqq_sz);
dm_kva += per_reqq_sz;
dm_pa += per_reqq_sz;
iocfc->rsp_cq_ba[i].kva = dm_kva;
iocfc->rsp_cq_ba[i].pa = dm_pa;
bfa_os_memset(dm_kva, 0, per_rspq_sz);
dm_kva += per_rspq_sz;
dm_pa += per_rspq_sz;
}
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_shadow_ci[i].kva = dm_kva;
iocfc->req_cq_shadow_ci[i].pa = dm_pa;
dm_kva += BFA_CACHELINE_SZ;
dm_pa += BFA_CACHELINE_SZ;
iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
dm_kva += BFA_CACHELINE_SZ;
dm_pa += BFA_CACHELINE_SZ;
}
/*
* Claim DMA-able memory for the config info page
*/
bfa->iocfc.cfg_info.kva = dm_kva;
bfa->iocfc.cfg_info.pa = dm_pa;
bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
/*
* Claim DMA-able memory for the config response
*/
bfa->iocfc.cfgrsp_dma.kva = dm_kva;
bfa->iocfc.cfgrsp_dma.pa = dm_pa;
bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
dm_kva +=
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
BFA_CACHELINE_SZ);
/*
* Claim DMA-able memory for iocfc stats
*/
bfa->iocfc.stats_kva = dm_kva;
bfa->iocfc.stats_pa = dm_pa;
bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
bfa_meminfo_dma_virt(meminfo) = dm_kva;
bfa_meminfo_dma_phys(meminfo) = dm_pa;
dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
if (dbgsz > 0) {
bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
bfa_meminfo_kva(meminfo) += dbgsz;
}
}
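bfa_iocfc_mem_claim() above carves one large DMA allocation into per-queue and per-structure slices by advancing the kernel-virtual and physical cursors together, rounding every slice up to BFA_DMA_ALIGN_SZ or BFA_CACHELINE_SZ. A stand-alone sketch of that carve-out pattern, assuming a power-of-two alignment (all demo_* names are hypothetical; only the pattern comes from the function above):
/* Hypothetical reduction of the DMA carve-out pattern used above. */
#define DEMO_ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of two */

struct demo_dma_slice {
	void *kva;		/* kernel virtual address of the slice */
	unsigned long long pa;	/* bus/physical address of the slice */
};

static void
demo_claim(struct demo_dma_slice *s, unsigned char **dm_kva,
	   unsigned long long *dm_pa, unsigned long len, unsigned long align)
{
	len = DEMO_ROUNDUP(len, align);
	s->kva = *dm_kva;
	s->pa  = *dm_pa;
	*dm_kva += len;		/* both cursors advance by the same rounded length */
	*dm_pa  += len;		/* so the kva/pa pair for the next slice stays in sync */
}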
/**
* Start BFA submodules.
*/
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
int i;
bfa->rme_process = BFA_TRUE;
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
}
/**
* Disable BFA submodules.
*/
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
int i;
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->iocdisable(bfa);
}
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
struct bfa_s *bfa = bfa_arg;
if (complete) {
if (bfa->iocfc.cfgdone)
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
else
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
} else {
if (bfa->iocfc.cfgdone)
bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
}
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
struct bfa_s *bfa = bfa_arg;
struct bfad_s *bfad = bfa->bfad;
if (compl)
complete(&bfad->comp);
else
bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}
static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
struct bfa_s *bfa = bfa_arg;
struct bfad_s *bfad = bfa->bfad;
if (compl)
complete(&bfad->disable_comp);
}
/**
* Update BFA configuration from firmware configuration.
*/
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
fwcfg->num_cqs = fwcfg->num_cqs;
fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
iocfc->cfgdone = BFA_TRUE;
/**
* Configuration is complete - initialize/start submodules
*/
bfa_fcport_init(bfa);
if (iocfc->action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
else
bfa_iocfc_start_submod(bfa);
}
static void
bfa_iocfc_stats_clear(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_stats_req_s stats_req;
bfa_timer_start(bfa, &iocfc->stats_timer,
bfa_iocfc_stats_clr_timeout, bfa,
BFA_IOCFC_TOV);
bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
bfa_lpuid(bfa));
bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
sizeof(struct bfi_iocfc_stats_req_s));
}
static void
bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
{
u32 *dip = (u32 *) d;
u32 *sip = (u32 *) s;
int i;
for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
dip[i] = bfa_os_ntohl(sip[i]);
}
static void
bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
if (complete) {
bfa_ioc_clr_stats(&bfa->ioc);
iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
} else {
iocfc->stats_busy = BFA_FALSE;
iocfc->stats_status = BFA_STATUS_OK;
}
}
static void
bfa_iocfc_stats_clr_timeout(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
bfa_trc(bfa, 0);
iocfc->stats_status = BFA_STATUS_ETIMER;
bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
}
static void
bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
if (complete) {
if (iocfc->stats_status == BFA_STATUS_OK) {
bfa_os_memset(iocfc->stats_ret, 0,
sizeof(*iocfc->stats_ret));
bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
iocfc->fw_stats);
}
iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
} else {
iocfc->stats_busy = BFA_FALSE;
iocfc->stats_status = BFA_STATUS_OK;
}
}
static void
bfa_iocfc_stats_timeout(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
bfa_trc(bfa, 0);
iocfc->stats_status = BFA_STATUS_ETIMER;
bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
}
static void
bfa_iocfc_stats_query(struct bfa_s *bfa)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_stats_req_s stats_req;
bfa_timer_start(bfa, &iocfc->stats_timer,
bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
bfa_lpuid(bfa));
bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
sizeof(struct bfi_iocfc_stats_req_s));
}
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
int q;
for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
bfa_reqq_ci(bfa, q) = 0;
bfa_reqq_pi(bfa, q) = 0;
bfa_rspq_ci(bfa, q) = 0;
bfa_rspq_pi(bfa, q) = 0;
}
}
/**
* IOC enable request is complete
*/
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
struct bfa_s *bfa = bfa_arg;
if (status != BFA_STATUS_OK) {
bfa_isr_disable(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
bfa_iocfc_init_cb, bfa);
return;
}
bfa_iocfc_send_cfg(bfa);
}
/**
* IOC disable request is complete
*/
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
bfa_isr_disable(bfa);
bfa_iocfc_disable_submod(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
bfa);
else {
bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
bfa);
}
}
/**
* Notify sub-modules of hardware failure.
*/
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
bfa->rme_process = BFA_FALSE;
bfa_isr_disable(bfa);
bfa_iocfc_disable_submod(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
bfa);
}
/**
* Actions on chip-reset completion.
*/
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
bfa_iocfc_reset_queues(bfa);
bfa_isr_enable(bfa);
}
/**
* bfa_ioc_public
*/
/**
* Query IOC memory requirement information.
*/
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
{
/* dma memory for IOC */
*dm_len += bfa_ioc_meminfo();
bfa_iocfc_fw_cfg_sz(cfg, dm_len);
bfa_iocfc_cqs_sz(cfg, dm_len);
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}
/**
* Attach the IOCFC module: register IOC callbacks and claim module memory.
*/
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
int i;
bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
bfa->trcmod, bfa->aen, bfa->logm);
/**
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
*/
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
bfa_ioc_set_fcmode(&bfa->ioc);
bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
bfa_iocfc_mem_claim(bfa, cfg, meminfo);
bfa_timer_init(&bfa->timer_mod);
INIT_LIST_HEAD(&bfa->comp_q);
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/**
* Detach the IOC.
*/
void
bfa_iocfc_detach(struct bfa_s *bfa)
{
bfa_ioc_detach(&bfa->ioc);
}
/**
* Initialize the IOCFC module by enabling the IOC.
*/
void
bfa_iocfc_init(struct bfa_s *bfa)
{
bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
bfa_ioc_enable(&bfa->ioc);
}
/**
* IOC start called from bfa_start(). Called to start IOC operations
* at driver instantiation for this instance.
*/
void
bfa_iocfc_start(struct bfa_s *bfa)
{
if (bfa->iocfc.cfgdone)
bfa_iocfc_start_submod(bfa);
}
/**
* IOC stop called from bfa_stop(). Called only when driver is unloaded
* for this instance.
*/
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
bfa->rme_process = BFA_FALSE;
bfa_ioc_disable(&bfa->ioc);
}
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
struct bfa_s *bfa = bfaarg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
union bfi_iocfc_i2h_msg_u *msg;
msg = (union bfi_iocfc_i2h_msg_u *) m;
bfa_trc(bfa, msg->mh.msg_id);
switch (msg->mh.msg_id) {
case BFI_IOCFC_I2H_CFG_REPLY:
iocfc->cfg_reply = &msg->cfg_reply;
bfa_iocfc_cfgrsp(bfa);
break;
case BFI_IOCFC_I2H_GET_STATS_RSP:
if (iocfc->stats_busy == BFA_FALSE
|| iocfc->stats_status == BFA_STATUS_ETIMER)
break;
bfa_timer_stop(&iocfc->stats_timer);
iocfc->stats_status = BFA_STATUS_OK;
bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
bfa);
break;
case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
/*
* check for timer pop before processing the rsp
*/
if (iocfc->stats_busy == BFA_FALSE
|| iocfc->stats_status == BFA_STATUS_ETIMER)
break;
bfa_timer_stop(&iocfc->stats_timer);
iocfc->stats_status = BFA_STATUS_OK;
bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
bfa_iocfc_stats_clr_cb, bfa);
break;
case BFI_IOCFC_I2H_UPDATEQ_RSP:
iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
break;
default:
bfa_assert(0);
}
}
#ifndef BFA_BIOS_BUILD
void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}
u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
return bfa_ioc_get_adid(&bfa->ioc);
}
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
attr->config = iocfc->cfg;
}
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_set_intr_req_s *m;
iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
if (!bfa_iocfc_is_operational(bfa))
return BFA_STATUS_OK;
m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
if (!m)
return BFA_STATUS_DEVBUSY;
bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
bfa_lpuid(bfa));
m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
m->delay = iocfc->cfginfo->intr_attr.delay;
m->latency = iocfc->cfginfo->intr_attr.latency;
bfa_trc(bfa, attr->delay);
bfa_trc(bfa, attr->latency);
bfa_reqq_produce(bfa, BFA_REQQ_IOC);
return BFA_STATUS_OK;
}
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
bfa_status_t
bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
bfa_cb_ioc_t cbfn, void *cbarg)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
if (iocfc->stats_busy) {
bfa_trc(bfa, iocfc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
if (!bfa_iocfc_is_operational(bfa)) {
bfa_trc(bfa, 0);
return BFA_STATUS_IOC_NON_OP;
}
iocfc->stats_busy = BFA_TRUE;
iocfc->stats_ret = stats;
iocfc->stats_cbfn = cbfn;
iocfc->stats_cbarg = cbarg;
bfa_iocfc_stats_query(bfa);
return BFA_STATUS_OK;
}
bfa_status_t
bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
if (iocfc->stats_busy) {
bfa_trc(bfa, iocfc->stats_busy);
return BFA_STATUS_DEVBUSY;
}
if (!bfa_iocfc_is_operational(bfa)) {
bfa_trc(bfa, 0);
return BFA_STATUS_IOC_NON_OP;
}
iocfc->stats_busy = BFA_TRUE;
iocfc->stats_cbfn = cbfn;
iocfc->stats_cbarg = cbarg;
bfa_iocfc_stats_clear(bfa);
return BFA_STATUS_OK;
}
/**
* Enable IOC after it is disabled.
*/
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Enable");
bfa_ioc_enable(&bfa->ioc);
}
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Disable");
bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
bfa->rme_process = BFA_FALSE;
bfa_ioc_disable(&bfa->ioc);
}
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/**
* Return boot target port wwns -- read from boot information in flash.
*/
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
int i;
if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
*nwwns = cfgrsp->pbc_cfg.nbluns;
for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
return;
}
*nwwns = cfgrsp->bootwwns.nwwns;
memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
return cfgrsp->pbc_cfg.nvports;
}
#endif


@ -1,184 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_IOCFC_H__
#define __BFA_IOCFC_H__
#include <bfa_ioc.h>
#include <bfa.h>
#include <bfi/bfi_iocfc.h>
#include <bfi/bfi_pbc.h>
#include <bfa_callback_priv.h>
#define BFA_REQQ_NELEMS_MIN (4)
#define BFA_RSPQ_NELEMS_MIN (4)
struct bfa_iocfc_regs_s {
bfa_os_addr_t intr_status;
bfa_os_addr_t intr_mask;
bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS];
bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS];
bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS];
};
/**
* MSIX vector handlers
*/
#define BFA_MSIX_MAX_VECTORS 22
typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
struct bfa_msix_s {
int nvecs;
bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
};
/**
* Chip specific interfaces
*/
struct bfa_hwif_s {
void (*hw_reginit)(struct bfa_s *bfa);
void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
void (*hw_msix_install)(struct bfa_s *bfa);
void (*hw_msix_uninstall)(struct bfa_s *bfa);
void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
u32 *end);
};
typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
struct bfa_iocfc_s {
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
int action;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
struct bfa_cb_qe_s init_hcb_qe;
struct bfa_cb_qe_s stop_hcb_qe;
struct bfa_cb_qe_s dis_hcb_qe;
struct bfa_cb_qe_s stats_hcb_qe;
bfa_boolean_t cfgdone;
struct bfa_dma_s cfg_info;
struct bfi_iocfc_cfg_s *cfginfo;
struct bfa_dma_s cfgrsp_dma;
struct bfi_iocfc_cfgrsp_s *cfgrsp;
struct bfi_iocfc_cfg_reply_s *cfg_reply;
u8 *stats_kva;
u64 stats_pa;
struct bfa_fw_stats_s *fw_stats;
struct bfa_timer_s stats_timer; /* timer */
struct bfa_iocfc_stats_s *stats_ret; /* driver stats location */
bfa_status_t stats_status; /* stats/statsclr status */
bfa_boolean_t stats_busy; /* outstanding stats */
bfa_cb_ioc_t stats_cbfn; /* driver callback function */
void *stats_cbarg; /* user callback arg */
struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */
struct bfa_hwif_s hwif;
bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
void *updateq_cbarg; /* bios callback arg */
u32 intr_mask;
};
#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
#define bfa_msix_init(__bfa, __nvecs) \
((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
#define bfa_msix_install(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
#define bfa_msix_uninstall(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
#define bfa_isr_mode_set(__bfa, __msix) \
((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
__nvecs, __maxvec))
#define bfa_msix_get_rme_range(__bfa, __start, __end) \
((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
/*
* FC specific IOC functions.
*/
void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len);
void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void bfa_iocfc_detach(struct bfa_s *bfa);
void bfa_iocfc_init(struct bfa_s *bfa);
void bfa_iocfc_start(struct bfa_s *bfa);
void bfa_iocfc_stop(struct bfa_s *bfa);
void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
void bfa_iocfc_reset_queues(struct bfa_s *bfa);
void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
u32 reqq_sci, u32 rspq_spi,
bfa_cb_iocfc_t cbfn, void *cbarg);
void bfa_msix_all(struct bfa_s *bfa, int vec);
void bfa_msix_reqq(struct bfa_s *bfa, int vec);
void bfa_msix_rspq(struct bfa_s *bfa, int vec);
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
void bfa_hwcb_reginit(struct bfa_s *bfa);
void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwcb_msix_install(struct bfa_s *bfa);
void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
void bfa_hwct_reginit(struct bfa_s *bfa);
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwct_msix_install(struct bfa_s *bfa);
void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
bfa_boolean_t mincfg);
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
struct bfa_boot_pbc_s *pbcfg);
int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
struct bfi_pbc_vport_s *pbc_vport);
#endif /* __BFA_IOCFC_H__ */
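The struct bfa_hwif_s table and the bfa_msix_*/bfa_isr_mode_set macros in this header are how the common IOCFC code stays chip-agnostic: bfa_iocfc_init_mem() (in the deleted bfa_iocfc.c above) fills the table once with either the bfa_hwct_* or the bfa_hwcb_* entries, and every later call goes through the pointer. A reduced sketch of that dispatch pattern, with hypothetical demo_* names; only the idea is taken from this header:
/* Hypothetical reduction of the bfa_hwif_s function-table dispatch. */
struct demo_hwif {
	void (*hw_msix_install)(void *bfa);
	void (*hw_msix_uninstall)(void *bfa);
};

static void demo_ct_msix_install(void *bfa)   { /* hwct-style MSI-X setup */ }
static void demo_ct_msix_uninstall(void *bfa) { /* hwct-style MSI-X teardown */ }
static void demo_cb_msix_install(void *bfa)   { /* hwcb-style MSI-X setup */ }
static void demo_cb_msix_uninstall(void *bfa) { /* hwcb-style MSI-X teardown */ }

/* Selected once at attach time, mirroring bfa_iocfc_init_mem(). */
static void
demo_hwif_select(struct demo_hwif *hwif, int is_catapult)
{
	if (is_catapult) {
		hwif->hw_msix_install   = demo_ct_msix_install;
		hwif->hw_msix_uninstall = demo_ct_msix_uninstall;
	} else {
		hwif->hw_msix_install   = demo_cb_msix_install;
		hwif->hw_msix_uninstall = demo_cb_msix_uninstall;
	}
}

/* Later calls dispatch through the table, like the bfa_msix_* macros. */
#define demo_msix_install(hwif, bfa)	((hwif)->hw_msix_install(bfa))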


@ -1,44 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include "bfa_intr_priv.h"
BFA_TRC_FILE(HAL, IOCFC_Q);
void
bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
void *cbarg)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_updateq_req_s updateq_req;
iocfc->updateq_cbfn = cbfn;
iocfc->updateq_cbarg = cbarg;
bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
bfa_lpuid(bfa));
updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
sizeof(struct bfi_iocfc_updateq_req_s));
}

The diff for this file is not shown because it is too large.

The diff for this file is not shown because it is too large.


@ -1,346 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfa_log.c BFA log library
*/
#include <bfa_os_inc.h>
#include <cs/bfa_log.h>
/*
* global log info structure
*/
struct bfa_log_info_s {
u32 start_idx; /* start index for a module */
u32 total_count; /* total count for a module */
enum bfa_log_severity level; /* global log level */
bfa_log_cb_t cbfn; /* callback function */
};
static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1];
static u32 bfa_log_msg_total_count;
static int bfa_log_initialized;
static char *bfa_log_severity[] =
{ "[none]", "[critical]", "[error]", "[warn]", "[info]", "" };
/**
* BFA log library initialization
*
* The log library initialization includes the following:
* - set log instance name and callback function
* - read the message array generated from xml files
* - calculate start index for each module
* - calculate message count for each module
* - perform error checking
*
* @param[in] log_mod - log module info
* @param[in] instance_name - instance name
* @param[in] cbfn - callback function
*
* It returns 0 on success, or -1 on failure
*/
int
bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name,
bfa_log_cb_t cbfn)
{
struct bfa_log_msgdef_s *msg;
u32 pre_mod_id = 0;
u32 cur_mod_id = 0;
u32 i, pre_idx, idx, msg_id;
/*
* set instance name
*/
if (log_mod) {
strncpy(log_mod->instance_info, instance_name,
sizeof(log_mod->instance_info));
log_mod->cbfn = cbfn;
for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++)
log_mod->log_level[i] = BFA_LOG_WARNING;
}
if (bfa_log_initialized)
return 0;
for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) {
bfa_log_info[i].start_idx = 0;
bfa_log_info[i].total_count = 0;
bfa_log_info[i].level = BFA_LOG_WARNING;
bfa_log_info[i].cbfn = cbfn;
}
pre_idx = 0;
idx = 0;
msg = bfa_log_msg_array;
msg_id = BFA_LOG_GET_MSG_ID(msg);
pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
while (msg_id != 0) {
cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) {
cbfn(log_mod, msg_id,
"%s%s log: module id %u out of range\n",
BFA_LOG_CAT_NAME,
bfa_log_severity[BFA_LOG_ERROR],
cur_mod_id);
return -1;
}
if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) {
cbfn(log_mod, msg_id,
"%s%s log: module id %u out of range\n",
BFA_LOG_CAT_NAME,
bfa_log_severity[BFA_LOG_ERROR],
pre_mod_id);
return -1;
}
if (cur_mod_id != pre_mod_id) {
bfa_log_info[pre_mod_id].start_idx = pre_idx;
bfa_log_info[pre_mod_id].total_count = idx - pre_idx;
pre_mod_id = cur_mod_id;
pre_idx = idx;
}
idx++;
msg++;
msg_id = BFA_LOG_GET_MSG_ID(msg);
}
bfa_log_info[cur_mod_id].start_idx = pre_idx;
bfa_log_info[cur_mod_id].total_count = idx - pre_idx;
bfa_log_msg_total_count = idx;
cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n",
BFA_LOG_CAT_NAME,
bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count);
bfa_log_initialized = 1;
return 0;
}
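The block comment above bfa_log_init() lists what initialization does: record the instance name and callback, walk bfa_log_msg_array, and compute each module's start index and message count. A hypothetical usage sketch follows, assuming the bfa log headers are available; the printf-style callback signature is inferred from how cbfn() is invoked in this file and is an assumption:
/*
 * Hypothetical usage sketch.  bfa_log_init() and struct bfa_log_mod_s are
 * real identifiers from this patch; demo_log_cb()'s signature is an
 * assumption based on the cbfn(log_mod, msg_id, fmt, ...) calls above.
 */
static void
demo_log_cb(struct bfa_log_mod_s *log_mod, u32 msg_id, const char *fmt, ...)
{
	/*
	 * A real driver would vsnprintf() the arguments and hand the
	 * result to printk() or a management application.
	 */
}

static struct bfa_log_mod_s demo_log_mod;

static int
demo_log_setup(void)
{
	/*
	 * Registers the instance name and callback, then indexes the
	 * message array; returns -1 if the array is inconsistent.
	 */
	return bfa_log_init(&demo_log_mod, "demo-instance", demo_log_cb);
}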
/**
* BFA log set log level for a module
*
* @param[in] log_mod - log module info
* @param[in] mod_id - module id
* @param[in] log_level - log severity level
*
* It returns BFA_STATUS_OK on success, or > 0 on failure
*/
bfa_status_t
bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id,
enum bfa_log_severity log_level)
{
if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
return BFA_STATUS_EINVAL;
if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
return BFA_STATUS_EINVAL;
if (log_mod)
log_mod->log_level[mod_id] = log_level;
else
bfa_log_info[mod_id].level = log_level;
return BFA_STATUS_OK;
}
/**
* BFA log set log level for all modules
*
* @param[in] log_mod - log module info
* @param[in] log_level - log severity level
*
* It returns BFA_STATUS_OK on success, or > 0 on failure
*/
bfa_status_t
bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
enum bfa_log_severity log_level)
{
int mod_id = BFA_LOG_UNUSED_ID + 1;
if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
return BFA_STATUS_EINVAL;
if (log_mod) {
for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
log_mod->log_level[mod_id] = log_level;
} else {
for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
bfa_log_info[mod_id].level = log_level;
}
return BFA_STATUS_OK;
}
/**
* BFA log set log level for all aen sub-modules
*
* @param[in] log_mod - log module info
* @param[in] log_level - log severity level
*
* It returns BFA_STATUS_OK on success, or > 0 on failure
*/
bfa_status_t
bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
enum bfa_log_severity log_level)
{
int mod_id = BFA_LOG_AEN_MIN + 1;
if (log_mod) {
for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
log_mod->log_level[mod_id] = log_level;
} else {
for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
bfa_log_info[mod_id].level = log_level;
}
return BFA_STATUS_OK;
}
/**
* BFA log get log level for a module
*
* @param[in] log_mod - log module info
* @param[in] mod_id - module id
*
* It returns log level or -1 on error
*/
enum bfa_log_severity
bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
{
if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
return BFA_LOG_INVALID;
if (log_mod)
return log_mod->log_level[mod_id];
else
return bfa_log_info[mod_id].level;
}
enum bfa_log_severity
bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id)
{
struct bfa_log_msgdef_s *msg;
u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
if (!bfa_log_initialized)
return BFA_LOG_INVALID;
if (mod > BFA_LOG_MODULE_ID_MAX)
return BFA_LOG_INVALID;
if (idx >= bfa_log_info[mod].total_count) {
bfa_log_info[mod].cbfn(log_mod, msg_id,
"%s%s log: inconsistent idx %u vs. total count %u\n",
BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
bfa_log_info[mod].total_count);
return BFA_LOG_INVALID;
}
msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
bfa_log_info[mod].cbfn(log_mod, msg_id,
"%s%s log: inconsistent msg id %u array msg id %u\n",
BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
msg_id, BFA_LOG_GET_MSG_ID(msg));
return BFA_LOG_INVALID;
}
return BFA_LOG_GET_SEVERITY(msg);
}
/**
* BFA log message handling
*
* BFA log message handling finds the message based on message id and prints
* out the message based on its format and arguments. It also prefixes
* the output with the category, instance, severity and audit attributes.
*
* @param[in] log_mod - log module info
* @param[in] msg_id - message id
* @param[in] ... - message arguments
*
* It returns 0 on success, or -1 on error
*/
int
bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...)
{
va_list ap;
char buf[256];
struct bfa_log_msgdef_s *msg;
int log_level;
u32 mod = BFA_LOG_GET_MOD_ID(msg_id);
u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
if (!bfa_log_initialized)
return -1;
if (mod > BFA_LOG_MODULE_ID_MAX)
return -1;
if (idx >= bfa_log_info[mod].total_count) {
bfa_log_info[mod].cbfn(log_mod, msg_id,
"%s%s log: inconsistent idx %u vs. total count %u\n",
BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
bfa_log_info[mod].total_count);
return -1;
}
msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
bfa_log_info[mod].cbfn(log_mod, msg_id,
"%s%s log: inconsistent msg id %u array msg id %u\n",
BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
msg_id, BFA_LOG_GET_MSG_ID(msg));
return -1;
}
log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level;
if ((BFA_LOG_GET_SEVERITY(msg) > log_level) &&
(msg->attributes != BFA_LOG_ATTR_NONE))
return 0;
va_start(ap, msg_id);
bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap);
va_end(ap);
if (log_mod)
log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n",
BFA_LOG_CAT_NAME, log_mod->instance_info,
bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
(msg->attributes & BFA_LOG_ATTR_AUDIT)
? " (audit) " : "", msg->msg_value, buf);
else
bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n",
BFA_LOG_CAT_NAME,
bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
(msg->attributes & BFA_LOG_ATTR_AUDIT) ?
" (audit) " : "", msg->msg_value, buf);
return 0;
}
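bfa_log() above drops a message whose severity is numerically above the module's configured level (unless the message carries no attributes), so adjusting the per-module level with bfa_log_set_level() is how callers control verbosity. A hedged calling sketch, reusing the HAL heartbeat message defined in the log message table later in this patch and the hypothetical demo_log_mod instance from the previous sketch:
/*
 * Hypothetical calling pattern.  bfa_log(), bfa_log_set_level(),
 * BFA_LOG_GET_MOD_ID(), BFA_LOG_INFO and BFA_LOG_HAL_HEARTBEAT_FAILURE all
 * appear in this patch; demo_log_mod is the hypothetical instance above.
 */
static void
demo_report_heartbeat_failure(int hb_count)
{
	/* Allow everything up to and including BFA_LOG_INFO for the HAL module. */
	bfa_log_set_level(&demo_log_mod,
			  BFA_LOG_GET_MOD_ID(BFA_LOG_HAL_HEARTBEAT_FAILURE),
			  BFA_LOG_INFO);

	/* Formats "Firmware heartbeat failure at %d" through the callback. */
	bfa_log(&demo_log_mod, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
}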


@ -1,537 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <cs/bfa_log.h>
#include <aen/bfa_aen_adapter.h>
#include <aen/bfa_aen_audit.h>
#include <aen/bfa_aen_ethport.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen_itnim.h>
#include <aen/bfa_aen_lport.h>
#include <aen/bfa_aen_port.h>
#include <aen/bfa_aen_rport.h>
#include <log/bfa_log_fcs.h>
#include <log/bfa_log_hal.h>
#include <log/bfa_log_linux.h>
#include <log/bfa_log_wdrv.h>
struct bfa_log_msgdef_s bfa_log_msg_array[] = {
/* messages define for BFA_AEN_CAT_ADAPTER Module */
{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ADAPTER_ADD",
"New adapter found: SN = %s, base port WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE",
"Adapter removed: SN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages define for BFA_AEN_CAT_AUDIT Module */
{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE",
"Authentication enabled for base port: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE",
"Authentication disabled for base port: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages define for BFA_AEN_CAT_ETHPORT Module */
{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ETHPORT_LINKUP",
"Base port ethernet linkup: mac = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ETHPORT_LINKDOWN",
"Base port ethernet linkdown: mac = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ETHPORT_ENABLE",
"Base port ethernet interface enabled: mac = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ETHPORT_DISABLE",
"Base port ethernet interface disabled: mac = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages define for BFA_AEN_CAT_IOC Module */
{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_IOC_HBGOOD",
"Heart Beat of IOC %d is good.",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL,
"BFA_AEN_IOC_HBFAIL",
"Heart Beat of IOC %d has failed.",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_IOC_ENABLE",
"IOC %d is enabled.",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_IOC_DISABLE",
"IOC %d is disabled.",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH",
"Running firmware version is incompatible with the driver version.",
(0), 0},
{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
"Link initialization failed due to firmware configuration read error:"
" WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
"Unsupported switch vendor. Link initialization failed: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
"Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
(0), 0},
{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
"Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
(0), 0},
/* messages define for BFA_AEN_CAT_ITNIM Module */
{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ITNIM_ONLINE",
"Target (WWN = %s) is online for initiator (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_ITNIM_OFFLINE",
"Target (WWN = %s) offlined by initiator (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT",
"Target (WWN = %s) connectivity lost for initiator (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
/* messages define for BFA_AEN_CAT_LPORT Module */
{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_LPORT_NEW",
"New logical port created: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_LPORT_DELETE",
"Logical port deleted: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_LPORT_ONLINE",
"Logical port online: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_LPORT_OFFLINE",
"Logical port taken offline: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT",
"Logical port lost fabric connectivity: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_LPORT_NEW_PROP",
"New virtual port created using proprietary interface: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP",
"Virtual port deleted using proprietary interface: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD",
"New virtual port created using standard interface: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD",
"Virtual port deleted using standard interface: WWN = %s, Role = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN",
"Virtual port login failed. Duplicate WWN = %s reported by fabric.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX",
"Virtual port (WWN = %s) login failed. Max NPIV ports already exist in"
" fabric/fport.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN",
"Virtual port (WWN = %s) login failed.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages define for BFA_AEN_CAT_PORT Module */
{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE",
"Base port online: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
"BFA_AEN_PORT_OFFLINE",
"Base port offline: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_PORT_RLIR",
"RLIR event not supported.",
(0), 0},
{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_PORT_SFP_INSERT",
"New SFP found: WWN/MAC = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE",
"SFP removed: WWN/MAC = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
"BFA_AEN_PORT_SFP_POM",
"SFP POM level to %s: WWN/MAC = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_PORT_ENABLE",
"Base port enabled: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_PORT_DISABLE",
"Base port disabled: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_PORT_AUTH_ON",
"Authentication successful for base port: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
"BFA_AEN_PORT_AUTH_OFF",
"Authentication unsuccessful for base port: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
"BFA_AEN_PORT_DISCONNECT",
"Base port (WWN = %s) lost fabric connectivity.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
"BFA_AEN_PORT_QOS_NEG",
"QOS negotiation failed for base port: WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE",
"Base port WWN = %s, Fabric WWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR",
"SFP access error: WWN/MAC = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT",
"Unsupported SFP found: WWN/MAC = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages define for BFA_AEN_CAT_RPORT Module */
{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_RPORT_ONLINE",
"Remote port (WWN = %s) online for logical port (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_RPORT_OFFLINE",
"Remote port (WWN = %s) offlined by logical port (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT",
"Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_RPORT_QOS_PRIO",
"QOS priority changed to %s: RPWWN = %s and LPWWN = %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"BFA_AEN_RPORT_QOS_FLOWID",
"QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
/* messages define for FCS Module */
{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH",
"No switched fabric presence is detected.",
(0), 0},
{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "FCS_FABRIC_ISOLATED",
"Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and"
" switch port VF_ID: %04x.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) |
(BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
/* messages define for HAL Module */
{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
"HAL_ASSERT",
"Assertion failure: %s:%d: %s",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE",
"Firmware heartbeat failure at %d",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID",
"Driver configuration %s value %d is invalid. Value should be within"
" %d and %d.",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
(BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4},
{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
"HAL_SM_ASSERT",
"SM Assertion failure: %s:%d: event = %d",
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
(BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "HAL_DRIVER_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"HAL_DRIVER_CONFIG_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "HAL_MBOX_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages defined for LINUX Module */
{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED",
"bfa device at %s claimed.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED",
"Hash table initialization failure for the port %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_SYSFS_FAILED",
"sysfs file creation failure for the port %s.",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED",
"Memory allocation failed: %s. ",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_DRIVER_REGISTRATION_FAILED",
"%s. ",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_ITNIM_FREE",
"scsi%d: FCID: %s WWPN: %s",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_ITNIM_ONLINE",
"Target: %d:0:%d FCID: %s WWPN: %s",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE",
"Target: %d:0:%d FCID: %s WWPN: %s",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
(BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE",
"Free scsi%d",
((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_SCSI_ABORT",
"scsi%d: abort cmnd %p, iotag %x",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
(BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP",
"scsi%d: complete abort 0x%p, iotag 0x%x",
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
(BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_DRIVER_CONFIG_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_BNA_STATE_MACHINE,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_BNA_STATE_MACHINE",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_IOC_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_RESOURCE_ALLOC_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_RING_BUFFER_ERROR,
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
"LINUX_RING_BUFFER_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_DRIVER_INFO",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "LINUX_DRIVER_AEN",
"%s",
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
/* messages defined for WDRV Module */
{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR",
"IOC initialization has failed.",
(0), 0},
{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR",
"IOC internal error. ",
(0), 0},
{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_IOC_START_ERROR",
"IOC could not be started. ",
(0), 0},
{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR",
"IOC could not be stopped. ",
(0), 0},
{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES",
"Insufficient memory. ",
(0), 0},
{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR",
"Unable to map the IOC onto the system address space. ",
(0), 0},
{0, 0, 0, "", "", 0, 0},
};
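
An aside on reading the catalog above: each entry packs one type code per printf argument (BFA_LOG_S for strings, BFA_LOG_D for decimals, BFA_LOG_X for hex, BFA_LOG_P for pointers) into its BFA_LOG_ARGn slot, and the trailing integer is the argument count. The fragment below spells this out for the HAL_ASSERT entry; it is illustrative only and introduces no new definitions.

/* Argument descriptor for "Assertion failure: %s:%d: %s" (HAL_ASSERT):
 * two strings and one decimal, three arguments in total.
 */
u32 hal_assert_desc = (BFA_LOG_S << BFA_LOG_ARG0) |	/* first %s  */
		      (BFA_LOG_D << BFA_LOG_ARG1) |	/* %d        */
		      (BFA_LOG_S << BFA_LOG_ARG2) | 0;	/* second %s */
int hal_assert_nargs = 3;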


@ -1,892 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfi/bfi_lps.h>
#include <cs/bfa_debug.h>
#include <defs/bfa_defs_pci.h>
BFA_TRC_FILE(HAL, LPS);
BFA_MODULE(lps);
#define BFA_LPS_MIN_LPORTS (1)
#define BFA_LPS_MAX_LPORTS (256)
/*
* Maximum Vports supported per physical port or vf.
*/
#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
/**
* forward declarations
*/
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
u32 *dm_len);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
/**
* lps_pvt BFA LPS private functions
*/
enum bfa_lps_event {
BFA_LPS_SM_LOGIN = 1, /* login request from user */
BFA_LPS_SM_LOGOUT = 2, /* logout request from user */
BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */
BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
BFA_LPS_SM_DELETE = 5, /* lps delete from user */
BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
};
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
enum bfa_lps_event event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
enum bfa_lps_event event);
/**
* Init state -- no login
*/
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_LOGIN:
if (bfa_reqq_full(lps->bfa, lps->reqq)) {
bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
} else {
bfa_sm_set_state(lps, bfa_lps_sm_login);
bfa_lps_send_login(lps);
}
if (lps->fdisc)
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0, "FDISC Request");
else
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0, "FLOGI Request");
break;
case BFA_LPS_SM_LOGOUT:
bfa_lps_logout_comp(lps);
break;
case BFA_LPS_SM_DELETE:
bfa_lps_free(lps);
break;
case BFA_LPS_SM_RX_CVL:
case BFA_LPS_SM_OFFLINE:
break;
case BFA_LPS_SM_FWRSP:
/* Could happen when fabric detects loopback and discards
* the lps request. Fw will eventually send out the timeout.
* Just ignore it.
*/
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* login is in progress -- awaiting response from firmware
*/
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_FWRSP:
if (lps->status == BFA_STATUS_OK) {
bfa_sm_set_state(lps, bfa_lps_sm_online);
if (lps->fdisc)
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0, "FDISC Accept");
else
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
} else {
bfa_sm_set_state(lps, bfa_lps_sm_init);
if (lps->fdisc)
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0,
"FDISC Fail (RJT or timeout)");
else
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGIN, 0,
"FLOGI Fail (RJT or timeout)");
}
bfa_lps_login_comp(lps);
break;
case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* login pending - awaiting space in request queue
*/
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_RESUME:
bfa_sm_set_state(lps, bfa_lps_sm_login);
break;
case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_reqq_wcancel(&lps->wqe);
break;
case BFA_LPS_SM_RX_CVL:
/*
* Login was not even sent out; so when getting out
* of this state, it will appear like a login retry
* after Clear virtual link
*/
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* login complete
*/
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_LOGOUT:
if (bfa_reqq_full(lps->bfa, lps->reqq)) {
bfa_sm_set_state(lps, bfa_lps_sm_logowait);
bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
} else {
bfa_sm_set_state(lps, bfa_lps_sm_logout);
bfa_lps_send_logout(lps);
}
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_LOGO, 0, "Logout");
break;
case BFA_LPS_SM_RX_CVL:
bfa_sm_set_state(lps, bfa_lps_sm_init);
/* Let the vport module know about this event */
bfa_lps_cvl_event(lps);
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
break;
case BFA_LPS_SM_OFFLINE:
case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* logout in progress - awaiting firmware response
*/
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_FWRSP:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_lps_logout_comp(lps);
break;
case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* logout pending -- awaiting space in request queue
*/
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
bfa_trc(lps->bfa, lps->lp_tag);
bfa_trc(lps->bfa, event);
switch (event) {
case BFA_LPS_SM_RESUME:
bfa_sm_set_state(lps, bfa_lps_sm_logout);
bfa_lps_send_logout(lps);
break;
case BFA_LPS_SM_OFFLINE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_reqq_wcancel(&lps->wqe);
break;
default:
bfa_sm_fault(lps->bfa, event);
}
}
/**
* lps_pvt BFA LPS private functions
*/
/**
* return memory requirement
*/
static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
if (cfg->drvcfg.min_cfg)
*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
else
*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
}
/**
* bfa module attach at initialization time
*/
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
int i;
bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
mod->num_lps = BFA_LPS_MAX_LPORTS;
if (cfg->drvcfg.min_cfg)
mod->num_lps = BFA_LPS_MIN_LPORTS;
else
mod->num_lps = BFA_LPS_MAX_LPORTS;
mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
INIT_LIST_HEAD(&mod->lps_free_q);
INIT_LIST_HEAD(&mod->lps_active_q);
for (i = 0; i < mod->num_lps; i++, lps++) {
lps->bfa = bfa;
lps->lp_tag = (u8) i;
lps->reqq = BFA_REQQ_LPS;
bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
list_add_tail(&lps->qe, &mod->lps_free_q);
}
}
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
/**
* IOC in disabled state -- consider all lps offline
*/
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
struct list_head *qe, *qen;
list_for_each_safe(qe, qen, &mod->lps_active_q) {
lps = (struct bfa_lps_s *) qe;
bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
}
/**
* Firmware login response
*/
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
bfa_assert(rsp->lp_tag < mod->num_lps);
lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
lps->status = rsp->status;
switch (rsp->status) {
case BFA_STATUS_OK:
lps->fport = rsp->f_port;
lps->npiv_en = rsp->npiv_en;
lps->lp_pid = rsp->lp_pid;
lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
lps->pr_pwwn = rsp->port_name;
lps->pr_nwwn = rsp->node_name;
lps->auth_req = rsp->auth_req;
lps->lp_mac = rsp->lp_mac;
lps->brcd_switch = rsp->brcd_switch;
lps->fcf_mac = rsp->fcf_mac;
break;
case BFA_STATUS_FABRIC_RJT:
lps->lsrjt_rsn = rsp->lsrjt_rsn;
lps->lsrjt_expl = rsp->lsrjt_expl;
break;
case BFA_STATUS_EPROTOCOL:
lps->ext_status = rsp->ext_status;
break;
default:
/* Nothing to do with other status */
break;
}
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
/**
* Firmware logout response
*/
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
bfa_assert(rsp->lp_tag < mod->num_lps);
lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
/**
* Firmware received a Clear virtual link request (for FCoE)
*/
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
/**
* Space is available in request queue, resume queueing request to firmware.
*/
static void
bfa_lps_reqq_resume(void *lps_arg)
{
struct bfa_lps_s *lps = lps_arg;
bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}
/**
* lps is freed -- triggered by vport delete
*/
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
list_del(&lps->qe);
list_add_tail(&lps->qe, &mod->lps_free_q);
}
/**
* send login request to firmware
*/
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
struct bfi_lps_login_req_s *m;
m = bfa_reqq_next(lps->bfa, lps->reqq);
bfa_assert(m);
bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
bfa_lpuid(lps->bfa));
m->lp_tag = lps->lp_tag;
m->alpa = lps->alpa;
m->pdu_size = bfa_os_htons(lps->pdusz);
m->pwwn = lps->pwwn;
m->nwwn = lps->nwwn;
m->fdisc = lps->fdisc;
m->auth_en = lps->auth_en;
bfa_reqq_produce(lps->bfa, lps->reqq);
}
/**
* send logout request to firmware
*/
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
struct bfi_lps_logout_req_s *m;
m = bfa_reqq_next(lps->bfa, lps->reqq);
bfa_assert(m);
bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
bfa_lpuid(lps->bfa));
m->lp_tag = lps->lp_tag;
m->port_name = lps->pwwn;
bfa_reqq_produce(lps->bfa, lps->reqq);
}
/**
* Indirect login completion handler for non-fcs
*/
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
struct bfa_lps_s *lps = arg;
if (!complete)
return;
if (lps->fdisc)
bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
else
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
/**
* Login completion handler -- direct call for fcs, queue for others
*/
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
if (!lps->bfa->fcs) {
bfa_cb_queue(lps->bfa, &lps->hcb_qe,
bfa_lps_login_comp_cb, lps);
return;
}
if (lps->fdisc)
bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
else
bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
/**
* Indirect logout completion handler for non-fcs
*/
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
struct bfa_lps_s *lps = arg;
if (!complete)
return;
if (lps->fdisc)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
else
bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/**
* Logout completion handler -- direct call for fcs, queue for others
*/
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
if (!lps->bfa->fcs) {
bfa_cb_queue(lps->bfa, &lps->hcb_qe,
bfa_lps_logout_comp_cb, lps);
return;
}
if (lps->fdisc)
bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
else
bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
/**
* Clear virtual link completion handler for non-fcs
*/
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
struct bfa_lps_s *lps = arg;
if (!complete)
return;
/* Clear virtual link to base port will result in link down */
if (lps->fdisc)
bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
/**
* Received Clear virtual link event --direct call for fcs,
* queue for others
*/
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
if (!lps->bfa->fcs) {
bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
lps);
return;
}
/* Clear virtual link to base port will result in link down */
if (lps->fdisc)
bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
u32
bfa_lps_get_max_vport(struct bfa_s *bfa)
{
if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
return BFA_LPS_MAX_VPORTS_SUPP_CT;
else
return BFA_LPS_MAX_VPORTS_SUPP_CB;
}
/**
* lps_public BFA LPS public functions
*/
/**
* Allocate a lport service tag.
*/
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps = NULL;
bfa_q_deq(&mod->lps_free_q, &lps);
if (lps == NULL)
return NULL;
list_add_tail(&lps->qe, &mod->lps_active_q);
bfa_sm_set_state(lps, bfa_lps_sm_init);
return lps;
}
/**
* Free lport service tag. This can be called anytime after an alloc.
* No need to wait for any pending login/logout completions.
*/
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
/**
* Initiate a lport login.
*/
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
lps->uarg = uarg;
lps->alpa = alpa;
lps->pdusz = pdusz;
lps->pwwn = pwwn;
lps->nwwn = nwwn;
lps->fdisc = BFA_FALSE;
lps->auth_en = auth_en;
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
/**
* Initiate a lport fdisc login.
*/
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
wwn_t nwwn)
{
lps->uarg = uarg;
lps->alpa = 0;
lps->pdusz = pdusz;
lps->pwwn = pwwn;
lps->nwwn = nwwn;
lps->fdisc = BFA_TRUE;
lps->auth_en = BFA_FALSE;
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
/**
* Initiate a lport logout (flogi).
*/
void
bfa_lps_flogo(struct bfa_lps_s *lps)
{
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
/**
* Initiate a lport FDISC logout.
*/
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
/**
* Discard a pending login request -- should be called only for
* link down handling.
*/
void
bfa_lps_discard(struct bfa_lps_s *lps)
{
bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
/**
* Return lport services tag
*/
u8
bfa_lps_get_tag(struct bfa_lps_s *lps)
{
return lps->lp_tag;
}
/**
* Return lport services tag given the pid
*/
u8
bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
int i;
for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
if (lps->lp_pid == pid)
return lps->lp_tag;
}
/* Return base port tag anyway */
return 0;
}
/**
* return if fabric login indicates support for NPIV
*/
bfa_boolean_t
bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
{
return lps->npiv_en;
}
/**
* Return TRUE if attached to F-Port, else return FALSE
*/
bfa_boolean_t
bfa_lps_is_fport(struct bfa_lps_s *lps)
{
return lps->fport;
}
/**
* Return TRUE if attached to a Brocade Fabric
*/
bfa_boolean_t
bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
{
return lps->brcd_switch;
}
/**
* return TRUE if authentication is required
*/
bfa_boolean_t
bfa_lps_is_authreq(struct bfa_lps_s *lps)
{
return lps->auth_req;
}
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
return lps->ext_status;
}
/**
* return port id assigned to the lport
*/
u32
bfa_lps_get_pid(struct bfa_lps_s *lps)
{
return lps->lp_pid;
}
/**
* Return bb_credit assigned in FLOGI response
*/
u16
bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
{
return lps->pr_bbcred;
}
/**
* Return peer port name
*/
wwn_t
bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
{
return lps->pr_pwwn;
}
/**
* Return peer node name
*/
wwn_t
bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
{
return lps->pr_nwwn;
}
/**
* return reason code if login request is rejected
*/
u8
bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
{
return lps->lsrjt_rsn;
}
/**
* return explanation code if login request is rejected
*/
u8
bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
{
return lps->lsrjt_expl;
}
/**
* Return fpma/spma MAC for lport
*/
struct mac_s
bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
{
return lps->lp_mac;
}
/**
* LPS firmware message class handler.
*/
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
union bfi_lps_i2h_msg_u msg;
bfa_trc(bfa, m->mhdr.msg_id);
msg.msg = m;
switch (m->mhdr.msg_id) {
case BFI_LPS_H2I_LOGIN_RSP:
bfa_lps_login_rsp(bfa, msg.login_rsp);
break;
case BFI_LPS_H2I_LOGOUT_RSP:
bfa_lps_logout_rsp(bfa, msg.logout_rsp);
break;
case BFI_LPS_H2I_CVL_EVENT:
bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
break;
default:
bfa_trc(bfa, m->mhdr.msg_id);
bfa_assert(0);
}
}
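
To make the LPS control flow above easier to follow, here is a minimal, illustrative sketch of how a caller could drive the FLOGI path of this module. The wrapper name my_fabric_login() is hypothetical, 2112 is only an example PDU size, and bfa_cb_lps_flogi_comp() is shown as a stub; the real implementation lives elsewhere in the driver and is merely referenced by the code above.

/*
 * Illustrative sketch only -- not part of the driver sources.
 */
static void
my_fabric_login(struct bfa_s *bfa, wwn_t pwwn, wwn_t nwwn)
{
	struct bfa_lps_s *lps = bfa_lps_alloc(bfa);	/* grab a free lport service tag */

	if (lps == NULL)
		return;					/* all tags are in use */

	/* Pass the lps itself as the user arg so the completion can use it. */
	bfa_lps_flogi(lps, lps, 0, 2112, pwwn, nwwn, BFA_FALSE);
}

/*
 * Stub of the login completion hook invoked from bfa_lps_login_comp().
 */
void
bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_lps_s *lps = uarg;

	if (status == BFA_STATUS_OK)
		printk(KERN_INFO "FLOGI complete, PID 0x%x\n",
		       bfa_lps_get_pid(lps));
}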


@ -1,38 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_LPS_PRIV_H__
#define __BFA_LPS_PRIV_H__
#include <bfa_svc.h>
struct bfa_lps_mod_s {
struct list_head lps_free_q;
struct list_head lps_active_q;
struct bfa_lps_s *lps_arr;
int num_lps;
};
#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
/*
* external functions
*/
void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_LPS_PRIV_H__ */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,26 +15,52 @@
* General Public License for more details.
*/
#ifndef __BFA_PRIV_H__
#define __BFA_PRIV_H__
/**
* bfa_modules.h BFA modules
*/
#ifndef __BFA_MODULES_H__
#define __BFA_MODULES_H__
#include "bfa_cs.h"
#include "bfa.h"
#include "bfa_svc.h"
#include "bfa_fcpim.h"
#include "bfa_port.h"
struct bfa_modules_s {
struct bfa_fcport_s fcport; /* fc port module */
struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
struct bfa_lps_mod_s lps_mod; /* lps module */
struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
struct bfa_rport_mod_s rport_mod; /* remote port module */
struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
struct bfa_port_s port; /* Physical port module */
};
/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
enum {
BFA_TRC_HAL_CORE = 1,
BFA_TRC_HAL_FCXP = 2,
BFA_TRC_HAL_FCPIM = 3,
BFA_TRC_HAL_IOCFC_CT = 4,
BFA_TRC_HAL_IOCFC_CB = 5,
};
#include "bfa_iocfc.h"
#include "bfa_intr_priv.h"
#include "bfa_trcmod_priv.h"
#include "bfa_modules_priv.h"
#include "bfa_fwimg_priv.h"
#include <cs/bfa_log.h>
#include <bfa_timer.h>
/**
* Macro to define a new BFA module
*/
#define BFA_MODULE(__mod) \
#define BFA_MODULE(__mod) \
static void bfa_ ## __mod ## _meminfo( \
struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
u32 *dm_len); \
static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
void *bfad, struct bfa_iocfc_cfg_s *cfg, \
void *bfad, struct bfa_iocfc_cfg_s *cfg, \
struct bfa_meminfo_s *meminfo, \
struct bfa_pcidev_s *pcidev); \
static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
@ -77,17 +103,15 @@ extern struct bfa_module_s *hal_mods[];
struct bfa_s {
void *bfad; /* BFA driver instance */
struct bfa_aen_s *aen; /* AEN module */
struct bfa_plog_s *plog; /* portlog buffer */
struct bfa_log_mod_s *logm; /* driver logging module */
struct bfa_trc_mod_s *trcmod; /* driver tracing */
struct bfa_ioc_s ioc; /* IOC module */
struct bfa_iocfc_s iocfc; /* IOCFC module */
struct bfa_timer_mod_s timer_mod; /* timer module */
struct bfa_modules_s modules; /* BFA modules */
struct list_head comp_q; /* pending completions */
bfa_boolean_t rme_process; /* RME processing enabled */
struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
struct list_head comp_q; /* pending completions */
bfa_boolean_t rme_process; /* RME processing enabled */
struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
bfa_boolean_t fcs; /* FCS is attached to BFA */
struct bfa_msix_s msix;
};
@ -95,8 +119,6 @@ struct bfa_s {
extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[];
extern bfa_boolean_t bfa_auto_recover;
extern struct bfa_module_s hal_mod_flash;
extern struct bfa_module_s hal_mod_fcdiag;
extern struct bfa_module_s hal_mod_sgpg;
extern struct bfa_module_s hal_mod_fcport;
extern struct bfa_module_s hal_mod_fcxp;
@ -104,7 +126,5 @@ extern struct bfa_module_s hal_mod_lps;
extern struct bfa_module_s hal_mod_uf;
extern struct bfa_module_s hal_mod_rport;
extern struct bfa_module_s hal_mod_fcpim;
extern struct bfa_module_s hal_mod_pbind;
#endif /* __BFA_PRIV_H__ */
#endif /* __BFA_MODULES_H__ */
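
As a reading aid, BFA_MODULE(__mod) above generates forward declarations for the standard per-module entry points. The partial expansion below shows what BFA_MODULE(lps) produces for the declarations visible in this hunk (it matches the old bfa_lps.c listed earlier); members of the macro beyond the visible hunk are omitted.

/* Partial, illustrative expansion of BFA_MODULE(lps). */
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
		struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo,
		struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);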


@ -1,43 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_MODULES_PRIV_H__
#define __BFA_MODULES_PRIV_H__
#include "bfa_uf_priv.h"
#include "bfa_port_priv.h"
#include "bfa_rport_priv.h"
#include "bfa_fcxp_priv.h"
#include "bfa_lps_priv.h"
#include "bfa_fcpim_priv.h"
#include <cee/bfa_cee.h>
#include <port/bfa_port.h>
struct bfa_modules_s {
struct bfa_fcport_s fcport; /* fc port module */
struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
struct bfa_lps_mod_s lps_mod; /* lps module */
struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
struct bfa_rport_mod_s rport_mod; /* remote port module */
struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
struct bfa_cee_s cee; /* CEE Module */
struct bfa_port_s port; /* Physical port module */
};
#endif /* __BFA_MODULES_PRIV_H__ */


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -22,30 +22,20 @@
#ifndef __BFA_OS_INC_H__
#define __BFA_OS_INC_H__
#ifndef __KERNEL__
#include <stdint.h>
#else
#include <linux/types.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#define SET_MODULE_VERSION(VER)
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
@ -54,97 +44,75 @@
#define __BIGENDIAN
#endif
#define BFA_ERR KERN_ERR
#define BFA_WARNING KERN_WARNING
#define BFA_NOTICE KERN_NOTICE
#define BFA_INFO KERN_INFO
#define BFA_DEBUG KERN_DEBUG
static inline u64 bfa_os_get_clock(void)
{
return jiffies;
}
#define LOG_BFAD_INIT 0x00000001
#define LOG_FCP_IO 0x00000002
static inline u64 bfa_os_get_log_time(void)
{
u64 system_time = 0;
struct timeval tv;
do_gettimeofday(&tv);
#ifdef DEBUG
#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) \
BFA_LOG(bfad, level, mask, fmt, ## arg)
#define BFA_DEV_TRACE(bfad, level, fmt, arg...) \
BFA_DEV_PRINTF(bfad, level, fmt, ## arg)
#define BFA_TRACE(level, fmt, arg...) \
BFA_PRINTF(level, fmt, ## arg)
#else
#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...)
#define BFA_DEV_TRACE(bfad, level, fmt, arg...)
#define BFA_TRACE(level, fmt, arg...)
#endif
/* We are interested in seconds only. */
system_time = tv.tv_sec;
return system_time;
}
#define bfa_io_lat_clock_res_div HZ
#define bfa_io_lat_clock_res_mul 1000
#define BFA_ASSERT(p) do { \
if (!(p)) { \
printk(KERN_ERR "assert(%s) failed at %s:%d\n", \
#p, __FILE__, __LINE__); \
BUG(); \
} \
} while (0)
#define BFA_LOG(bfad, level, mask, fmt, arg...) \
do { \
if (((mask) & (((struct bfad_s *)(bfad))-> \
cfg_data[cfg_log_mask])) || (level[1] <= '3')) \
dev_printk(level, &(((struct bfad_s *) \
(bfad))->pcidev->dev), fmt, ##arg); \
#define BFA_LOG(level, bfad, mask, fmt, arg...) \
do { \
if (((mask) == 4) || (level[1] <= '4')) \
dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \
} while (0)
#ifndef BFA_DEV_PRINTF
#define BFA_DEV_PRINTF(bfad, level, fmt, arg...) \
dev_printk(level, &(((struct bfad_s *) \
(bfad))->pcidev->dev), fmt, ##arg);
#endif
#define BFA_PRINTF(level, fmt, arg...) \
printk(level fmt, ##arg);
int bfa_os_MWB(void *);
#define bfa_os_mmiowb() mmiowb()
#define bfa_swap_3b(_x) \
((((_x) & 0xff) << 16) | \
((_x) & 0x00ff00) | \
(((_x) & 0xff0000) >> 16))
#define bfa_swap_8b(_x) \
((((_x) & 0xff00000000000000ull) >> 56) \
| (((_x) & 0x00ff000000000000ull) >> 40) \
| (((_x) & 0x0000ff0000000000ull) >> 24) \
| (((_x) & 0x000000ff00000000ull) >> 8) \
| (((_x) & 0x00000000ff000000ull) << 8) \
| (((_x) & 0x0000000000ff0000ull) << 24) \
| (((_x) & 0x000000000000ff00ull) << 40) \
| (((_x) & 0x00000000000000ffull) << 56))
#define bfa_swap_8b(_x) \
((((_x) & 0xff00000000000000ull) >> 56) \
| (((_x) & 0x00ff000000000000ull) >> 40) \
| (((_x) & 0x0000ff0000000000ull) >> 24) \
| (((_x) & 0x000000ff00000000ull) >> 8) \
| (((_x) & 0x00000000ff000000ull) << 8) \
| (((_x) & 0x0000000000ff0000ull) << 24) \
| (((_x) & 0x000000000000ff00ull) << 40) \
| (((_x) & 0x00000000000000ffull) << 56))
#define bfa_os_swap32(_x) \
((((_x) & 0xff) << 24) | \
#define bfa_os_swap32(_x) \
((((_x) & 0xff) << 24) | \
(((_x) & 0x0000ff00) << 8) | \
(((_x) & 0x00ff0000) >> 8) | \
(((_x) & 0xff000000) >> 24))
#define bfa_os_swap_sgaddr(_x) ((u64)( \
(((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
(((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
(((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
#define bfa_os_swap_sgaddr(_x) ((u64)( \
(((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
(((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
(((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
#ifndef __BIGENDIAN
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
(((_x) & 0x00ff) << 8)))
#define bfa_os_htonl(_x) bfa_os_swap32(_x)
#define bfa_os_htonll(_x) bfa_swap_8b(_x)
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
#define bfa_os_wtole(_x) (_x)
#define bfa_os_sgaddr(_x) (_x)
@ -170,17 +138,16 @@ int bfa_os_MWB(void *);
#define bfa_os_memcpy memcpy
#define bfa_os_udelay udelay
#define bfa_os_vsprintf vsprintf
#define bfa_os_snprintf snprintf
#define bfa_os_assign(__t, __s) __t = __s
#define bfa_os_addr_t char __iomem *
#define bfa_os_panic()
#define bfa_os_addr_t void __iomem *
#define bfa_os_reg_read(_raddr) readl(_raddr)
#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
#define bfa_os_mem_read(_raddr, _off) \
#define bfa_os_mem_read(_raddr, _off) \
bfa_os_swap32(readl(((_raddr) + (_off))))
#define bfa_os_mem_write(_raddr, _off, _val) \
#define bfa_os_mem_write(_raddr, _off, _val) \
writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
#define BFA_TRC_TS(_trcm) \
@ -191,11 +158,6 @@ int bfa_os_MWB(void *);
(tv.tv_sec*1000000+tv.tv_usec); \
})
struct bfa_log_mod_s;
void bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
const char *fmt, ...);
#endif
#define boolean_t int
/**
@ -206,7 +168,15 @@ struct bfa_timeval_s {
u32 tv_usec; /* microseconds */
};
void bfa_os_gettimeofday(struct bfa_timeval_s *tv);
static inline void
bfa_os_gettimeofday(struct bfa_timeval_s *tv)
{
struct timeval tmp_tv;
do_gettimeofday(&tmp_tv);
tv->tv_sec = (u32) tmp_tv.tv_sec;
tv->tv_usec = (u32) tmp_tv.tv_usec;
}
static inline void
wwn2str(char *wwn_str, u64 wwn)


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -17,8 +17,8 @@
#ifndef __BFA_PORTLOG_H__
#define __BFA_PORTLOG_H__
#include "protocol/fc.h"
#include <defs/bfa_defs_types.h>
#include "bfa_fc.h"
#include "bfa_defs.h"
#define BFA_PL_NLOG_ENTS 256
#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
@ -27,38 +27,30 @@
#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */
enum bfa_plog_log_type {
BFA_PL_LOG_TYPE_INVALID = 0,
BFA_PL_LOG_TYPE_INT = 1,
BFA_PL_LOG_TYPE_STRING = 2,
BFA_PL_LOG_TYPE_INVALID = 0,
BFA_PL_LOG_TYPE_INT = 1,
BFA_PL_LOG_TYPE_STRING = 2,
};
/*
* the (fixed size) record format for each entry in the portlog
*/
struct bfa_plog_rec_s {
u32 tv; /* Filled by the portlog driver when the *
* entry is added to the circular log. */
u8 port; /* Source port that logged this entry. CM
* entities will use 0xFF */
u8 mid; /* Integer value to be used by all entities *
* while logging. The module id to string *
* conversion will be done by BFAL. See
* enum bfa_plog_mid */
u8 eid; /* indicates Rx, Tx, IOCTL, etc. See
* enum bfa_plog_eid */
u8 log_type; /* indicates string log or integer log.
* see bfa_plog_log_type_t */
u8 log_num_ints;
u64 tv; /* timestamp */
u8 port; /* Source port that logged this entry */
u8 mid; /* module id */
u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */
u8 log_type; /* string/integer log, bfa_plog_log_type_t */
u8 log_num_ints;
/*
* interpreted only if log_type is INT_LOG. indicates number of
* integers in the int_log[] (0-PL_INT_LOG_SZ).
*/
u8 rsvd;
u16 misc; /* can be used to indicate fc frame length,
*etc.. */
u8 rsvd;
u16 misc; /* can be used to indicate fc frame length */
union {
char string_log[BFA_PL_STRING_LOG_SZ];
u32 int_log[BFA_PL_INT_LOG_SZ];
char string_log[BFA_PL_STRING_LOG_SZ];
u32 int_log[BFA_PL_INT_LOG_SZ];
} log_entry;
};
@ -73,20 +65,20 @@ struct bfa_plog_rec_s {
* - Do not remove any entry or rearrange the order.
*/
enum bfa_plog_mid {
BFA_PL_MID_INVALID = 0,
BFA_PL_MID_DEBUG = 1,
BFA_PL_MID_DRVR = 2,
BFA_PL_MID_HAL = 3,
BFA_PL_MID_HAL_FCXP = 4,
BFA_PL_MID_HAL_UF = 5,
BFA_PL_MID_FCS = 6,
BFA_PL_MID_INVALID = 0,
BFA_PL_MID_DEBUG = 1,
BFA_PL_MID_DRVR = 2,
BFA_PL_MID_HAL = 3,
BFA_PL_MID_HAL_FCXP = 4,
BFA_PL_MID_HAL_UF = 5,
BFA_PL_MID_FCS = 6,
BFA_PL_MID_LPS = 7,
BFA_PL_MID_MAX = 8
BFA_PL_MID_MAX = 8
};
#define BFA_PL_MID_STRLEN 8
struct bfa_plog_mid_strings_s {
char m_str[BFA_PL_MID_STRLEN];
char m_str[BFA_PL_MID_STRLEN];
};
/*
@ -99,36 +91,37 @@ struct bfa_plog_mid_strings_s {
* - Do not remove any entry or rearrange the order.
*/
enum bfa_plog_eid {
BFA_PL_EID_INVALID = 0,
BFA_PL_EID_IOC_DISABLE = 1,
BFA_PL_EID_IOC_ENABLE = 2,
BFA_PL_EID_PORT_DISABLE = 3,
BFA_PL_EID_PORT_ENABLE = 4,
BFA_PL_EID_PORT_ST_CHANGE = 5,
BFA_PL_EID_TX = 6,
BFA_PL_EID_TX_ACK1 = 7,
BFA_PL_EID_TX_RJT = 8,
BFA_PL_EID_TX_BSY = 9,
BFA_PL_EID_RX = 10,
BFA_PL_EID_RX_ACK1 = 11,
BFA_PL_EID_RX_RJT = 12,
BFA_PL_EID_RX_BSY = 13,
BFA_PL_EID_CT_IN = 14,
BFA_PL_EID_CT_OUT = 15,
BFA_PL_EID_DRIVER_START = 16,
BFA_PL_EID_RSCN = 17,
BFA_PL_EID_DEBUG = 18,
BFA_PL_EID_MISC = 19,
BFA_PL_EID_INVALID = 0,
BFA_PL_EID_IOC_DISABLE = 1,
BFA_PL_EID_IOC_ENABLE = 2,
BFA_PL_EID_PORT_DISABLE = 3,
BFA_PL_EID_PORT_ENABLE = 4,
BFA_PL_EID_PORT_ST_CHANGE = 5,
BFA_PL_EID_TX = 6,
BFA_PL_EID_TX_ACK1 = 7,
BFA_PL_EID_TX_RJT = 8,
BFA_PL_EID_TX_BSY = 9,
BFA_PL_EID_RX = 10,
BFA_PL_EID_RX_ACK1 = 11,
BFA_PL_EID_RX_RJT = 12,
BFA_PL_EID_RX_BSY = 13,
BFA_PL_EID_CT_IN = 14,
BFA_PL_EID_CT_OUT = 15,
BFA_PL_EID_DRIVER_START = 16,
BFA_PL_EID_RSCN = 17,
BFA_PL_EID_DEBUG = 18,
BFA_PL_EID_MISC = 19,
BFA_PL_EID_FIP_FCF_DISC = 20,
BFA_PL_EID_FIP_FCF_CVL = 21,
BFA_PL_EID_LOGIN = 22,
BFA_PL_EID_LOGO = 23,
BFA_PL_EID_MAX = 24
BFA_PL_EID_TRUNK_SCN = 24,
BFA_PL_EID_MAX
};
#define BFA_PL_ENAME_STRLEN 8
#define BFA_PL_ENAME_STRLEN 8
struct bfa_plog_eid_strings_s {
char e_str[BFA_PL_ENAME_STRLEN];
char e_str[BFA_PL_ENAME_STRLEN];
};
#define BFA_PL_SIG_LEN 8
@ -138,12 +131,12 @@ struct bfa_plog_eid_strings_s {
* per port circular log buffer
*/
struct bfa_plog_s {
char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
u8 plog_enabled;
u8 rsvd[7];
u32 ticks;
u16 head;
u16 tail;
char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */
u8 plog_enabled;
u8 rsvd[7];
u32 ticks;
u16 head;
u16 tail;
struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS];
};
@ -154,8 +147,7 @@ void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc,
u32 *intarr, u32 num_ints);
void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc,
struct fchs_s *fchdr);
enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc,
struct fchs_s *fchdr, u32 pld_w0);
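
For orientation, the sketch below shows how a producer might record an entry in this per-port circular log, modelled on the bfa_plog_str() calls in the LPS code earlier in this diff. The bfa_plog_str() prototype itself is outside the visible hunk, so the argument types are inferred, and the wrapper name is hypothetical.

/* Illustrative only: note a trunk state change in the portlog, using the
 * BFA_PL_EID_TRUNK_SCN event id added above.
 */
static void
my_log_trunk_scn(struct bfa_plog_s *plog)
{
	bfa_plog_str(plog, BFA_PL_MID_HAL, BFA_PL_EID_TRUNK_SCN,
			0, "trunk state change");
}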


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -15,30 +15,25 @@
* General Public License for more details.
*/
#include <defs/bfa_defs_port.h>
#include <cs/bfa_trc.h>
#include <cs/bfa_log.h>
#include <cs/bfa_debug.h>
#include <port/bfa_port.h>
#include <bfi/bfi.h>
#include <bfi/bfi_port.h>
#include <bfa_ioc.h>
#include <cna/bfa_cna_trcmod.h>
#include "bfa_defs_svc.h"
#include "bfa_port.h"
#include "bfi.h"
#include "bfa_ioc.h"
BFA_TRC_FILE(CNA, PORT);
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
static void
bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
{
u32 *dip = (u32 *) stats;
u32 t0, t1;
int i;
u32 *dip = (u32 *) stats;
u32 t0, t1;
int i;
for (i = 0; i < sizeof(union bfa_pport_stats_u) / sizeof(u32);
i += 2) {
for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
i += 2) {
t0 = dip[i];
t1 = dip[i + 1];
#ifdef __BIGENDIAN
@ -49,11 +44,6 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
dip[i + 1] = bfa_os_ntohl(t0);
#endif
}
/** todo
* QoS stats are also swapped as 64bit; that structure also
* has to use 64 bit counters
*/
}
/**
@ -68,7 +58,9 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
static void
bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
{
bfa_assert(0);
bfa_trc(port, status);
port->endis_pending = BFA_FALSE;
port->endis_cbfn(port->endis_cbarg, status);
}
/**
@ -83,7 +75,9 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
{
bfa_assert(0);
bfa_trc(port, status);
port->endis_pending = BFA_FALSE;
port->endis_cbfn(port->endis_cbarg, status);
}
/**
@ -105,7 +99,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
struct bfa_timeval_s tv;
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_pport_stats_u));
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
bfa_os_gettimeofday(&tv);
@ -133,11 +127,11 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
struct bfa_timeval_s tv;
port->stats_status = status;
port->stats_busy = BFA_FALSE;
port->stats_busy = BFA_FALSE;
/**
* re-initialize time stamp for stats reset
*/
* re-initialize time stamp for stats reset
*/
bfa_os_gettimeofday(&tv);
port->stats_reset_time = tv.tv_sec;
@ -158,10 +152,10 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
struct bfa_port_s *port = (struct bfa_port_s *)cbarg;
struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
union bfi_port_i2h_msg_u *i2hmsg;
i2hmsg = (union bfi_port_i2h_msg_u *)m;
i2hmsg = (union bfi_port_i2h_msg_u *) m;
bfa_trc(port, m->mh.msg_id);
switch (m->mh.msg_id) {
@ -178,9 +172,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
break;
case BFI_PORT_I2H_GET_STATS_RSP:
/*
* Stats busy flag is still set? (may be cmd timed out)
*/
/* Stats busy flag is still set? (may be cmd timed out) */
if (port->stats_busy == BFA_FALSE)
break;
bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
@ -208,7 +200,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
u32
bfa_port_meminfo(void)
{
return BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), BFA_DMA_ALIGN_SZ);
return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
}
/**
@ -216,8 +208,8 @@ bfa_port_meminfo(void)
*
*
* @param[in] port Port module pointer
* dma_kva Kernel Virtual Address of Port DMA Memory
* dma_pa Physical Address of Port DMA Memory
* dma_kva Kernel Virtual Address of Port DMA Memory
* dma_pa Physical Address of Port DMA Memory
*
* @return void
*/
@ -225,7 +217,7 @@ void
bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
{
port->stats_dma.kva = dma_kva;
port->stats_dma.pa = dma_pa;
port->stats_dma.pa = dma_pa;
}
/**
@ -239,12 +231,14 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
*/
bfa_status_t
bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
void *cbarg)
void *cbarg)
{
struct bfi_port_generic_req_s *m;
/** todo Not implemented */
bfa_assert(0);
if (bfa_ioc_is_disabled(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_DISABLED);
return BFA_STATUS_IOC_DISABLED;
}
if (!bfa_ioc_is_operational(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@ -256,11 +250,11 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_DEVBUSY;
}
m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;
m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
port->msgtag++;
port->endis_cbfn = cbfn;
port->endis_cbarg = cbarg;
port->endis_cbfn = cbfn;
port->endis_cbarg = cbarg;
port->endis_pending = BFA_TRUE;
bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
@ -281,12 +275,14 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
*/
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
void *cbarg)
void *cbarg)
{
struct bfi_port_generic_req_s *m;
/** todo Not implemented */
bfa_assert(0);
if (bfa_ioc_is_disabled(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_DISABLED);
return BFA_STATUS_IOC_DISABLED;
}
if (!bfa_ioc_is_operational(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@ -298,11 +294,11 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
return BFA_STATUS_DEVBUSY;
}
m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;
m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
port->msgtag++;
port->endis_cbfn = cbfn;
port->endis_cbarg = cbarg;
port->endis_cbfn = cbfn;
port->endis_cbarg = cbarg;
port->endis_pending = BFA_TRUE;
bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
@ -322,8 +318,8 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
* @return Status
*/
bfa_status_t
bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
bfa_port_stats_cbfn_t cbfn, void *cbarg)
bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
bfa_port_stats_cbfn_t cbfn, void *cbarg)
{
struct bfi_port_get_stats_req_s *m;
@ -337,12 +333,12 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
return BFA_STATUS_DEVBUSY;
}
m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg;
m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;
port->stats = stats;
port->stats_cbfn = cbfn;
port->stats = stats;
port->stats_cbfn = cbfn;
port->stats_cbarg = cbarg;
port->stats_busy = BFA_TRUE;
port->stats_busy = BFA_TRUE;
bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);
bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
@ -362,7 +358,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
*/
bfa_status_t
bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
void *cbarg)
void *cbarg)
{
struct bfi_port_generic_req_s *m;
@ -376,11 +372,11 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
return BFA_STATUS_DEVBUSY;
}
m = (struct bfi_port_generic_req_s *)port->stats_mb.msg;
m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;
port->stats_cbfn = cbfn;
port->stats_cbfn = cbfn;
port->stats_cbarg = cbarg;
port->stats_busy = BFA_TRUE;
port->stats_busy = BFA_TRUE;
bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
bfa_ioc_portid(port->ioc));
@ -400,11 +396,9 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
void
bfa_port_hbfail(void *arg)
{
struct bfa_port_s *port = (struct bfa_port_s *)arg;
struct bfa_port_s *port = (struct bfa_port_s *) arg;
/*
* Fail any pending get_stats/clear_stats requests
*/
/* Fail any pending get_stats/clear_stats requests */
if (port->stats_busy) {
if (port->stats_cbfn)
port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
@ -412,9 +406,7 @@ bfa_port_hbfail(void *arg)
port->stats_busy = BFA_FALSE;
}
/*
* Clear any enable/disable is pending
*/
/* Clear any enable/disable is pending */
if (port->endis_pending) {
if (port->endis_cbfn)
port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
@ -433,22 +425,20 @@ bfa_port_hbfail(void *arg)
* The device driver specific mbox ISR functions have
* this pointer as one of the parameters.
* trcmod -
* logmod -
*
* @return void
*/
void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
struct bfa_timeval_s tv;
bfa_assert(port);
port->dev = dev;
port->ioc = ioc;
port->dev = dev;
port->ioc = ioc;
port->trcmod = trcmod;
port->logmod = logmod;
port->stats_busy = BFA_FALSE;
port->endis_pending = BFA_FALSE;


@ -0,0 +1,66 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_PORT_H__
#define __BFA_PORT_H__
#include "bfa_defs_svc.h"
#include "bfa_ioc.h"
#include "bfa_cs.h"
typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
struct bfa_port_s {
void *dev;
struct bfa_ioc_s *ioc;
struct bfa_trc_mod_s *trcmod;
u32 msgtag;
bfa_boolean_t stats_busy;
struct bfa_mbox_cmd_s stats_mb;
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
u32 stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
struct bfa_mbox_cmd_s endis_mb;
bfa_port_endis_cbfn_t endis_cbfn;
void *endis_cbarg;
bfa_status_t endis_status;
struct bfa_ioc_hbfail_notify_s hbfail;
};
void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod);
void bfa_port_detach(struct bfa_port_s *port);
void bfa_port_hbfail(void *arg);
bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
union bfa_port_stats_u *stats,
bfa_port_stats_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
bfa_port_stats_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_enable(struct bfa_port_s *port,
bfa_port_endis_cbfn_t cbfn, void *cbarg);
bfa_status_t bfa_port_disable(struct bfa_port_s *port,
bfa_port_endis_cbfn_t cbfn, void *cbarg);
u32 bfa_port_meminfo(void);
void bfa_port_mem_claim(struct bfa_port_s *port,
u8 *dma_kva, u64 dma_pa);
#endif /* __BFA_PORT_H__ */
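
To tie the declarations above together, here is a minimal, illustrative bring-up sketch: size and claim the stats DMA area, attach the module, then request statistics. Names prefixed with my_ are hypothetical and the DMA allocation itself is elided.

/* Illustrative sketch only -- not part of the driver sources. */
static void
my_stats_cb(void *dev, bfa_status_t status)
{
	if (status != BFA_STATUS_OK)
		printk(KERN_WARNING "port stats fetch failed: %d\n", status);
}

static void
my_port_setup(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
	      struct bfa_trc_mod_s *trcmod, void *dev,
	      u8 *dma_kva, u64 dma_pa, union bfa_port_stats_u *stats)
{
	/* dma_kva/dma_pa must cover at least bfa_port_meminfo() bytes */
	bfa_port_attach(port, ioc, dev, trcmod);
	bfa_port_mem_claim(port, dma_kva, dma_pa);

	/* Completion is delivered through my_stats_cb() */
	bfa_port_get_stats(port, stats, my_stats_cb, dev);
}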


@ -1,94 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_PORT_PRIV_H__
#define __BFA_PORT_PRIV_H__
#include <defs/bfa_defs_pport.h>
#include <bfi/bfi_pport.h>
#include "bfa_intr_priv.h"
/**
* Link notification data structure
*/
struct bfa_fcport_ln_s {
struct bfa_fcport_s *fcport;
bfa_sm_t sm;
struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
enum bfa_pport_linkstate ln_event; /* ln event for callback */
};
/**
* BFA FC port data structure
*/
struct bfa_fcport_s {
struct bfa_s *bfa; /* parent BFA instance */
bfa_sm_t sm; /* port state machine */
wwn_t nwwn; /* node wwn of physical port */
wwn_t pwwn; /* port wwn of physical port */
enum bfa_pport_speed speed_sup;
/* supported speeds */
enum bfa_pport_speed speed; /* current speed */
enum bfa_pport_topology topology; /* current topology */
u8 myalpa; /* my ALPA in LOOP topology */
u8 rsvd[3];
u32 mypid:24;
u32 rsvd_b:8;
struct bfa_pport_cfg_s cfg; /* current port configuration */
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
struct bfa_reqq_wait_s reqq_wait;
/* to wait for room in reqq */
struct bfa_reqq_wait_s svcreq_wait;
/* to wait for room in reqq */
struct bfa_reqq_wait_s stats_reqq_wait;
/* to wait for room in reqq (stats) */
void *event_cbarg;
void (*event_cbfn) (void *cbarg,
bfa_pport_event_t event);
union {
union bfi_fcport_i2h_msg_u i2hmsg;
} event_arg;
void *bfad; /* BFA driver handle */
struct bfa_fcport_ln_s ln; /* Link Notification */
struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
struct bfa_timer_s timer; /* timer */
u32 msgtag; /* firmware msg tag for reply */
u8 *stats_kva;
u64 stats_pa;
union bfa_fcport_stats_u *stats;
union bfa_fcport_stats_u *stats_ret; /* driver stats location */
bfa_status_t stats_status; /* stats/statsclr status */
bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
bfa_boolean_t stats_qfull;
u32 stats_reset_time; /* stats reset time stamp */
bfa_cb_pport_t stats_cbfn; /* driver callback function */
void *stats_cbarg; /* user callback arg */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
};
#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
/*
* public functions
*/
void bfa_fcport_init(struct bfa_s *bfa);
void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_PORT_PRIV_H__ */


@ -1,906 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfa_svc.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_rport.h>
#include "bfa_intr_priv.h"
BFA_TRC_FILE(HAL, RPORT);
BFA_MODULE(rport);
#define bfa_rport_offline_cb(__rp) do { \
if ((__rp)->bfa->fcs) \
bfa_cb_rport_offline((__rp)->rport_drv); \
else { \
bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
__bfa_cb_rport_offline, (__rp)); \
} \
} while (0)
#define bfa_rport_online_cb(__rp) do { \
if ((__rp)->bfa->fcs) \
bfa_cb_rport_online((__rp)->rport_drv); \
else { \
bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
__bfa_cb_rport_online, (__rp)); \
} \
} while (0)
/*
* forward declarations
*/
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
/**
* bfa_rport_sm BFA rport state machine
*/
enum bfa_rport_event {
BFA_RPORT_SM_CREATE = 1, /* rport create event */
BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */
BFA_RPORT_SM_ONLINE = 3, /* rport is online */
BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */
BFA_RPORT_SM_FWRSP = 5, /* firmware response */
BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */
BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */
BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */
BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */
};
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_created(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_online(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
enum bfa_rport_event event);
/**
* Beginning state, only online event expected.
*/
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_CREATE:
bfa_stats(rp, sm_un_cr);
bfa_sm_set_state(rp, bfa_rport_sm_created);
break;
default:
bfa_stats(rp, sm_un_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_ONLINE:
bfa_stats(rp, sm_cr_on);
if (bfa_rport_send_fwcreate(rp))
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
else
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_cr_del);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_cr_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
default:
bfa_stats(rp, sm_cr_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Waiting for rport create response from firmware.
*/
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_FWRSP:
bfa_stats(rp, sm_fwc_rsp);
bfa_sm_set_state(rp, bfa_rport_sm_online);
bfa_rport_online_cb(rp);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_fwc_del);
bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
break;
case BFA_RPORT_SM_OFFLINE:
bfa_stats(rp, sm_fwc_off);
bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_fwc_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
default:
bfa_stats(rp, sm_fwc_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Request queue is full, awaiting queue resume to send create request.
*/
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_QRESUME:
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
bfa_rport_send_fwcreate(rp);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_fwc_del);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_reqq_wcancel(&rp->reqq_wait);
bfa_rport_free(rp);
break;
case BFA_RPORT_SM_OFFLINE:
bfa_stats(rp, sm_fwc_off);
bfa_sm_set_state(rp, bfa_rport_sm_offline);
bfa_reqq_wcancel(&rp->reqq_wait);
bfa_rport_offline_cb(rp);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_fwc_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
bfa_reqq_wcancel(&rp->reqq_wait);
break;
default:
bfa_stats(rp, sm_fwc_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Online state - normal parking state.
*/
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
struct bfi_rport_qos_scn_s *qos_scn;
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_OFFLINE:
bfa_stats(rp, sm_on_off);
if (bfa_rport_send_fwdelete(rp))
bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
else
bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_on_del);
if (bfa_rport_send_fwdelete(rp))
bfa_sm_set_state(rp, bfa_rport_sm_deleting);
else
bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_on_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
case BFA_RPORT_SM_SET_SPEED:
bfa_rport_send_fwspeed(rp);
break;
case BFA_RPORT_SM_QOS_SCN:
qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
rp->qos_attr = qos_scn->new_qos_attr;
bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
qos_scn->old_qos_attr.qos_flow_id =
bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
qos_scn->new_qos_attr.qos_flow_id =
bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
qos_scn->old_qos_attr.qos_priority =
bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
qos_scn->new_qos_attr.qos_priority =
bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);
if (qos_scn->old_qos_attr.qos_flow_id !=
qos_scn->new_qos_attr.qos_flow_id)
bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
qos_scn->old_qos_attr,
qos_scn->new_qos_attr);
if (qos_scn->old_qos_attr.qos_priority !=
qos_scn->new_qos_attr.qos_priority)
bfa_cb_rport_qos_scn_prio(rp->rport_drv,
qos_scn->old_qos_attr,
qos_scn->new_qos_attr);
break;
default:
bfa_stats(rp, sm_on_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Firmware rport is being deleted - awaiting f/w response.
*/
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_FWRSP:
bfa_stats(rp, sm_fwd_rsp);
bfa_sm_set_state(rp, bfa_rport_sm_offline);
bfa_rport_offline_cb(rp);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_fwd_del);
bfa_sm_set_state(rp, bfa_rport_sm_deleting);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_fwd_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
bfa_rport_offline_cb(rp);
break;
default:
bfa_stats(rp, sm_fwd_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_QRESUME:
bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
bfa_rport_send_fwdelete(rp);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_fwd_del);
bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_fwd_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
bfa_reqq_wcancel(&rp->reqq_wait);
bfa_rport_offline_cb(rp);
break;
default:
bfa_stats(rp, sm_fwd_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Offline state.
*/
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_off_del);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
case BFA_RPORT_SM_ONLINE:
bfa_stats(rp, sm_off_on);
if (bfa_rport_send_fwcreate(rp))
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
else
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_off_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
default:
bfa_stats(rp, sm_off_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Rport is deleted, waiting for firmware response to delete.
*/
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_FWRSP:
bfa_stats(rp, sm_del_fwrsp);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_del_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
default:
bfa_sm_fault(rp->bfa, event);
}
}
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_QRESUME:
bfa_stats(rp, sm_del_fwrsp);
bfa_sm_set_state(rp, bfa_rport_sm_deleting);
bfa_rport_send_fwdelete(rp);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_del_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_reqq_wcancel(&rp->reqq_wait);
bfa_rport_free(rp);
break;
default:
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Waiting for rport create response from firmware. A delete is pending.
*/
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_FWRSP:
bfa_stats(rp, sm_delp_fwrsp);
if (bfa_rport_send_fwdelete(rp))
bfa_sm_set_state(rp, bfa_rport_sm_deleting);
else
bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_delp_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
default:
bfa_stats(rp, sm_delp_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* Waiting for rport create response from firmware. Rport offline is pending.
*/
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_FWRSP:
bfa_stats(rp, sm_offp_fwrsp);
if (bfa_rport_send_fwdelete(rp))
bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
else
bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_offp_del);
bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
break;
case BFA_RPORT_SM_HWFAIL:
bfa_stats(rp, sm_offp_hwf);
bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
break;
default:
bfa_stats(rp, sm_offp_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* IOC h/w failed.
*/
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
bfa_trc(rp->bfa, rp->rport_tag);
bfa_trc(rp->bfa, event);
switch (event) {
case BFA_RPORT_SM_OFFLINE:
bfa_stats(rp, sm_iocd_off);
bfa_rport_offline_cb(rp);
break;
case BFA_RPORT_SM_DELETE:
bfa_stats(rp, sm_iocd_del);
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
bfa_rport_free(rp);
break;
case BFA_RPORT_SM_ONLINE:
bfa_stats(rp, sm_iocd_on);
if (bfa_rport_send_fwcreate(rp))
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
else
bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
break;
case BFA_RPORT_SM_HWFAIL:
break;
default:
bfa_stats(rp, sm_iocd_unexp);
bfa_sm_fault(rp->bfa, event);
}
}
/**
* bfa_rport_private BFA rport private functions
*/
static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
struct bfa_rport_s *rp = cbarg;
if (complete)
bfa_cb_rport_online(rp->rport_drv);
}
static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
struct bfa_rport_s *rp = cbarg;
if (complete)
bfa_cb_rport_offline(rp->rport_drv);
}
static void
bfa_rport_qresume(void *cbarg)
{
struct bfa_rport_s *rp = cbarg;
bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
{
if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
cfg->fwcfg.num_rports = BFA_RPORT_MIN;
*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
struct bfa_rport_s *rp;
u16 i;
INIT_LIST_HEAD(&mod->rp_free_q);
INIT_LIST_HEAD(&mod->rp_active_q);
rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
mod->rps_list = rp;
mod->num_rports = cfg->fwcfg.num_rports;
bfa_assert(mod->num_rports
&& !(mod->num_rports & (mod->num_rports - 1)));
for (i = 0; i < mod->num_rports; i++, rp++) {
bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
rp->bfa = bfa;
rp->rport_tag = i;
bfa_sm_set_state(rp, bfa_rport_sm_uninit);
/**
* rport tag 0 is reserved (unused), so it is not added to the free list
*/
if (i)
list_add_tail(&rp->qe, &mod->rp_free_q);
bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
}
/**
* consume memory
*/
bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
struct bfa_rport_s *rport;
struct list_head *qe, *qen;
list_for_each_safe(qe, qen, &mod->rp_active_q) {
rport = (struct bfa_rport_s *) qe;
bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
}
}
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
struct bfa_rport_s *rport;
bfa_q_deq(&mod->rp_free_q, &rport);
if (rport)
list_add_tail(&rport->qe, &mod->rp_active_q);
return rport;
}
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
list_del(&rport->qe);
list_add_tail(&rport->qe, &mod->rp_free_q);
}
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
struct bfi_rport_create_req_s *m;
/**
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
if (!m) {
bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
return BFA_FALSE;
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
bfa_lpuid(rp->bfa));
m->bfa_handle = rp->rport_tag;
m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
m->pid = rp->rport_info.pid;
m->lp_tag = rp->rport_info.lp_tag;
m->local_pid = rp->rport_info.local_pid;
m->fc_class = rp->rport_info.fc_class;
m->vf_en = rp->rport_info.vf_en;
m->vf_id = rp->rport_info.vf_id;
m->cisc = rp->rport_info.cisc;
/**
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
return BFA_TRUE;
}
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
struct bfi_rport_delete_req_s *m;
/**
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
if (!m) {
bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
return BFA_FALSE;
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
bfa_lpuid(rp->bfa));
m->fw_handle = rp->fw_handle;
/**
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
return BFA_TRUE;
}
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
struct bfa_rport_speed_req_s *m;
/**
* check for room in queue to send request now
*/
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
if (!m) {
bfa_trc(rp->bfa, rp->rport_info.speed);
return BFA_FALSE;
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
bfa_lpuid(rp->bfa));
m->fw_handle = rp->fw_handle;
m->speed = (u8)rp->rport_info.speed;
/**
* queue I/O message to firmware
*/
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
return BFA_TRUE;
}
/**
* bfa_rport_public
*/
/**
* Rport interrupt processing.
*/
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
union bfi_rport_i2h_msg_u msg;
struct bfa_rport_s *rp;
bfa_trc(bfa, m->mhdr.msg_id);
msg.msg = m;
switch (m->mhdr.msg_id) {
case BFI_RPORT_I2H_CREATE_RSP:
rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
rp->fw_handle = msg.create_rsp->fw_handle;
rp->qos_attr = msg.create_rsp->qos_attr;
bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
case BFI_RPORT_I2H_DELETE_RSP:
rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
case BFI_RPORT_I2H_QOS_SCN:
rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
rp->event_arg.fw_msg = msg.qos_scn_evt;
bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
break;
default:
bfa_trc(bfa, m->mhdr.msg_id);
bfa_assert(0);
}
}
/**
* bfa_rport_api
*/
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
struct bfa_rport_s *rp;
rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
if (rp == NULL)
return NULL;
rp->bfa = bfa;
rp->rport_drv = rport_drv;
bfa_rport_clear_stats(rp);
bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
return rp;
}
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
bfa_assert(rport_info->max_frmsz != 0);
/**
* Some JBODs do not set the PDU size correctly in PLOGI
* responses; default to the minimum size.
*/
if (rport_info->max_frmsz == 0) {
bfa_trc(rport->bfa, rport->rport_tag);
rport_info->max_frmsz = FC_MIN_PDUSZ;
}
bfa_os_assign(rport->rport_info, *rport_info);
bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
{
bfa_assert(speed != 0);
bfa_assert(speed != BFA_PPORT_SPEED_AUTO);
rport->rport_info.speed = speed;
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
struct bfa_rport_hal_stats_s *stats)
{
*stats = rport->stats;
}
void
bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
struct bfa_rport_qos_attr_s *qos_attr)
{
qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
}
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}


@ -1,45 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_RPORT_PRIV_H__
#define __BFA_RPORT_PRIV_H__
#include <bfa_svc.h>
#define BFA_RPORT_MIN 4
struct bfa_rport_mod_s {
struct bfa_rport_s *rps_list; /* list of rports */
struct list_head rp_free_q; /* free bfa_rports */
struct list_head rp_active_q; /* active bfa_rports */
u16 num_rports; /* number of rports */
};
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
/**
* Convert rport tag to RPORT
*/
#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
(BFA_RPORT_MOD(__bfa)->rps_list + \
((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
/*
* external functions
*/
void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_RPORT_PRIV_H__ */


@ -1,226 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
BFA_TRC_FILE(HAL, SGPG);
BFA_MODULE(sgpg);
/**
* bfa_sgpg_mod BFA SGPG Mode module
*/
/**
* Compute and return memory needed by the SGPG module.
*/
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
u32 *dm_len)
{
if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
}
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
int i;
struct bfa_sgpg_s *hsgpg;
struct bfi_sgpg_s *sgpg;
u64 align_len;
union {
u64 pa;
union bfi_addr_u addr;
} sgpg_pa;
INIT_LIST_HEAD(&mod->sgpg_q);
INIT_LIST_HEAD(&mod->sgpg_wait_q);
bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
mod->sgpg_arr_pa += align_len;
mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
align_len);
mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
align_len);
hsgpg = mod->hsgpg_arr;
sgpg = mod->sgpg_arr;
sgpg_pa.pa = mod->sgpg_arr_pa;
mod->free_sgpgs = mod->num_sgpgs;
bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
for (i = 0; i < mod->num_sgpgs; i++) {
bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
bfa_os_memset(sgpg, 0, sizeof(*sgpg));
hsgpg->sgpg = sgpg;
hsgpg->sgpg_pa = sgpg_pa.addr;
list_add_tail(&hsgpg->qe, &mod->sgpg_q);
hsgpg++;
sgpg++;
sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
}
bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
/**
* bfa_sgpg_public BFA SGPG public functions
*/
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
struct bfa_sgpg_s *hsgpg;
int i;
bfa_trc_fp(bfa, nsgpgs);
if (mod->free_sgpgs < nsgpgs)
return BFA_STATUS_ENOMEM;
for (i = 0; i < nsgpgs; i++) {
bfa_q_deq(&mod->sgpg_q, &hsgpg);
bfa_assert(hsgpg);
list_add_tail(&hsgpg->qe, sgpg_q);
}
mod->free_sgpgs -= nsgpgs;
return BFA_STATUS_OK;
}
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
struct bfa_sgpg_wqe_s *wqe;
bfa_trc_fp(bfa, nsgpg);
mod->free_sgpgs += nsgpg;
bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
list_splice_tail_init(sgpg_q, &mod->sgpg_q);
if (list_empty(&mod->sgpg_wait_q))
return;
/**
* satisfy as many waiting requests as possible
*/
do {
wqe = bfa_q_first(&mod->sgpg_wait_q);
if (mod->free_sgpgs < wqe->nsgpg)
nsgpg = mod->free_sgpgs;
else
nsgpg = wqe->nsgpg;
bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
wqe->nsgpg -= nsgpg;
if (wqe->nsgpg == 0) {
list_del(&wqe->qe);
wqe->cbfn(wqe->cbarg);
}
} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
bfa_assert(nsgpg > 0);
bfa_assert(nsgpg > mod->free_sgpgs);
wqe->nsgpg_total = wqe->nsgpg = nsgpg;
/**
* allocate any remaining free SG pages to this request first
*/
if (mod->free_sgpgs) {
/**
* no one else is waiting for SGPG
*/
bfa_assert(list_empty(&mod->sgpg_wait_q));
list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
wqe->nsgpg -= mod->free_sgpgs;
mod->free_sgpgs = 0;
}
list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
list_del(&wqe->qe);
if (wqe->nsgpg_total != wqe->nsgpg)
bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
wqe->nsgpg_total - wqe->nsgpg);
}
void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
void *cbarg)
{
INIT_LIST_HEAD(&wqe->sgpg_q);
wqe->cbfn = cbfn;
wqe->cbarg = cbarg;
}


@ -1,79 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* hal_sgpg.h BFA SG page module
*/
#ifndef __BFA_SGPG_PRIV_H__
#define __BFA_SGPG_PRIV_H__
#include <cs/bfa_q.h>
#define BFA_SGPG_MIN (16)
/**
* Alignment macro for SG page allocation
*/
#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
& ~(sizeof(struct bfi_sgpg_s) - 1))
struct bfa_sgpg_wqe_s {
struct list_head qe; /* queue sg page element */
int nsgpg; /* pages to be allocated */
int nsgpg_total; /* total pages required */
void (*cbfn) (void *cbarg);
/* callback function */
void *cbarg; /* callback arg */
struct list_head sgpg_q; /* queue of alloced sgpgs */
};
struct bfa_sgpg_s {
struct list_head qe; /* queue sg page element */
struct bfi_sgpg_s *sgpg; /* va of SG page */
union bfi_addr_u sgpg_pa;/* pa of SG page */
};
/**
* Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
* SG pages required.
*/
#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
struct bfa_sgpg_mod_s {
struct bfa_s *bfa;
int num_sgpgs; /* number of SG pages */
int free_sgpgs; /* number of free SG pages */
struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
u64 sgpg_arr_pa; /* SG page array DMA addr */
struct list_head sgpg_q; /* queue of free SG pages */
struct list_head sgpg_wait_q; /* wait queue for SG pages */
};
#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
int nsgpgs);
void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q,
int nsgpgs);
void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
void (*cbfn) (void *cbarg), void *cbarg);
void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
int nsgpgs);
void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
#endif /* __BFA_SGPG_PRIV_H__ */


@ -1,38 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfasm.c BFA State machine utility functions
*/
#include <cs/bfa_sm.h>
/**
* cs_sm_api
*/
int
bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
{
int i = 0;
while (smt[i].sm && smt[i].sm != sm)
i++;
return smt[i].state;
}
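/*
 * Illustrative usage (hypothetical table contents; only the {sm, state}
 * layout is taken from the loop above). Callers build a table mapping
 * state-function pointers to enum values, terminated by a NULL entry whose
 * .state is returned as the default:
 *
 *	static struct bfa_sm_table_s rport_sm_table[] = {
 *		{ .sm = (bfa_sm_t)bfa_rport_sm_uninit, .state = 1 },
 *		{ .sm = (bfa_sm_t)bfa_rport_sm_online, .state = 4 },
 *		{ .sm = NULL, .state = 0 },
 *	};
 *
 *	state = bfa_sm_to_state(rport_sm_table, (bfa_sm_t)rport->sm);
 */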

drivers/scsi/bfa/bfa_svc.c (new file, 5423 lines)

Diff not shown because of its large size.

drivers/scsi/bfa/bfa_svc.h (new file, 657 lines)

@ -0,0 +1,657 @@
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_SVC_H__
#define __BFA_SVC_H__
#include "bfa_cs.h"
#include "bfi_ms.h"
/**
* Scatter-gather DMA related defines
*/
#define BFA_SGPG_MIN (16)
/**
* Alignment macro for SG page allocation
*/
#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
& ~(sizeof(struct bfi_sgpg_s) - 1))
struct bfa_sgpg_wqe_s {
struct list_head qe; /* queue sg page element */
int nsgpg; /* pages to be allocated */
int nsgpg_total; /* total pages required */
void (*cbfn) (void *cbarg); /* callback function */
void *cbarg; /* callback arg */
struct list_head sgpg_q; /* queue of alloced sgpgs */
};
struct bfa_sgpg_s {
struct list_head qe; /* queue sg page element */
struct bfi_sgpg_s *sgpg; /* va of SG page */
union bfi_addr_u sgpg_pa; /* pa of SG page */
};
/**
* Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
* SG pages required.
*/
#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
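/*
 * Worked example (illustrative numbers; BFI_SGPG_DATA_SGES comes from the
 * bfi headers and is assumed to be 7 here): a request carrying 20 SG
 * elements needs BFA_SGPG_NPAGE(20) = 20/7 + 1 = 3 SG pages; note the macro
 * always reserves one extra page, even for exact multiples.
 * BFA_SGPG_ROUNDUP() above rounds an address up to sizeof(struct
 * bfi_sgpg_s) using a mask, so it relies on that size being a power of
 * two -- e.g. with a hypothetical 512-byte page, 0x1234 rounds up to 0x1400.
 */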
struct bfa_sgpg_mod_s {
struct bfa_s *bfa;
int num_sgpgs; /* number of SG pages */
int free_sgpgs; /* number of free SG pages */
struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
u64 sgpg_arr_pa; /* SG page array DMA addr */
struct list_head sgpg_q; /* queue of free SG pages */
struct list_head sgpg_wait_q; /* wait queue for SG pages */
};
#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
int nsgpgs);
void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
void (*cbfn) (void *cbarg), void *cbarg);
void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
/**
* FCXP related defines
*/
#define BFA_FCXP_MIN (1)
#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
struct list_head fcxp_free_q; /* free FCXPs */
struct list_head fcxp_active_q; /* active FCXPs */
void *req_pld_list_kva; /* list of FCXP req pld */
u64 req_pld_list_pa; /* list of FCXP req pld */
void *rsp_pld_list_kva; /* list of FCXP resp pld */
u64 rsp_pld_list_pa; /* list of FCXP resp pld */
struct list_head wait_q; /* wait queue for free fcxp */
u32 req_pld_sz;
u32 rsp_pld_sz;
};
#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
void *cb_arg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs);
typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
void *cbarg, enum bfa_status req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs);
typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
/**
* Information needed for a FCXP request
*/
struct bfa_fcxp_req_info_s {
struct bfa_rport_s *bfa_rport;
/** Pointer to the bfa rport that was
* returned from bfa_rport_create().
* This could be left NULL for WKA or
* for FCXP interactions before the
* rport nexus is established
*/
struct fchs_s fchs; /* request FC header structure */
u8 cts; /* continuous sequence */
u8 class; /* FC class for the request/response */
u16 max_frmsz; /* max send frame size */
u16 vf_id; /* vsan tag if applicable */
u8 lp_tag; /* lport tag */
u32 req_tot_len; /* request payload total length */
};
struct bfa_fcxp_rsp_info_s {
struct fchs_s rsp_fchs;
/* Response frame's FC header will
 * be sent back in this field */
u8 rsp_timeout;
/* timeout in seconds, 0: no response */
u8 rsvd2[3];
u32 rsp_maxlen; /* max response length expected */
};
struct bfa_fcxp_s {
struct list_head qe; /* fcxp queue element */
bfa_sm_t sm; /* state machine */
void *caller; /* driver or fcs */
struct bfa_fcxp_mod_s *fcxp_mod;
/* back pointer to fcxp mod */
u16 fcxp_tag; /* internal tag */
struct bfa_fcxp_req_info_s req_info;
/* request info */
struct bfa_fcxp_rsp_info_s rsp_info;
/* response info */
u8 use_ireqbuf; /* use internal req buf */
u8 use_irspbuf; /* use internal rsp buf */
u32 nreq_sgles; /* num request SGLEs */
u32 nrsp_sgles; /* num response SGLEs */
struct list_head req_sgpg_q; /* SG pages for request buf */
struct list_head req_sgpg_wqe; /* wait queue for req SG page */
struct list_head rsp_sgpg_q; /* SG pages for response buf */
struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */
bfa_fcxp_get_sgaddr_t req_sga_cbfn;
/* SG elem addr user function */
bfa_fcxp_get_sglen_t req_sglen_cbfn;
/* SG elem len user function */
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
/* SG elem addr user function */
bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
/* SG elem len user function */
bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */
void *send_cbarg; /* callback arg */
struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES];
/* req SG elems */
struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES];
/* rsp SG elems */
u8 rsp_status; /* comp: rsp status */
u32 rsp_len; /* comp: actual response len */
u32 residue_len; /* comp: residual rsp length */
struct fchs_s rsp_fchs; /* comp: response fchs */
struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
struct bfa_reqq_wait_s reqq_wqe;
bfa_boolean_t reqq_waiting;
};
struct bfa_fcxp_wqe_s {
struct list_head qe;
bfa_fcxp_alloc_cbfn_t alloc_cbfn;
void *alloc_cbarg;
void *caller;
struct bfa_s *bfa;
int nreq_sgles;
int nrsp_sgles;
bfa_fcxp_get_sgaddr_t req_sga_cbfn;
bfa_fcxp_get_sglen_t req_sglen_cbfn;
bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
};
#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp))
#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->req_pld_list_pa + \
((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
/**
* RPORT related defines
*/
#define BFA_RPORT_MIN 4
struct bfa_rport_mod_s {
struct bfa_rport_s *rps_list; /* list of rports */
struct list_head rp_free_q; /* free bfa_rports */
struct list_head rp_active_q; /* active bfa_rports */
u16 num_rports; /* number of rports */
};
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
/**
* Convert rport tag to RPORT
*/
#define BFA_RPORT_FROM_TAG(__bfa, _tag) \
(BFA_RPORT_MOD(__bfa)->rps_list + \
((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
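/*
 * Example (illustrative numbers): the rport attach code asserts that
 * num_rports is a power of two, so the tag is reduced to an array index
 * with a mask instead of a modulo. With num_rports = 64:
 *
 *	BFA_RPORT_FROM_TAG(bfa, 5)  == &rps_list[5]
 *	BFA_RPORT_FROM_TAG(bfa, 70) == &rps_list[70 & 63] == &rps_list[6]
 */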
/*
* protected functions
*/
void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
/**
* BFA rport information.
*/
struct bfa_rport_info_s {
u16 max_frmsz; /* max rcv pdu size */
u32 pid:24, /* remote port ID */
lp_tag:8; /* tag */
u32 local_pid:24, /* local port ID */
cisc:8; /* CIRO supported */
u8 fc_class; /* supported FC classes. enum fc_cos */
u8 vf_en; /* virtual fabric enable */
u16 vf_id; /* virtual fabric ID */
enum bfa_port_speed speed; /* Rport's current speed */
};
/**
* BFA rport data structure
*/
struct bfa_rport_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* state machine */
struct bfa_s *bfa; /* backpointer to BFA */
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
u16 rport_tag; /* BFA rport tag */
struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
struct bfa_rport_qos_attr_s qos_attr;
union a {
bfa_status_t status; /* f/w status */
void *fw_msg; /* QoS scn event */
} event_arg;
};
#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class)
/**
* UF - unsolicited receive related defines
*/
#define BFA_UF_MIN (4)
struct bfa_uf_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* bfa instance */
u16 uf_tag; /* identifying tag fw msgs */
u16 vf_id;
u16 src_rport_handle;
u16 rsvd;
u8 *data_ptr;
u16 data_len; /* actual receive length */
u16 pb_len; /* posted buffer length */
void *buf_kva; /* buffer virtual address */
u64 buf_pa; /* buffer physical address */
struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */
struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
};
/**
* Callback prototype for unsolicited frame receive handler.
*
* @param[in] cbarg callback arg for receive handler
* @param[in] uf unsolicited frame descriptor
*
* @return None
*/
typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
struct bfa_uf_mod_s {
struct bfa_s *bfa; /* back pointer to BFA */
struct bfa_uf_s *uf_list; /* array of UFs */
u16 num_ufs; /* num unsolicited rx frames */
struct list_head uf_free_q; /* free UFs */
struct list_head uf_posted_q; /* UFs posted to IOC */
struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
u64 uf_pbs_pa; /* phy addr for UF bufs */
struct bfi_uf_buf_post_s *uf_buf_posts;
/* pre-built UF post msgs */
bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
void *cbarg; /* uf receive handler arg */
};
#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
#define ufm_pbs_pa(_ufmod, _uftag) \
((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#define BFA_UF_BUFSZ (2 * 1024 + 256)
/**
* @todo private
*/
struct bfa_uf_buf_s {
u8 d[BFA_UF_BUFSZ];
};
/**
* LPS - bfa lport login/logout service interface
*/
struct bfa_lps_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* parent bfa instance */
bfa_sm_t sm; /* finite state machine */
u8 lp_tag; /* lport tag */
u8 reqq; /* lport request queue */
u8 alpa; /* ALPA for loop topologies */
u32 lp_pid; /* lport port ID */
bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */
bfa_boolean_t auth_en; /* enable authentication */
bfa_boolean_t auth_req; /* authentication required */
bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
bfa_boolean_t fport; /* attached peer is F_PORT */
bfa_boolean_t brcd_switch; /* attached peer is brcd sw */
bfa_status_t status; /* login status */
u16 pdusz; /* max receive PDU size */
u16 pr_bbcred; /* BB_CREDIT from peer */
u8 lsrjt_rsn; /* LSRJT reason */
u8 lsrjt_expl; /* LSRJT explanation */
wwn_t pwwn; /* port wwn of lport */
wwn_t nwwn; /* node wwn of lport */
wwn_t pr_pwwn; /* port wwn of lport peer */
wwn_t pr_nwwn; /* node wwn of lport peer */
mac_t lp_mac; /* fpma/spma MAC for lport */
mac_t fcf_mac; /* FCF MAC of lport */
struct bfa_reqq_wait_s wqe; /* request wait queue element */
void *uarg; /* user callback arg */
struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */
struct bfi_lps_login_rsp_s *loginrsp;
bfa_eproto_status_t ext_status;
};
struct bfa_lps_mod_s {
struct list_head lps_free_q;
struct list_head lps_active_q;
struct bfa_lps_s *lps_arr;
int num_lps;
};
#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
/*
* external functions
*/
void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
/**
* FCPORT related defines
*/
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
/**
* Link notification data structure
*/
struct bfa_fcport_ln_s {
struct bfa_fcport_s *fcport;
bfa_sm_t sm;
struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
enum bfa_port_linkstate ln_event; /* ln event for callback */
};
struct bfa_fcport_trunk_s {
struct bfa_trunk_attr_s attr;
};
/**
* BFA FC port data structure
*/
struct bfa_fcport_s {
struct bfa_s *bfa; /* parent BFA instance */
bfa_sm_t sm; /* port state machine */
wwn_t nwwn; /* node wwn of physical port */
wwn_t pwwn; /* port wwn of physical port */
enum bfa_port_speed speed_sup;
/* supported speeds */
enum bfa_port_speed speed; /* current speed */
enum bfa_port_topology topology; /* current topology */
u8 myalpa; /* my ALPA in LOOP topology */
u8 rsvd[3];
struct bfa_port_cfg_s cfg; /* current port configuration */
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
struct bfa_reqq_wait_s reqq_wait;
/* to wait for room in reqq */
struct bfa_reqq_wait_s svcreq_wait;
/* to wait for room in reqq */
struct bfa_reqq_wait_s stats_reqq_wait;
/* to wait for room in reqq (stats) */
void *event_cbarg;
void (*event_cbfn) (void *cbarg,
enum bfa_port_linkstate event);
union {
union bfi_fcport_i2h_msg_u i2hmsg;
} event_arg;
void *bfad; /* BFA driver handle */
struct bfa_fcport_ln_s ln; /* Link Notification */
struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
struct bfa_timer_s timer; /* timer */
u32 msgtag; /* firmware msg tag for reply */
u8 *stats_kva;
u64 stats_pa;
union bfa_fcport_stats_u *stats;
union bfa_fcport_stats_u *stats_ret; /* driver stats location */
bfa_status_t stats_status; /* stats/statsclr status */
bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
bfa_boolean_t stats_qfull;
u32 stats_reset_time; /* stats reset time stamp */
bfa_cb_port_t stats_cbfn; /* driver callback function */
void *stats_cbarg; /* user callback arg */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
struct bfa_fcport_trunk_s trunk;
u16 fcoe_vlan;
};
#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
/*
* protected functions
*/
void bfa_fcport_init(struct bfa_s *bfa);
void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
/*
* bfa fcport API functions
*/
bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
enum bfa_port_speed speed);
enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
enum bfa_port_topology topo);
enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
void bfa_fcport_event_register(struct bfa_s *bfa,
void (*event_cbfn) (void *cbarg,
enum bfa_port_linkstate event), void *event_cbarg);
bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
enum bfa_port_speed speed);
enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon);
void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
struct bfa_qos_attr_s *qos_attr);
void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
struct bfa_qos_vc_attr_s *qos_vc_attr);
bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
union bfa_fcport_stats_u *stats,
bfa_cb_port_t cbfn, void *cbarg);
bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
void *cbarg);
bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
union bfa_fcport_stats_u *stats,
bfa_cb_port_t cbfn, void *cbarg);
bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
void *cbarg);
bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
union bfa_fcport_stats_u *stats,
bfa_cb_port_t cbfn, void *cbarg);
bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
void *cbarg);
bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
/*
* bfa rport API functions
*/
struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
void bfa_rport_delete(struct bfa_rport_s *rport);
void bfa_rport_online(struct bfa_rport_s *rport,
struct bfa_rport_info_s *rport_info);
void bfa_rport_offline(struct bfa_rport_s *rport);
void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
void bfa_rport_get_stats(struct bfa_rport_s *rport,
struct bfa_rport_hal_stats_s *stats);
void bfa_rport_clear_stats(struct bfa_rport_s *rport);
void bfa_cb_rport_online(void *rport);
void bfa_cb_rport_offline(void *rport);
void bfa_cb_rport_qos_scn_flowid(void *rport,
struct bfa_rport_qos_attr_s old_qos_attr,
struct bfa_rport_qos_attr_s new_qos_attr);
void bfa_cb_rport_qos_scn_prio(void *rport,
struct bfa_rport_qos_attr_s old_qos_attr,
struct bfa_rport_qos_attr_s new_qos_attr);
void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
struct bfa_rport_qos_attr_s *qos_attr);
/*
* bfa fcxp API functions
*/
struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
bfa_fcxp_get_sglen_t get_rsp_sglen);
void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
bfa_fcxp_alloc_cbfn_t alloc_cbfn,
void *cbarg, void *bfad_fcxp,
int nreq_sgles, int nrsp_sgles,
bfa_fcxp_get_sgaddr_t get_req_sga,
bfa_fcxp_get_sglen_t get_req_sglen,
bfa_fcxp_get_sgaddr_t get_rsp_sga,
bfa_fcxp_get_sglen_t get_rsp_sglen);
void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
struct bfa_fcxp_wqe_s *wqe);
void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
u16 vf_id, u8 lp_tag,
bfa_boolean_t cts, enum fc_cos cos,
u32 reqlen, struct fchs_s *fchs,
bfa_cb_fcxp_send_t cbfn,
void *cbarg,
u32 rsp_maxlen, u8 rsp_timeout);
bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
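/*
 * Hypothetical call sequence (sketch only; the class of service, timeout
 * and callback names are assumptions, not taken from this header): a
 * caller with small payloads can use the FCXP's internal buffers by
 * passing zero SG counts and NULL accessors, build the request in the
 * buffer returned by bfa_fcxp_get_reqbuf(), then send it:
 *
 *	fcxp = bfa_fcxp_alloc(drv_ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *	if (fcxp) {
 *		pld = bfa_fcxp_get_reqbuf(fcxp);
 *		... fill pld and the request fchs ...
 *		bfa_fcxp_send(fcxp, bfa_rport, vf_id, lp_tag, BFA_FALSE,
 *			      FC_CLASS_3, reqlen, &fchs, my_send_cbfn,
 *			      my_cbarg, bfa_fcxp_get_maxrsp(bfa), 30);
 *	}
 *
 * my_send_cbfn() is then invoked as a bfa_cb_fcxp_send_t with the request
 * status, response length, residual and response FC header on completion.
 */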
static inline void *
bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
{
return uf->data_ptr;
}
static inline u16
bfa_uf_get_frmlen(struct bfa_uf_s *uf)
{
return uf->data_len;
}
/*
* bfa uf API functions
*/
void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
void *cbarg);
void bfa_uf_free(struct bfa_uf_s *uf);
/**
* bfa lport service api
*/
u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
void bfa_lps_delete(struct bfa_lps_s *lps);
void bfa_lps_discard(struct bfa_lps_s *lps);
void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
u16 pdusz, wwn_t pwwn, wwn_t nwwn,
bfa_boolean_t auth_en);
void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
wwn_t pwwn, wwn_t nwwn);
void bfa_lps_flogo(struct bfa_lps_s *lps);
void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
void bfa_trunk_enable_cfg(struct bfa_s *bfa);
bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
struct bfa_trunk_attr_s *attr);
#endif /* __BFA_SVC_H__ */


@ -1,90 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa_timer.h>
#include <cs/bfa_debug.h>
void
bfa_timer_init(struct bfa_timer_mod_s *mod)
{
INIT_LIST_HEAD(&mod->timer_q);
}
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
struct list_head *qh = &mod->timer_q;
struct list_head *qe, *qe_next;
struct bfa_timer_s *elem;
struct list_head timedout_q;
INIT_LIST_HEAD(&timedout_q);
qe = bfa_q_next(qh);
while (qe != qh) {
qe_next = bfa_q_next(qe);
elem = (struct bfa_timer_s *) qe;
if (elem->timeout <= BFA_TIMER_FREQ) {
elem->timeout = 0;
list_del(&elem->qe);
list_add_tail(&elem->qe, &timedout_q);
} else {
elem->timeout -= BFA_TIMER_FREQ;
}
qe = qe_next; /* go to next elem */
}
/*
* Pop all the timeout entries
*/
while (!list_empty(&timedout_q)) {
bfa_q_deq(&timedout_q, &elem);
elem->timercb(elem->arg);
}
}
/**
* Should be called with lock protection
*/
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
void (*timercb) (void *), void *arg, unsigned int timeout)
{
bfa_assert(timercb != NULL);
bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
timer->timeout = timeout;
timer->timercb = timercb;
timer->arg = arg;
list_add_tail(&timer->qe, &mod->timer_q);
}
/**
* Should be called with lock protection
*/
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
bfa_assert(!list_empty(&timer->qe));
list_del(&timer->qe);
}
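/*
 * Usage sketch (caller-side names and the 2000 unit timeout are
 * illustrative): a timer is armed with bfa_timer_begin() and the owner
 * drives the module from a periodic tick that must run once every
 * BFA_TIMER_FREQ units (the same unit as the timeout argument). Expired
 * timers are one-shot: they are unlinked before their callback runs and
 * must be re-armed for periodic behaviour.
 *
 *	bfa_timer_init(&timer_mod);
 *	bfa_timer_begin(&timer_mod, &my_timer, my_timeout_cb, my_arg, 2000);
 *	...
 *	bfa_timer_beat(&timer_mod);	(from the periodic tick)
 *	...
 *	bfa_timer_stop(&my_timer);	(to cancel a pending timer)
 */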


@ -1,64 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* hal_trcmod.h BFA trace modules
*/
#ifndef __BFA_TRCMOD_PRIV_H__
#define __BFA_TRCMOD_PRIV_H__
#include <cs/bfa_trc.h>
/*
* !!! Only append to the enums defined here to avoid any versioning
* !!! needed between trace utility and driver version
*/
enum {
BFA_TRC_HAL_INTR = 1,
BFA_TRC_HAL_FCXP = 2,
BFA_TRC_HAL_UF = 3,
BFA_TRC_HAL_RPORT = 4,
BFA_TRC_HAL_FCPIM = 5,
BFA_TRC_HAL_IOIM = 6,
BFA_TRC_HAL_TSKIM = 7,
BFA_TRC_HAL_ITNIM = 8,
BFA_TRC_HAL_FCPORT = 9,
BFA_TRC_HAL_SGPG = 10,
BFA_TRC_HAL_FLASH = 11,
BFA_TRC_HAL_DEBUG = 12,
BFA_TRC_HAL_WWN = 13,
BFA_TRC_HAL_FLASH_RAW = 14,
BFA_TRC_HAL_SBOOT = 15,
BFA_TRC_HAL_SBOOT_IO = 16,
BFA_TRC_HAL_SBOOT_INTR = 17,
BFA_TRC_HAL_SBTEST = 18,
BFA_TRC_HAL_IPFC = 19,
BFA_TRC_HAL_IOCFC = 20,
BFA_TRC_HAL_FCPTM = 21,
BFA_TRC_HAL_IOTM = 22,
BFA_TRC_HAL_TSKTM = 23,
BFA_TRC_HAL_TIN = 24,
BFA_TRC_HAL_LPS = 25,
BFA_TRC_HAL_FCDIAG = 26,
BFA_TRC_HAL_PBIND = 27,
BFA_TRC_HAL_IOCFC_CT = 28,
BFA_TRC_HAL_IOCFC_CB = 29,
BFA_TRC_HAL_IOCFC_Q = 30,
};
#endif /* __BFA_TRCMOD_PRIV_H__ */


@ -1,690 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfa_cb_ioim_macros.h>
BFA_TRC_FILE(HAL, TSKIM);
/**
* task management completion handling
*/
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \
__cbfn, (__tskim)); \
bfa_tskim_notify_comp(__tskim); \
} while (0)
#define bfa_tskim_notify_comp(__tskim) do { \
if ((__tskim)->notify) \
bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)
/*
* forward declarations
*/
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
lun_t lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
/**
* bfa_tskim_sm
*/
enum bfa_tskim_event {
BFA_TSKIM_SM_START = 1, /* TM command start */
BFA_TSKIM_SM_DONE = 2, /* TM completion */
BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
};
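/*
 * Reading aid (summarized from the handlers below): START moves the TM
 * command from uninit to active (or straight to iocleanup when the ITN is
 * offline, or to qfull when the request queue is full). DONE and
 * CLEANUP_DONE funnel through iocleanup, where the IOs in TM scope are
 * cleaned up; IOS_DONE then queues the completion callback and HCB returns
 * the tskim to uninit. HWFAIL short-circuits to the hcb state with a
 * failed status.
 */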
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event);
/**
* Task management command beginning state.
*/
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_START:
bfa_sm_set_state(tskim, bfa_tskim_sm_active);
bfa_tskim_gather_ios(tskim);
/**
* If device is offline, do not send TM on wire. Just clean up
* any pending IO requests and complete TM request.
*/
if (!bfa_itnim_is_online(tskim->itnim)) {
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
tskim->tsk_status = BFI_TSKIM_STS_OK;
bfa_tskim_cleanup_ios(tskim);
return;
}
if (!bfa_tskim_send(tskim)) {
bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
&tskim->reqq_wait);
}
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* TM command is active, awaiting completion from firmware to
* clean up IO requests in TM scope.
*/
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
bfa_tskim_cleanup_ios(tskim);
break;
case BFA_TSKIM_SM_CLEANUP:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
if (!bfa_tskim_send_abort(tskim)) {
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
&tskim->reqq_wait);
}
break;
case BFA_TSKIM_SM_HWFAIL:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_tskim_iocdisable_ios(tskim);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* An active TM is being cleaned up since ITN is offline. Awaiting cleanup
* completion event from firmware.
*/
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_DONE:
/**
* Ignore and wait for ABORT completion from firmware.
*/
break;
case BFA_TSKIM_SM_CLEANUP_DONE:
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
bfa_tskim_cleanup_ios(tskim);
break;
case BFA_TSKIM_SM_HWFAIL:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_tskim_iocdisable_ios(tskim);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_IOS_DONE:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
break;
case BFA_TSKIM_SM_CLEANUP:
/**
* Ignore, TM command completed on wire.
* Notify TM completion on IO cleanup completion.
*/
break;
case BFA_TSKIM_SM_HWFAIL:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_tskim_iocdisable_ios(tskim);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* Task management command is waiting for room in request CQ
*/
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_active);
bfa_tskim_send(tskim);
break;
case BFA_TSKIM_SM_CLEANUP:
/**
* No need to send TM on wire since ITN is offline.
*/
bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
bfa_reqq_wcancel(&tskim->reqq_wait);
bfa_tskim_cleanup_ios(tskim);
break;
case BFA_TSKIM_SM_HWFAIL:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_reqq_wcancel(&tskim->reqq_wait);
bfa_tskim_iocdisable_ios(tskim);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* Task management command is active, awaiting room in the request CQ
* to send the cleanup request.
*/
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_DONE:
bfa_reqq_wcancel(&tskim->reqq_wait);
/**
*
* Fall through !!!
*/
case BFA_TSKIM_SM_QRESUME:
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
bfa_tskim_send_abort(tskim);
break;
case BFA_TSKIM_SM_HWFAIL:
bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
bfa_reqq_wcancel(&tskim->reqq_wait);
bfa_tskim_iocdisable_ios(tskim);
bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* BFA callback is pending
*/
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
bfa_trc(tskim->bfa, event);
switch (event) {
case BFA_TSKIM_SM_HCB:
bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
bfa_tskim_free(tskim);
break;
case BFA_TSKIM_SM_CLEANUP:
bfa_tskim_notify_comp(tskim);
break;
case BFA_TSKIM_SM_HWFAIL:
break;
default:
bfa_sm_fault(tskim->bfa, event);
}
}
/**
* bfa_tskim_private
*/
static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
struct bfa_tskim_s *tskim = cbarg;
if (!complete) {
bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
return;
}
bfa_stats(tskim->itnim, tm_success);
bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}
static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
struct bfa_tskim_s *tskim = cbarg;
if (!complete) {
bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
return;
}
bfa_stats(tskim->itnim, tm_failures);
bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
BFI_TSKIM_STS_FAILED);
}
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
{
switch (tskim->tm_cmnd) {
case FCP_TM_TARGET_RESET:
return BFA_TRUE;
case FCP_TM_ABORT_TASK_SET:
case FCP_TM_CLEAR_TASK_SET:
case FCP_TM_LUN_RESET:
case FCP_TM_CLEAR_ACA:
return (tskim->lun == lun);
default:
bfa_assert(0);
}
return BFA_FALSE;
}
/**
* Gather affected IO requests and task management commands.
*/
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfa_ioim_s *ioim;
struct list_head *qe, *qen;
INIT_LIST_HEAD(&tskim->io_q);
/**
* Gather any active IO requests first.
*/
list_for_each_safe(qe, qen, &itnim->io_q) {
ioim = (struct bfa_ioim_s *) qe;
if (bfa_tskim_match_scope
(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
list_del(&ioim->qe);
list_add_tail(&ioim->qe, &tskim->io_q);
}
}
/**
* Fail back any pending IO requests immediately.
*/
list_for_each_safe(qe, qen, &itnim->pending_q) {
ioim = (struct bfa_ioim_s *) qe;
if (bfa_tskim_match_scope
(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
list_del(&ioim->qe);
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
bfa_ioim_tov(ioim);
}
}
}
/**
* IO cleanup completion
*/
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
struct bfa_tskim_s *tskim = tskim_cbarg;
bfa_stats(tskim->itnim, tm_io_comps);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
/**
* Cleanup IO requests gathered under this task management command.
*/
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
struct bfa_ioim_s *ioim;
struct list_head *qe, *qen;
bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
list_for_each_safe(qe, qen, &tskim->io_q) {
ioim = (struct bfa_ioim_s *) qe;
bfa_wc_up(&tskim->wc);
bfa_ioim_cleanup_tm(ioim, tskim);
}
bfa_wc_wait(&tskim->wc);
}
/**
* Send task management request to firmware.
*/
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_req_s *m;
/**
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
/**
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
bfa_lpuid(tskim->bfa));
m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
m->itn_fhdl = tskim->itnim->rport->fw_handle;
m->t_secs = tskim->tsecs;
m->lun = tskim->lun;
m->tm_flags = tskim->tm_cmnd;
/**
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
/**
* Send abort request to cleanup an active TM to firmware.
*/
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
struct bfa_itnim_s *itnim = tskim->itnim;
struct bfi_tskim_abortreq_s *m;
/**
* check for room in queue to send request now
*/
m = bfa_reqq_next(tskim->bfa, itnim->reqq);
if (!m)
return BFA_FALSE;
/**
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
bfa_lpuid(tskim->bfa));
m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
/**
* queue I/O message to firmware
*/
bfa_reqq_produce(tskim->bfa, itnim->reqq);
return BFA_TRUE;
}
/**
* Call to resume task management cmnd waiting for room in request queue.
*/
static void
bfa_tskim_qresume(void *cbarg)
{
struct bfa_tskim_s *tskim = cbarg;
bfa_fcpim_stats(tskim->fcpim, qresumes);
bfa_stats(tskim->itnim, tm_qresumes);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
/**
* Cleanup IOs associated with a task management command on IOC failures.
*/
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
struct bfa_ioim_s *ioim;
struct list_head *qe, *qen;
list_for_each_safe(qe, qen, &tskim->io_q) {
ioim = (struct bfa_ioim_s *) qe;
bfa_ioim_iocdisable(ioim);
}
}
/**
* bfa_tskim_friend
*/
/**
* Notification on completions from related ioim.
*/
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
bfa_wc_down(&tskim->wc);
}
/**
* Handle IOC h/w failure notification from itnim.
*/
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
tskim->notify = BFA_FALSE;
bfa_stats(tskim->itnim, tm_iocdowns);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
/**
* Cleanup TM command and associated IOs as part of ITNIM offline.
*/
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
tskim->notify = BFA_TRUE;
bfa_stats(tskim->itnim, tm_cleanups);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
/**
* Memory allocation and initialization.
*/
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
struct bfa_tskim_s *tskim;
u16 i;
INIT_LIST_HEAD(&fcpim->tskim_free_q);
tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
fcpim->tskim_arr = tskim;
for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
/*
* initialize TSKIM
*/
bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
tskim->tsk_tag = i;
tskim->bfa = fcpim->bfa;
tskim->fcpim = fcpim;
tskim->notify = BFA_FALSE;
bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
tskim);
bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
}
bfa_meminfo_kva(minfo) = (u8 *) tskim;
}
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
/**
* @todo
*/
}
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
struct bfa_tskim_s *tskim;
u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
bfa_assert(tskim->tsk_tag == tsk_tag);
tskim->tsk_status = rsp->tsk_status;
/**
* Firmware sends BFI_TSKIM_STS_ABORTED status for abort
* requests. All other statuses are for normal completions.
*/
if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
bfa_stats(tskim->itnim, tm_cleanup_comps);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
} else {
bfa_stats(tskim->itnim, tm_fw_rsps);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
}
}
/**
* bfa_tskim_api
*/
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
struct bfa_tskim_s *tskim;
bfa_q_deq(&fcpim->tskim_free_q, &tskim);
if (!tskim)
bfa_fcpim_stats(fcpim, no_tskims);
else
tskim->dtsk = dtsk;
return tskim;
}
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
list_del(&tskim->qe);
list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
/**
* Start a task management command.
*
* @param[in] tskim BFA task management command instance
* @param[in] itnim i-t nexus for the task management command
* @param[in] lun lun, if applicable
* @param[in] tm_cmnd Task management command code.
* @param[in] t_secs Timeout in seconds
*
* @return None.
*/
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
tskim->itnim = itnim;
tskim->lun = lun;
tskim->tm_cmnd = tm_cmnd;
tskim->tsecs = tsecs;
tskim->notify = BFA_FALSE;
bfa_stats(itnim, tm_cmnds);
list_add_tail(&tskim->qe, &itnim->tsk_q);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
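The state machine and helpers above are driven through bfa_tskim_alloc() and bfa_tskim_start(). The fragment below is a minimal sketch of that calling sequence, not driver code: the itnim, dtsk and lun arguments are assumed to come from the surrounding driver context, and the 10 second timeout is only an illustrative value.
/*
 * Illustrative sketch only -- assumes the BFA headers that define
 * bfa_status_t, lun_t, struct bfa_itnim_s and FCP_TM_LUN_RESET.
 */
static bfa_status_t
example_send_lun_reset(struct bfa_s *bfa, struct bfa_itnim_s *itnim,
			struct bfad_tskim_s *dtsk, lun_t lun)
{
	struct bfa_tskim_s *tskim;

	tskim = bfa_tskim_alloc(bfa, dtsk);
	if (!tskim)
		return BFA_STATUS_FAILED;	/* tskim free list exhausted */

	/*
	 * Queues the TM command on the i-t nexus and fires
	 * BFA_TSKIM_SM_START; completion is reported asynchronously
	 * through bfa_cb_tskim_done().
	 */
	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 10);
	return BFA_STATUS_OK;
}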


@ -1,343 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfa_uf.c BFA unsolicited frame receive implementation
*/
#include <bfa.h>
#include <bfa_svc.h>
#include <bfi/bfi_uf.h>
#include <cs/bfa_debug.h>
BFA_TRC_FILE(HAL, UF);
BFA_MODULE(uf);
/*
*****************************************************************************
* Internal functions
*****************************************************************************
*/
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
struct bfa_uf_s *uf = cbarg;
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
if (complete)
ufm->ufrecv(ufm->cbarg, uf);
}
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
u32 uf_pb_tot_sz;
ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
BFA_DMA_ALIGN_SZ);
bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
static void
claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
struct bfi_uf_buf_post_s *uf_bp_msg;
struct bfi_sge_s *sge;
union bfi_addr_u sga_zero = { {0} };
u16 i;
u16 buf_len;
ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
uf_bp_msg = ufm->uf_buf_posts;
for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
i++, uf_bp_msg++) {
bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
uf_bp_msg->buf_tag = i;
buf_len = sizeof(struct bfa_uf_buf_s);
uf_bp_msg->buf_len = bfa_os_htons(buf_len);
bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
bfa_lpuid(ufm->bfa));
sge = uf_bp_msg->sge;
sge[0].sg_len = buf_len;
sge[0].flags = BFI_SGE_DATA_LAST;
bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
bfa_sge_to_be(sge);
sge[1].sg_len = buf_len;
sge[1].flags = BFI_SGE_PGDLEN;
sge[1].sga = sga_zero;
bfa_sge_to_be(&sge[1]);
}
/**
* advance pointer beyond consumed memory
*/
bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
}
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
u16 i;
struct bfa_uf_s *uf;
/*
* Claim block of memory for UF list
*/
ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
/*
* Initialize UFs and queue it in UF free queue
*/
for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
uf->bfa = ufm->bfa;
uf->uf_tag = i;
uf->pb_len = sizeof(struct bfa_uf_buf_s);
uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
uf->buf_pa = ufm_pbs_pa(ufm, i);
list_add_tail(&uf->qe, &ufm->uf_free_q);
}
/**
* advance memory pointer
*/
bfa_meminfo_kva(mi) = (u8 *) uf;
}
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
claim_uf_pbs(ufm, mi);
claim_ufs(ufm, mi);
claim_uf_post_msgs(ufm, mi);
}
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
u32 num_ufs = cfg->fwcfg.num_uf_bufs;
/*
* dma-able memory for UF posted bufs
*/
*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
BFA_DMA_ALIGN_SZ);
/*
* kernel Virtual memory for UFs and UF buf post msg copies
*/
*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
ufm->bfa = bfa;
ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
INIT_LIST_HEAD(&ufm->uf_free_q);
INIT_LIST_HEAD(&ufm->uf_posted_q);
uf_mem_claim(ufm, meminfo);
}
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
struct bfa_uf_s *uf;
bfa_q_deq(&uf_mod->uf_free_q, &uf);
return uf;
}
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
struct bfi_uf_buf_post_s *uf_post_msg;
uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
if (!uf_post_msg)
return BFA_STATUS_FAILED;
bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
sizeof(struct bfi_uf_buf_post_s));
bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
bfa_trc(ufm->bfa, uf->uf_tag);
list_add_tail(&uf->qe, &ufm->uf_posted_q);
return BFA_STATUS_OK;
}
static void
bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
{
struct bfa_uf_s *uf;
while ((uf = bfa_uf_get(uf_mod)) != NULL) {
if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
break;
}
}
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
u16 uf_tag = m->buf_tag;
struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
u8 *buf = &uf_buf->d[0];
struct fchs_s *fchs;
m->frm_len = bfa_os_ntohs(m->frm_len);
m->xfr_len = bfa_os_ntohs(m->xfr_len);
fchs = (struct fchs_s *) uf_buf;
list_del(&uf->qe); /* dequeue from posted queue */
uf->data_ptr = buf;
uf->data_len = m->xfr_len;
bfa_assert(uf->data_len >= sizeof(struct fchs_s));
if (uf->data_len == sizeof(struct fchs_s)) {
bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
uf->data_len, (struct fchs_s *) buf);
} else {
u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
BFA_PL_EID_RX, uf->data_len,
(struct fchs_s *) buf, pld_w0);
}
if (bfa->fcs)
__bfa_cb_uf_recv(uf, BFA_TRUE);
else
bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
struct bfa_uf_s *uf;
struct list_head *qe, *qen;
list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
uf = (struct bfa_uf_s *) qe;
list_del(&uf->qe);
bfa_uf_put(ufm, uf);
}
}
static void
bfa_uf_start(struct bfa_s *bfa)
{
bfa_uf_post_all(BFA_UF_MOD(bfa));
}
/**
* bfa_uf_api
*/
/**
* Register handler for all unsolicited receive frames.
*
* @param[in] bfa BFA instance
* @param[in] ufrecv receive handler function
* @param[in] cbarg receive handler arg
*/
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
ufm->ufrecv = ufrecv;
ufm->cbarg = cbarg;
}
/**
* Free an unsolicited frame back to BFA.
*
* @param[in] uf unsolicited frame to be freed
*
* @return None
*/
void
bfa_uf_free(struct bfa_uf_s *uf)
{
bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
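bfa_uf_recv_register() and bfa_uf_free() above are the consumer-facing entry points of this module. Below is a hedged sketch of that flow; the handler name and its signature are assumptions inferred from the ufm->ufrecv(ufm->cbarg, uf) dispatch in __bfa_cb_uf_recv(), not code taken from the driver.
/*
 * Illustrative sketch only -- the callback signature is assumed from
 * the dispatch in __bfa_cb_uf_recv() above.
 */
static void
example_uf_handler(void *cbarg, struct bfa_uf_s *uf)
{
	/* data_ptr/data_len were filled in by uf_recv() before dispatch */
	u8 *frame = uf->data_ptr;
	u32 frame_len = uf->data_len;

	/* ... inspect the unsolicited frame (FC header starts at frame[0]) ... */
	(void) frame;
	(void) frame_len;

	/* hand the buffer back so it is re-posted to the IOC */
	bfa_uf_free(uf);
}

static void
example_uf_setup(struct bfa_s *bfa, void *cbarg)
{
	bfa_uf_recv_register(bfa, example_uf_handler, cbarg);
}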
/**
* uf_pub BFA uf module public functions
*/
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
bfa_trc(bfa, msg->mhdr.msg_id);
switch (msg->mhdr.msg_id) {
case BFI_UF_I2H_FRM_RCVD:
uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
break;
default:
bfa_trc(bfa, msg->mhdr.msg_id);
bfa_assert(0);
}
}


@ -1,47 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFA_UF_PRIV_H__
#define __BFA_UF_PRIV_H__
#include <cs/bfa_sm.h>
#include <bfa_svc.h>
#include <bfi/bfi_uf.h>
#define BFA_UF_MIN (4)
struct bfa_uf_mod_s {
struct bfa_s *bfa; /* back pointer to BFA */
struct bfa_uf_s *uf_list; /* array of UFs */
u16 num_ufs; /* num unsolicited rx frames */
struct list_head uf_free_q; /* free UFs */
struct list_head uf_posted_q; /* UFs posted to IOC */
struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
u64 uf_pbs_pa; /* phy addr for UF bufs */
struct bfi_uf_buf_post_s *uf_buf_posts;
/* pre-built UF post msgs */
bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
void *cbarg; /* uf receive handler arg */
};
#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
#define ufm_pbs_pa(_ufmod, _uftag) \
((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_UF_PRIV_H__ */

The diff for this file is not shown because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -19,15 +19,8 @@
* bfa_attr.c Linux driver configuration interface module.
*/
#include <linux/slab.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_trcmod.h"
#include "bfad_attr.h"
/**
* FC_transport_template FC transport template
*/
/**
* FC transport template entry, get SCSI target port ID.
@ -42,7 +35,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
u32 fc_id = -1;
unsigned long flags;
shost = bfad_os_starget_to_shost(starget);
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
@ -68,7 +61,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
u64 node_name = 0;
unsigned long flags;
shost = bfad_os_starget_to_shost(starget);
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
@ -94,7 +87,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
u64 port_name = 0;
unsigned long flags;
shost = bfad_os_starget_to_shost(starget);
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
@ -118,17 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
struct bfad_port_s *port = im_port->port;
fc_host_port_id(shost) =
bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port));
}
struct Scsi_Host *
bfad_os_starget_to_shost(struct scsi_target *starget)
{
return dev_to_shost(starget->dev.parent);
bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}
/**
@ -140,21 +123,21 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
struct bfa_lport_attr_s port_attr;
bfa_fcport_get_attr(&bfad->bfa, &attr);
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
switch (attr.port_type) {
case BFA_PPORT_TYPE_NPORT:
switch (port_attr.port_type) {
case BFA_PORT_TYPE_NPORT:
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case BFA_PPORT_TYPE_NLPORT:
case BFA_PORT_TYPE_NLPORT:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
break;
case BFA_PPORT_TYPE_P2P:
case BFA_PORT_TYPE_P2P:
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case BFA_PPORT_TYPE_LPORT:
case BFA_PORT_TYPE_LPORT:
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
break;
default:
@ -172,25 +155,28 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
struct bfa_port_attr_s attr;
bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.port_state) {
case BFA_PPORT_ST_LINKDOWN:
case BFA_PORT_ST_LINKDOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case BFA_PPORT_ST_LINKUP:
case BFA_PORT_ST_LINKUP:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
case BFA_PPORT_ST_UNINIT:
case BFA_PPORT_ST_ENABLING_QWAIT:
case BFA_PPORT_ST_ENABLING:
case BFA_PPORT_ST_DISABLING_QWAIT:
case BFA_PPORT_ST_DISABLING:
case BFA_PPORT_ST_DISABLED:
case BFA_PPORT_ST_STOPPED:
case BFA_PPORT_ST_IOCDOWN:
case BFA_PORT_ST_DISABLED:
case BFA_PORT_ST_STOPPED:
case BFA_PORT_ST_IOCDOWN:
case BFA_PORT_ST_IOCDIS:
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
break;
case BFA_PORT_ST_UNINIT:
case BFA_PORT_ST_ENABLING_QWAIT:
case BFA_PORT_ST_ENABLING:
case BFA_PORT_ST_DISABLING_QWAIT:
case BFA_PORT_ST_DISABLING:
default:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
@ -210,13 +196,9 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
if (port->supported_fc4s &
(BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
fc_host_active_fc4s(shost)[2] = 1;
if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
fc_host_active_fc4s(shost)[3] = 0x20;
fc_host_active_fc4s(shost)[7] = 1;
}
@ -229,29 +211,29 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
unsigned long flags;
struct bfa_port_attr_s attr;
spin_lock_irqsave(shost->host_lock, flags);
bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.speed) {
case BFA_PPORT_SPEED_8GBPS:
case BFA_PORT_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case BFA_PORT_SPEED_8GBPS:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
case BFA_PPORT_SPEED_4GBPS:
case BFA_PORT_SPEED_4GBPS:
fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
break;
case BFA_PPORT_SPEED_2GBPS:
case BFA_PORT_SPEED_2GBPS:
fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
break;
case BFA_PPORT_SPEED_1GBPS:
case BFA_PORT_SPEED_1GBPS:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
@ -265,7 +247,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
struct bfad_port_s *port = im_port->port;
wwn_t fabric_nwwn = 0;
fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port);
fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
@ -281,23 +263,44 @@ bfad_im_get_stats(struct Scsi_Host *shost)
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_hal_comp fcomp;
union bfa_port_stats_u *fcstats;
struct fc_host_statistics *hstats;
bfa_status_t rc;
unsigned long flags;
fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
if (fcstats == NULL)
return NULL;
hstats = &bfad->link_stats;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
memset(hstats, 0, sizeof(struct fc_host_statistics));
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
(union bfa_pport_stats_u *) hstats,
bfad_hcb_comp, &fcomp);
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK)
return NULL;
wait_for_completion(&fcomp.comp);
/* Fill the fc_host_statistics structure */
hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
hstats->tx_frames = fcstats->fc.tx_frames;
hstats->tx_words = fcstats->fc.tx_words;
hstats->rx_frames = fcstats->fc.rx_frames;
hstats->rx_words = fcstats->fc.rx_words;
hstats->lip_count = fcstats->fc.lip_count;
hstats->nos_count = fcstats->fc.nos_count;
hstats->error_frames = fcstats->fc.error_frames;
hstats->dumped_frames = fcstats->fc.dropped_frames;
hstats->link_failure_count = fcstats->fc.link_failures;
hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
kfree(fcstats);
return hstats;
}
@ -317,7 +320,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
&fcomp);
&fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK)
@ -372,8 +375,8 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_port_cfg_s port_cfg;
struct bfad_pcfg_s *pcfg;
struct bfa_lport_cfg_s port_cfg;
struct bfad_vport_s *vp;
int status = 0, rc;
unsigned long flags;
@ -382,12 +385,14 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
if (strlen(vname) > 0)
strcpy((char *)&port_cfg.sym_name, vname);
port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
spin_lock_irqsave(&bfad->bfad_lock, flags);
list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) {
if (port_cfg.pwwn == pcfg->port_cfg.pwwn) {
port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp;
list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
if (port_cfg.pwwn ==
vp->fcs_vport.lport.port_cfg.pwwn) {
port_cfg.preboot_vp =
vp->fcs_vport.lport.port_cfg.preboot_vp;
break;
}
}
@ -638,7 +643,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
@ -652,7 +657,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
@ -666,10 +671,54 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
bfa_get_adapter_model(&bfad->bfa, model_descr);
bfa_get_adapter_model(&bfad->bfa, model);
if (!strcmp(model, "Brocade-425"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 4Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "Brocade-825"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 8Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "Brocade-42B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"HP 4Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "Brocade-82B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"HP 8Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "Brocade-1010"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps single port CNA");
else if (!strcmp(model, "Brocade-1020"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps dual port CNA");
else if (!strcmp(model, "Brocade-1007"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps CNA");
else if (!strcmp(model, "Brocade-415"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 4Gbps PCIe single port FC HBA");
else if (!strcmp(model, "Brocade-815"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 8Gbps PCIe single port FC HBA");
else if (!strcmp(model, "Brocade-41B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"HP 4Gbps PCIe single port FC HBA");
else if (!strcmp(model, "Brocade-81B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"HP 8Gbps PCIe single port FC HBA");
else if (!strcmp(model, "Brocade-804"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"HP Bladesystem C-class 8Gbps FC HBA");
else if (!strcmp(model, "Brocade-902"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps CNA");
else
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
}
@ -683,7 +732,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
struct bfad_port_s *port = im_port->port;
u64 nwwn;
nwwn = bfa_fcs_port_get_nwwn(port->fcs_port);
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
}
@ -694,14 +743,14 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char fw_ver[BFA_VERSION_LEN];
struct bfad_s *bfad = im_port->bfad;
struct bfa_lport_attr_s port_attr;
char symname[BFA_SYMNAME_MAXLEN];
bfa_get_adapter_model(&bfad->bfa, model);
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
model, fw_ver, BFAD_DRIVER_VERSION);
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strncpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
static ssize_t
@ -711,7 +760,7 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
@ -732,7 +781,7 @@ bfad_im_optionrom_version_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
@ -746,7 +795,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
@ -760,10 +809,10 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
return snprintf(buf, PAGE_SIZE, "%d\n",
bfa_get_nports(&bfad->bfa));
bfa_get_nports(&bfad->bfa));
}
static ssize_t
@ -788,10 +837,10 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
if (rports == NULL)
return -ENOMEM;
return snprintf(buf, PAGE_SIZE, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports);
bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
@ -837,19 +886,19 @@ struct device_attribute *bfad_im_host_attrs[] = {
};
struct device_attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
NULL,
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
NULL,
};


@ -1,56 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFAD_ATTR_H__
#define __BFAD_ATTR_H__
/**
* FC_transport_template FC transport template
*/
struct Scsi_Host*
bfad_os_dev_to_shost(struct scsi_target *starget);
/**
* FC transport template entry, get SCSI target port ID.
*/
void
bfad_im_get_starget_port_id(struct scsi_target *starget);
/**
* FC transport template entry, get SCSI target nwwn.
*/
void
bfad_im_get_starget_node_name(struct scsi_target *starget);
/**
* FC transport template entry, get SCSI target pwwn.
*/
void
bfad_im_get_starget_port_name(struct scsi_target *starget);
/**
* FC transport template entry, get SCSI host port ID.
*/
void
bfad_im_get_host_port_id(struct Scsi_Host *shost);
struct Scsi_Host*
bfad_os_starget_to_shost(struct scsi_target *starget);
#endif /* __BFAD_ATTR_H__ */


@ -17,8 +17,8 @@
#include <linux/debugfs.h>
#include <bfad_drv.h>
#include <bfad_im.h>
#include "bfad_drv.h"
#include "bfad_im.h"
/*
* BFA debugfs interface
@ -28,7 +28,7 @@
* mount -t debugfs none /sys/kernel/debug
*
* BFA Hierarchy:
* - bfa/host#
* - bfa/host#
* where the host number corresponds to the one under /sys/class/scsi_host/host#
*
* Debugging service available per host:
@ -217,7 +217,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
#define BFA_REG_ADDRSZ(__bfa) \
((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
#define BFA_REG_ADDRMSK(__bfa) ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1))
#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
static bfa_status_t
bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@ -359,7 +359,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
return -EINVAL;
}
reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr);
reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_reg_write(reg_addr, val);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -28,30 +28,27 @@
#include "bfa_os_inc.h"
#include <bfa.h>
#include <bfa_svc.h>
#include <fcs/bfa_fcs.h>
#include <defs/bfa_defs_pci.h>
#include <defs/bfa_defs_port.h>
#include <defs/bfa_defs_rport.h>
#include <fcs/bfa_fcs_rport.h>
#include <defs/bfa_defs_vport.h>
#include <fcs/bfa_fcs_vport.h>
#include "bfa_modules.h"
#include "bfa_fcs.h"
#include "bfa_defs_fcs.h"
#include <cs/bfa_plog.h>
#include "aen/bfa_aen.h"
#include <log/bfa_log_linux.h>
#include "bfa_plog.h"
#include "bfa_cs.h"
#define BFAD_DRIVER_NAME "bfa"
#define BFAD_DRIVER_NAME "bfa"
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
#define BFAD_DRIVER_VERSION "2.2.2.1"
#define BFAD_DRIVER_VERSION "2.3.2.0"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
#define BFAD_IRQ_FLAGS IRQF_SHARED
#ifndef FC_PORTSPEED_8GBIT
#define FC_PORTSPEED_8GBIT 0x10
#endif
/*
* BFAD flags
*/
@ -62,9 +59,9 @@
#define BFAD_HAL_START_DONE 0x00000010
#define BFAD_PORT_ONLINE 0x00000020
#define BFAD_RPORT_ONLINE 0x00000040
#define BFAD_FCS_INIT_DONE 0x00000080
#define BFAD_HAL_INIT_FAIL 0x00000100
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_FCS_INIT_DONE 0x00000080
#define BFAD_HAL_INIT_FAIL 0x00000100
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
/*
@ -77,8 +74,8 @@
/*
* BFAD configuration parameter default values
*/
#define BFAD_LUN_QUEUE_DEPTH 32
#define BFAD_IO_MAX_SGE SG_ALL
#define BFAD_LUN_QUEUE_DEPTH 32
#define BFAD_IO_MAX_SGE SG_ALL
#define bfad_isr_t irq_handler_t
@ -87,6 +84,16 @@
struct bfad_msix_s {
struct bfad_s *bfad;
struct msix_entry msix;
char name[32];
};
/*
* Only append to the enums defined here to avoid any versioning
* needed between trace utility and driver version
*/
enum {
BFA_TRC_LDRV_BFAD = 1,
BFA_TRC_LDRV_IM = 2,
};
enum bfad_port_pvb_type {
@ -101,17 +108,13 @@ enum bfad_port_pvb_type {
*/
struct bfad_port_s {
struct list_head list_entry;
struct bfad_s *bfad;
struct bfa_fcs_port_s *fcs_port;
u32 roles;
s32 flags;
u32 supported_fc4s;
u8 ipfc_flags;
struct bfad_s *bfad;
struct bfa_fcs_lport_s *fcs_port;
u32 roles;
s32 flags;
u32 supported_fc4s;
enum bfad_port_pvb_type pvb_type;
struct bfad_im_port_s *im_port; /* IM specific data */
struct bfad_tm_port_s *tm_port; /* TM specific data */
struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */
/* port debugfs specific data */
struct dentry *port_debugfs_root;
};
@ -124,7 +127,6 @@ struct bfad_vport_s {
struct bfa_fcs_vport_s fcs_vport;
struct completion *comp_del;
struct list_head list_entry;
struct bfa_port_cfg_s port_cfg;
};
/*
@ -137,20 +139,35 @@ struct bfad_vf_s {
};
struct bfad_cfg_param_s {
u32 rport_del_timeout;
u32 ioc_queue_depth;
u32 lun_queue_depth;
u32 io_max_sge;
u32 binding_method;
u32 rport_del_timeout;
u32 ioc_queue_depth;
u32 lun_queue_depth;
u32 io_max_sge;
u32 binding_method;
};
union bfad_tmp_buf {
/* From struct bfa_adapter_attr_s */
char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char fw_ver[BFA_VERSION_LEN];
char optrom_ver[BFA_VERSION_LEN];
/* From struct bfa_ioc_pci_attr_s */
u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
wwn_t wwn[BFA_FCS_MAX_LPORTS];
};
/*
* BFAD (PCI function) data structure
*/
struct bfad_s {
bfa_sm_t sm; /* state machine */
struct list_head list_entry;
struct bfa_s bfa;
struct bfa_fcs_s bfa_fcs;
struct bfa_s bfa;
struct bfa_fcs_s bfa_fcs;
struct pci_dev *pcidev;
const char *pci_name;
struct bfa_pcidev_s hal_pcidev;
@ -163,41 +180,41 @@ struct bfad_s {
struct bfad_port_s pport; /* physical port of the BFAD */
struct bfa_meminfo_s meminfo;
struct bfa_iocfc_cfg_s ioc_cfg;
u32 inst_no; /* BFAD instance number */
u32 bfad_flags;
u32 inst_no; /* BFAD instance number */
u32 bfad_flags;
spinlock_t bfad_lock;
struct task_struct *bfad_tsk;
struct bfad_cfg_param_s cfg_data;
struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
int nvec;
char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
char port_name[BFA_ADAPTER_SYM_NAME_LEN];
int nvec;
char adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
char port_name[BFA_ADAPTER_SYM_NAME_LEN];
struct timer_list hal_tmo;
unsigned long hs_start;
struct bfad_im_s *im; /* IM specific data */
struct bfad_tm_s *tm; /* TM specific data */
struct bfad_ipfc_s *ipfc; /* IPFC specific data */
struct bfa_log_mod_s log_data;
struct bfa_trc_mod_s *trcmod;
struct bfa_log_mod_s *logmod;
struct bfa_aen_s *aen;
struct bfa_aen_s aen_buf;
void *file_map[BFA_AEN_MAX_APP];
struct bfa_plog_s plog_buf;
int ref_count;
bfa_boolean_t ipfc_enabled;
int ref_count;
union bfad_tmp_buf tmp_buf;
struct fc_host_statistics link_stats;
struct list_head pbc_pcfg_list;
atomic_t wq_reqcnt;
struct list_head pbc_vport_list;
/* debugfs specific data */
char *regdata;
u32 reglen;
struct dentry *bfad_dentry_files[5];
};
struct bfad_pcfg_s {
struct list_head list_entry;
struct bfa_port_cfg_s port_cfg;
/* BFAD state machine events */
enum bfad_sm_event {
BFAD_E_CREATE = 1,
BFAD_E_KTHREAD_CREATE_FAILED = 2,
BFAD_E_INIT = 3,
BFAD_E_INIT_SUCCESS = 4,
BFAD_E_INIT_FAILED = 5,
BFAD_E_INTR_INIT_FAILED = 6,
BFAD_E_FCS_EXIT_COMP = 7,
BFAD_E_EXIT_COMP = 8,
BFAD_E_STOP = 9
};
/*
@ -208,30 +225,30 @@ struct bfad_rport_s {
};
struct bfad_buf_info {
void *virt;
void *virt;
dma_addr_t phys;
u32 size;
u32 size;
};
struct bfad_fcxp {
struct bfad_port_s *port;
struct bfa_rport_s *bfa_rport;
bfa_status_t req_status;
u16 tag;
u16 rsp_len;
u16 rsp_maxlen;
u8 use_ireqbuf;
u8 use_irspbuf;
u32 num_req_sgles;
u32 num_rsp_sgles;
struct fchs_s fchs;
void *reqbuf_info;
void *rspbuf_info;
u16 tag;
u16 rsp_len;
u16 rsp_maxlen;
u8 use_ireqbuf;
u8 use_irspbuf;
u32 num_req_sgles;
u32 num_rsp_sgles;
struct fchs_s fchs;
void *reqbuf_info;
void *rspbuf_info;
struct bfa_sge_s *req_sge;
struct bfa_sge_s *rsp_sge;
fcxp_send_cb_t send_cbfn;
void *send_cbarg;
void *bfa_fcxp;
void *send_cbarg;
void *bfa_fcxp;
struct completion comp;
};
@ -244,34 +261,48 @@ struct bfad_hal_comp {
* Macro to obtain the immediate lower power
* of two for the integer.
*/
#define nextLowerInt(x) \
do { \
int j; \
(*x)--; \
for (j = 1; j < (sizeof(int) * 8); j <<= 1) \
(*x) = (*x) | (*x) >> j; \
(*x)++; \
(*x) = (*x) >> 1; \
#define nextLowerInt(x) \
do { \
int i; \
(*x)--; \
for (i = 1; i < (sizeof(int)*8); i <<= 1) \
(*x) = (*x) | (*x) >> i; \
(*x)++; \
(*x) = (*x) >> 1; \
} while (0)
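As a quick check of what nextLowerInt() computes, the standalone program below (an illustration only; the macro body is copied from the header above) rounds its argument down to a power of two: 100 becomes 64, an exact power such as 64 drops to the next lower power, 32, and 33 becomes 32.
#include <stdio.h>

/* macro body copied from bfad_drv.h above */
#define nextLowerInt(x)						\
do {								\
	int i;							\
	(*x)--;							\
	for (i = 1; i < (sizeof(int)*8); i <<= 1)		\
		(*x) = (*x) | (*x) >> i;			\
	(*x)++;							\
	(*x) = (*x) >> 1;					\
} while (0)

int main(void)
{
	int vals[] = { 100, 64, 33 };
	int k;

	for (k = 0; k < 3; k++) {
		int v = vals[k];
		nextLowerInt(&v);
		/* prints: 100 -> 64, 64 -> 32, 33 -> 32 */
		printf("%d -> %d\n", vals[k], v);
	}
	return 0;
}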
bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg, struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
bfa_status_t bfad_drv_init(struct bfad_s *bfad);
#define list_remove_head(list, entry, type, member) \
do { \
entry = NULL; \
if (!list_empty(list)) { \
entry = list_entry((list)->next, type, member); \
list_del_init(&entry->member); \
} \
} while (0)
#define list_get_first(list, type, member) \
((list_empty(list)) ? NULL : \
list_entry((list)->next, type, member))
bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_lport_cfg_s *port_cfg,
struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_lport_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
bfa_status_t bfad_drv_init(struct bfad_s *bfad);
bfa_status_t bfad_start_ops(struct bfad_s *bfad);
void bfad_drv_start(struct bfad_s *bfad);
void bfad_uncfg_pport(struct bfad_s *bfad);
void bfad_drv_stop(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_hal_mem_release(struct bfad_s *bfad);
void bfad_hcb_comp(void *arg, bfa_status_t status);
int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_drv_start(struct bfad_s *bfad);
void bfad_uncfg_pport(struct bfad_s *bfad);
void bfad_stop(struct bfad_s *bfad);
void bfad_fcs_stop(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_hal_mem_release(struct bfad_s *bfad);
void bfad_hcb_comp(void *arg, bfa_status_t status);
int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
void bfad_bfa_tmo(unsigned long data);
@ -280,9 +311,6 @@ int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_fcs_port_cfg(struct bfad_s *bfad);
void bfad_drv_uninit(struct bfad_s *bfad);
void bfad_drv_log_level_set(struct bfad_s *bfad);
bfa_status_t bfad_fc4_module_init(void);
void bfad_fc4_module_exit(void);
int bfad_worker(void *ptr);
void bfad_debugfs_init(struct bfad_port_s *port);
void bfad_debugfs_exit(struct bfad_port_s *port);
@ -294,10 +322,30 @@ int bfad_os_get_linkup_delay(struct bfad_s *bfad);
int bfad_install_msix_handler(struct bfad_s *bfad);
extern struct idr bfad_im_port_index;
extern struct pci_device_id bfad_id_table[];
extern struct list_head bfad_list;
extern int bfa_lun_queue_depth;
extern int bfad_supported_fc4s;
extern int bfa_linkup_delay;
extern char *os_name;
extern char *os_patch;
extern char *host_name;
extern int num_rports;
extern int num_ios;
extern int num_tms;
extern int num_fcxps;
extern int num_ufbufs;
extern int reqq_size;
extern int rspq_size;
extern int num_sgpgs;
extern int rport_del_timeout;
extern int bfa_lun_queue_depth;
extern int bfa_io_max_sge;
extern int log_level;
extern int ioc_auto_recover;
extern int bfa_linkup_delay;
extern int msix_disable_cb;
extern int msix_disable_ct;
extern int fdmi_enable;
extern int supported_fc4s;
extern int pcie_max_read_reqsz;
extern int bfa_debugfs_enable;
extern struct mutex bfad_mutex;


@ -1,131 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/**
* bfad_fwimg.c Linux driver firmware image loading module.
*/
#include <bfa_os_inc.h>
#include <bfad_drv.h>
#include <bfad_im_compat.h>
#include <defs/bfa_defs_version.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <bfa_fwimg_priv.h>
#include <bfa.h>
u32 bfi_image_ct_fc_size;
u32 bfi_image_ct_cna_size;
u32 bfi_image_cb_fc_size;
u32 *bfi_image_ct_fc;
u32 *bfi_image_ct_cna;
u32 *bfi_image_cb_fc;
#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name)
{
const struct firmware *fw;
if (request_firmware(&fw, fw_name, &pdev->dev)) {
printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
goto error;
}
*bfi_image = vmalloc(fw->size);
if (NULL == *bfi_image) {
printk(KERN_ALERT "Fail to allocate buffer for fw image "
"size=%x!\n", (u32) fw->size);
goto error;
}
memcpy(*bfi_image, fw->data, fw->size);
*bfi_image_size = fw->size/sizeof(u32);
return *bfi_image;
error:
return NULL;
}
u32 *
bfad_get_firmware_buf(struct pci_dev *pdev)
{
if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
if (bfi_image_ct_fc_size == 0)
bfad_read_firmware(pdev, &bfi_image_ct_fc,
&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
return bfi_image_ct_fc;
} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
if (bfi_image_ct_cna_size == 0)
bfad_read_firmware(pdev, &bfi_image_ct_cna,
&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
return bfi_image_ct_cna;
} else {
if (bfi_image_cb_fc_size == 0)
bfad_read_firmware(pdev, &bfi_image_cb_fc,
&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
return bfi_image_cb_fc;
}
}
u32 *
bfi_image_ct_fc_get_chunk(u32 off)
{ return (u32 *)(bfi_image_ct_fc + off); }
u32 *
bfi_image_ct_cna_get_chunk(u32 off)
{ return (u32 *)(bfi_image_ct_cna + off); }
u32 *
bfi_image_cb_fc_get_chunk(u32 off)
{ return (u32 *)(bfi_image_cb_fc + off); }
uint32_t *
bfi_image_get_chunk(int type, uint32_t off)
{
switch (type) {
case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break;
case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break;
case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break;
default: return 0; break;
}
}
uint32_t
bfi_image_get_size(int type)
{
switch (type) {
case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break;
case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break;
case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break;
default: return 0; break;
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -19,12 +19,10 @@
* bfad_im.c Linux driver IM module.
*/
#include <linux/slab.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_trcmod.h"
#include "bfa_cb_ioim_macros.h"
#include <fcb/bfa_fcb_fcpim.h>
#include "bfa_cb_ioim.h"
#include "bfa_fcs.h"
BFA_TRC_FILE(LDRV, IM);
@ -33,8 +31,10 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *));
void (*done)(struct scsi_cmnd *));
static int bfad_im_slave_alloc(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
@ -58,6 +58,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
sns_len = SCSI_SENSE_BUFFERSIZE;
memcpy(cmnd->sense_buffer, sns_info, sns_len);
}
if (residue > 0) {
bfa_trc(bfad, residue);
scsi_set_resid(cmnd, residue);
@ -76,7 +77,8 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
case BFI_IOIM_STS_TIMEDOUT:
case BFI_IOIM_STS_PATHTOV:
default:
cmnd->result = ScsiResult(DID_ERROR, 0);
host_status = DID_ERROR;
cmnd->result = ScsiResult(host_status, 0);
}
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@ -162,11 +164,6 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
wake_up(wq);
}
void
bfa_cb_ioim_resfree(void *drv)
{
}
/**
* Scsi_Host_template SCSI host template
*/
@ -179,15 +176,23 @@ bfad_im_info(struct Scsi_Host *shost)
static char bfa_buf[256];
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_s *bfad = im_port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
bfa_get_adapter_model(bfa, model);
memset(bfa_buf, 0, sizeof(bfa_buf));
snprintf(bfa_buf, sizeof(bfa_buf),
"Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
if (ioc->ctdev)
snprintf(bfa_buf, sizeof(bfa_buf),
"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
model, bfad->pci_name, BFAD_DRIVER_VERSION);
else
snprintf(bfa_buf, sizeof(bfa_buf),
"Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
model, bfad->pci_name, BFAD_DRIVER_VERSION);
return bfa_buf;
}
@ -221,9 +226,9 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
}
bfa_trc(bfad, hal_io->iotag);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT,
BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n",
im_port->shost->host_no, cmnd, hal_io->iotag);
bfa_ioim_abort(hal_io);
(void) bfa_ioim_abort(hal_io);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* Need to wait until the command get aborted */
@ -237,7 +242,8 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
cmnd->scsi_done(cmnd);
bfa_trc(bfad, hal_io->iotag);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP,
BFA_LOG(KERN_INFO, bfad, log_level,
"scsi%d: complete abort 0x%p iotag 0x%x\n",
im_port->shost->host_no, cmnd, hal_io->iotag);
return SUCCESS;
out:
@ -255,8 +261,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
if (!tskim) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
"target reset, fail to allocate tskim\n");
BFA_LOG(KERN_ERR, bfad, log_level,
"target reset, fail to allocate tskim\n");
rc = BFA_STATUS_FAILED;
goto out;
}
@ -306,7 +312,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
if (!tskim) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
BFA_LOG(KERN_ERR, bfad, log_level,
"LUN reset, fail to allocate tskim");
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
rc = FAILED;
@ -331,8 +337,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
task_status = cmnd->SCp.Status >> 1;
if (task_status != BFI_TSKIM_STS_OK) {
BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n",
task_status);
BFA_LOG(KERN_ERR, bfad, log_level,
"LUN reset failure, status: %d\n", task_status);
rc = FAILED;
}
@ -375,7 +381,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
task_status = cmnd->SCp.Status >> 1;
if (task_status != BFI_TSKIM_STS_OK) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
BFA_LOG(KERN_ERR, bfad, log_level,
"target reset failure,"
" status: %d\n", task_status);
err_cnt++;
@ -438,6 +444,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
wwn_t wwpn;
u32 fcid;
char wwpn_str[32], fcid_str[16];
struct bfad_im_s *im = itnim_drv->im;
/* online to free state transtion should not happen */
bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
@ -454,10 +461,14 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
wwn2str(wwpn_str, wwpn);
fcid2str(fcid_str, fcid);
bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE,
BFA_LOG(KERN_INFO, bfad, log_level,
"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
port->im_port->shost->host_no,
fcid_str, wwpn_str);
bfad_os_itnim_process(itnim_drv);
/* ITNIM processing */
if (itnim_drv->queue_work)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
@ -468,13 +479,17 @@ void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
struct bfad_port_s *port;
struct bfad_im_s *im = itnim_drv->im;
itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
itnim_drv->state = ITNIM_STATE_ONLINE;
itnim_drv->queue_work = 1;
itnim_drv->im_port = port->im_port;
bfad_os_itnim_process(itnim_drv);
/* ITNIM processing */
if (itnim_drv->queue_work)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
@ -486,6 +501,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
struct bfad_port_s *port;
struct bfad_s *bfad;
struct bfad_im_s *im = itnim_drv->im;
port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
bfad = port->bfad;
@ -497,16 +513,10 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
itnim_drv->im_port = port->im_port;
itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
itnim_drv->queue_work = 1;
bfad_os_itnim_process(itnim_drv);
}
/**
* BFA FCS itnim timeout callback.
* Context: Interrupt. bfad_lock is held
*/
void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
{
itnim->state = ITNIM_STATE_TIMEOUT;
/* ITNIM processing */
if (itnim_drv->queue_work)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
/**
@ -514,7 +524,7 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
*/
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
struct device *dev)
struct device *dev)
{
int error = 1;
@ -580,7 +590,7 @@ void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
bfa_trc(bfad, bfad->inst_no);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n",
im_port->shost->host_no);
fc_remove_host(im_port->shost);
@ -598,14 +608,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
{
struct bfad_im_port_s *im_port =
container_of(work, struct bfad_im_port_s, port_delete_work);
struct bfad_s *bfad = im_port->bfad;
if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
im_port->flags |= BFAD_PORT_DELETE;
fc_vport_terminate(im_port->fc_vport);
atomic_dec(&bfad->wq_reqcnt);
}
}
bfa_status_t
@ -636,11 +643,8 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
struct bfad_im_port_s *im_port = port->im_port;
if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
atomic_inc(&bfad->wq_reqcnt);
queue_work(bfad->im->drv_workq,
queue_work(bfad->im->drv_workq,
&im_port->port_delete_work);
}
}
void
@ -663,16 +667,6 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
void
bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
{
}
void
bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
{
}
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
@ -701,27 +695,12 @@ void
bfad_im_probe_undo(struct bfad_s *bfad)
{
if (bfad->im) {
while (atomic_read(&bfad->wq_reqcnt)) {
printk(KERN_INFO "bfa %s: waiting workq processing,"
" wq_reqcnt:%x\n", bfad->pci_name,
atomic_read(&bfad->wq_reqcnt));
schedule_timeout_uninterruptible(HZ);
}
bfad_os_destroy_workq(bfad->im);
kfree(bfad->im);
bfad->im = NULL;
}
}
/**
* Call back function to handle IO redirection state change
*/
void
bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
{
/* Do nothing */
}
struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
@ -751,6 +730,7 @@ void
bfad_os_destroy_workq(struct bfad_im_s *im)
{
if (im && im->drv_workq) {
flush_workqueue(im->drv_workq);
destroy_workqueue(im->drv_workq);
im->drv_workq = NULL;
}
@ -762,7 +742,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
struct bfad_im_s *im = bfad->im;
bfa_trc(bfad, 0);
snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d",
snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
bfad->inst_no);
im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
if (!im->drv_workq)
@ -832,12 +812,6 @@ struct scsi_host_template bfad_im_vport_template = {
.max_sectors = 0xFFFF,
};
void
bfad_im_probe_post(struct bfad_im_s *im)
{
flush_workqueue(im->drv_workq);
}
bfa_status_t
bfad_im_module_init(void)
{
@ -861,19 +835,11 @@ bfad_im_module_exit(void)
{
if (bfad_im_scsi_transport_template)
fc_release_transport(bfad_im_scsi_transport_template);
if (bfad_im_scsi_vport_transport_template)
fc_release_transport(bfad_im_scsi_vport_transport_template);
}
void
bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
{
struct bfad_im_s *im = itnim_drv->im;
if (itnim_drv->queue_work)
queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
void
bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
@ -916,9 +882,6 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
}
}
struct bfad_itnim_s *
bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
{
@ -949,44 +912,64 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
return 0;
}
static u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s ioc_attr;
u32 supported_speed = 0;
bfa_get_attr(bfa, &ioc_attr);
if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
if (ioc_attr.adapter_attr.is_mezz) {
supported_speed |= FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
} else {
supported_speed |= FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT;
}
} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
FC_PORTSPEED_1GBIT;
} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
supported_speed |= FC_PORTSPEED_10GBIT;
}
return supported_speed;
}
void
bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
{
struct Scsi_Host *host = im_port->shost;
struct bfad_s *bfad = im_port->bfad;
struct bfad_port_s *port = im_port->port;
struct bfa_pport_attr_s pattr;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char fw_ver[BFA_VERSION_LEN];
struct bfa_port_attr_s pattr;
struct bfa_lport_attr_s port_attr;
char symname[BFA_SYMNAME_MAXLEN];
fc_host_node_name(host) =
bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
fc_host_port_name(host) =
bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
fc_host_supported_classes(host) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(host), 0,
sizeof(fc_host_supported_fc4s(host)));
if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
/* For FCP type 0x08 */
fc_host_supported_fc4s(host)[2] = 1;
if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
/* For LLC/SNAP type 0x05 */
fc_host_supported_fc4s(host)[3] = 0x20;
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(host)[7] = 1;
bfa_get_adapter_model(&bfad->bfa, model);
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
model, fw_ver, BFAD_DRIVER_VERSION);
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strncpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
sprintf(fc_host_symbolic_name(host), "%s", symname);
fc_host_supported_speeds(host) = 0;
fc_host_supported_speeds(host) |=
FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
bfa_fcport_get_attr(&bfad->bfa, &pattr);
fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
@ -1065,7 +1048,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
fcid2str(fcid_str, fcid);
list_add_tail(&itnim->list_entry,
&im_port->itnim_mapped_list);
bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE,
BFA_LOG(KERN_INFO, bfad, log_level,
"ITNIM ONLINE Target: %d:0:%d "
"FCID: %s WWPN: %s\n",
im_port->shost->host_no,
itnim->scsi_tgt_id,
fcid_str, wwpn_str);
@ -1096,7 +1081,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
wwn2str(wwpn_str, wwpn);
fcid2str(fcid_str, fcid);
list_del(&itnim->list_entry);
bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE,
BFA_LOG(KERN_INFO, bfad, log_level,
"ITNIM OFFLINE Target: %d:0:%d "
"FCID: %s WWPN: %s\n",
im_port->shost->host_no,
itnim->scsi_tgt_id,
fcid_str, wwpn_str);
@ -1142,7 +1129,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
struct bfa_ioim_s *hal_io;
unsigned long flags;
int rc;
s16 sg_cnt = 0;
int sg_cnt = 0;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
rc = fc_remote_port_chkready(rport);
@ -1153,7 +1140,6 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
}
sg_cnt = scsi_dma_map(cmnd);
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;
@ -1168,6 +1154,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_fail_cmd;
}
itnim = itnim_data->itnim;
if (!itnim) {
cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
@ -1206,47 +1193,49 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
int rport_delay = 10;
for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
&& i < bfa_linkup_delay; i++)
schedule_timeout_uninterruptible(HZ);
&& i < bfa_linkup_delay; i++) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
rport_delay = rport_delay < bfa_linkup_delay ?
rport_delay : bfa_linkup_delay;
rport_delay : bfa_linkup_delay;
for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
&& i < rport_delay; i++)
schedule_timeout_uninterruptible(HZ);
&& i < rport_delay; i++) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
schedule_timeout_uninterruptible(rport_delay * HZ);
if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(rport_delay * HZ);
}
}
}
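
The hunk above replaces schedule_timeout_uninterruptible(HZ) with an explicit set_current_state(TASK_UNINTERRUPTIBLE) followed by schedule_timeout(HZ); both forms sleep for roughly a second between polls of a driver flag. A minimal sketch of that polling-wait pattern, using a hypothetical helper and callback that are not part of this driver:

#include <linux/sched.h>
#include <linux/types.h>

/*
 * Hypothetical helper (not part of the driver): poll a condition roughly
 * once per second, sleeping uninterruptibly between checks, and give up
 * after max_secs seconds.
 */
static void example_wait_for_condition(bool (*cond)(void *arg), void *arg,
				       int max_secs)
{
	int i;

	for (i = 0; !cond(arg) && i < max_secs; i++) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);	/* HZ jiffies, i.e. about one second */
	}
}

The uninterruptible state only means the sleep is not cut short by signals; the condition is still re-checked once per second.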
int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{
u8 nwwns = 0;
wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
int ldelay;
u8 nwwns = 0;
wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
int linkup_delay;
/*
* Querying for the boot target port wwns
* -- read from boot information in flash.
* If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30
* else => local boot machine set bfa_linkup_delay = 10
* If nwwns > 0 => boot over SAN and set linkup_delay = 30
* else => local boot machine set linkup_delay = 0
*/
bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
if (nwwns > 0) {
/* If boot over SAN; linkup_delay = 30sec */
ldelay = 30;
} else {
/* If local boot; linkup_delay = 10sec */
ldelay = 0;
}
if (nwwns > 0)
/* If Boot over SAN set linkup_delay = 30sec */
linkup_delay = 30;
else
/* If local boot; no linkup_delay */
linkup_delay = 0;
return ldelay;
return linkup_delay;
}
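
Several hunks in this file drop the bfad_os_itnim_process() wrapper, queue the per-itnim work item directly on im->drv_workq, and add a flush_workqueue() call before destroy_workqueue() in bfad_os_destroy_workq(). The generic create/queue/flush/destroy pattern those changes rely on can be sketched as below; every example_* name is invented for illustration and nothing here is the driver's own code.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_item {
	struct work_struct work;
	int queue_work;			/* gate: only queue when set */
};

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	struct example_item *item =
		container_of(work, struct example_item, work);

	/* ... process the item here, then release it ... */
	kfree(item);
}

static int example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	return example_wq ? 0 : -ENOMEM;
}

static struct example_item *example_item_alloc(void)
{
	struct example_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (item) {
		INIT_WORK(&item->work, example_work_fn);
		item->queue_work = 1;
	}
	return item;
}

static void example_item_queue(struct example_item *item)
{
	/* mirrors the per-itnim queue_work flag checks in the hunks above */
	if (item->queue_work)
		queue_work(example_wq, &item->work);
}

static void example_exit(void)
{
	if (example_wq) {
		flush_workqueue(example_wq);	/* let queued items finish */
		destroy_workqueue(example_wq);
		example_wq = NULL;
	}
}

Flushing before destroy_workqueue() makes the teardown ordering explicit: anything queued earlier has run to completion before the queue goes away, which appears to be what lets the driver drop the wq_reqcnt bookkeeping removed above.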

Просмотреть файл

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
@ -18,20 +18,20 @@
#ifndef __BFAD_IM_H__
#define __BFAD_IM_H__
#include "fcs/bfa_fcs_fcpim.h"
#include "bfad_im_compat.h"
#include "bfa_fcs.h"
#define FCPI_NAME " fcpim"
#ifndef KOBJ_NAME_LEN
#define KOBJ_NAME_LEN 20
#endif
bfa_status_t bfad_im_module_init(void);
void bfad_im_module_exit(void);
bfa_status_t bfad_im_probe(struct bfad_s *bfad);
void bfad_im_probe_undo(struct bfad_s *bfad);
void bfad_im_probe_post(struct bfad_im_s *im);
bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_clean(struct bfad_im_port_s *im_port);
int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
struct bfad_im_port_s *im_port, struct device *dev);
@ -44,14 +44,10 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad,
#define BFAD_LUN_RESET_TMO 60
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
#define BFAD_KOBJ_NAME_LEN 20
/*
* itnim flags
*/
#define ITNIM_MAPPED 0x00000001
#define SCSI_TASK_MGMT 0x00000001
#define IO_DONE_BIT 0
struct bfad_itnim_data_s {
@ -64,7 +60,7 @@ struct bfad_im_port_s {
struct work_struct port_delete_work;
int idr_id;
u16 cur_scsi_id;
u16 flags;
u16 flags;
struct list_head binding_list;
struct Scsi_Host *shost;
struct list_head itnim_mapped_list;
@ -118,14 +114,13 @@ struct bfad_fcp_binding {
struct bfad_im_s {
struct bfad_s *bfad;
struct workqueue_struct *drv_workq;
char drv_workq_name[BFAD_KOBJ_NAME_LEN];
char drv_workq_name[KOBJ_NAME_LEN];
};
struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
void bfad_os_destroy_workq(struct bfad_im_s *im);
void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
void bfad_os_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
@ -133,11 +128,6 @@ void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
struct scsi_device *sdev);
void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
int bfad_os_scsi_add_host(struct Scsi_Host *shost,
struct bfad_im_port_s *im_port, struct bfad_s *bfad);
void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
extern struct scsi_host_template bfad_im_scsi_host_template;
extern struct scsi_host_template bfad_im_vport_template;
@ -146,4 +136,34 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
extern struct device_attribute *bfad_im_host_attrs[];
extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
/* Firmware related */
#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name);
static inline u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
return bfad_get_firmware_buf(pdev);
}
static inline void
bfad_free_fwimg(void)
{
if (bfi_image_ct_fc_size && bfi_image_ct_fc)
vfree(bfi_image_ct_fc);
if (bfi_image_ct_cna_size && bfi_image_ct_cna)
vfree(bfi_image_ct_cna);
if (bfi_image_cb_fc_size && bfi_image_cb_fc)
vfree(bfi_image_cb_fc);
}
#endif
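
bfad_im.h now carries the firmware file names plus the bfad_load_fwimg()/bfad_free_fwimg() inline helpers that previously sat in bfad_im_compat.h. The bfad_read_firmware() routine it declares is not shown in this diff; as a hedged guess at the usual technique behind such a helper (request the blob from user space, copy it into a vmalloc'd buffer, drop the struct firmware), a self-contained sketch with an invented name could look like this. A vmalloc'd copy is at least consistent with the vfree() calls in bfad_free_fwimg() above.

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

/*
 * Hypothetical sketch, not the driver's bfad_read_firmware(): load a
 * firmware blob by name, copy it into a vmalloc'd buffer the caller owns,
 * and return the buffer together with its size (NULL on failure).
 */
static u32 *example_read_firmware(struct pci_dev *pdev, u32 *image_size,
				  const char *fw_name)
{
	const struct firmware *fw;
	u32 *image;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		dev_err(&pdev->dev, "can't load firmware %s\n", fw_name);
		return NULL;
	}

	image = vmalloc(fw->size);
	if (image) {
		memcpy(image, fw->data, fw->size);
		*image_size = fw->size;
	}

	release_firmware(fw);	/* the vmalloc'd copy outlives the request */
	return image;
}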

Просмотреть файл

@ -1,45 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __BFAD_IM_COMPAT_H__
#define __BFAD_IM_COMPAT_H__
extern struct device_attribute *bfad_im_host_attrs[];
extern struct device_attribute *bfad_im_vport_attrs[];
u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name);
static inline u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
return bfad_get_firmware_buf(pdev);
}
static inline void
bfad_free_fwimg(void)
{
if (bfi_image_ct_fc_size && bfi_image_ct_fc)
vfree(bfi_image_ct_fc);
if (bfi_image_ct_cna_size && bfi_image_ct_cna)
vfree(bfi_image_ct_cna);
if (bfi_image_cb_fc_size && bfi_image_cb_fc)
vfree(bfi_image_cb_fc);
}
#endif

Просмотреть файл

@ -1,222 +0,0 @@
/*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_trcmod.h"
BFA_TRC_FILE(LDRV, INTR);
/**
* bfa_isr BFA driver interrupt functions
*/
static int msix_disable_cb;
static int msix_disable_ct;
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
" cards, default=0, Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
" cards, default=0, Range[false:0|true:1]");
/**
* Line based interrupt handler.
*/
static irqreturn_t
bfad_intx(int irq, void *dev_id)
{
struct bfad_s *bfad = dev_id;
struct list_head doneq;
unsigned long flags;
bfa_boolean_t rc;
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_intx(&bfad->bfa);
if (!rc) {
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return IRQ_NONE;
}
bfa_comp_deq(&bfad->bfa, &doneq);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (!list_empty(&doneq)) {
bfa_comp_process(&bfad->bfa, &doneq);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_comp_free(&bfad->bfa, &doneq);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfa_trc_fp(bfad, irq);
}
return IRQ_HANDLED;
}
static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
struct bfad_msix_s *vec = dev_id;
struct bfad_s *bfad = vec->bfad;
struct list_head doneq;
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_msix(&bfad->bfa, vec->msix.entry);
bfa_comp_deq(&bfad->bfa, &doneq);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (!list_empty(&doneq)) {
bfa_comp_process(&bfad->bfa, &doneq);
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_comp_free(&bfad->bfa, &doneq);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
return IRQ_HANDLED;
}
/**
* Initialize the MSIX entry table.
*/
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
int mask, int max_bit)
{
int i;
int match = 0x00000001;
for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
if (mask & match) {
bfad->msix_tab[bfad->nvec].msix.entry = i;
bfad->msix_tab[bfad->nvec].bfad = bfad;
msix_entries[bfad->nvec].entry = i;
bfad->nvec++;
}
match <<= 1;
}
}
int
bfad_install_msix_handler(struct bfad_s *bfad)
{
int i, error = 0;
for (i = 0; i < bfad->nvec; i++) {
error = request_irq(bfad->msix_tab[i].msix.vector,
(irq_handler_t) bfad_msix, 0,
BFAD_DRIVER_NAME, &bfad->msix_tab[i]);
bfa_trc(bfad, i);
bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
if (error) {
int j;
for (j = 0; j < i; j++)
free_irq(bfad->msix_tab[j].msix.vector,
&bfad->msix_tab[j]);
return 1;
}
}
return 0;
}
/**
* Setup MSIX based interrupt.
*/
int
bfad_setup_intr(struct bfad_s *bfad)
{
int error = 0;
u32 mask = 0, i, num_bit = 0, max_bit = 0;
struct msix_entry msix_entries[MAX_MSIX_ENTRY];
struct pci_dev *pdev = bfad->pcidev;
/* Call BFA to get the msix map for this PCI function. */
bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
/* Set up the msix entry table */
bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
(!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
if (error) {
/*
* Only error number of vector is available.
* We don't have a mechanism to map multiple
* interrupts into one vector, so even if we
* can try to request less vectors, we don't
* know how to associate interrupt events to
* vectors. Linux doesn't duplicate vectors
* in the MSIX table for this case.
*/
printk(KERN_WARNING "bfad%d: "
"pci_enable_msix failed (%d),"
" use line based.\n", bfad->inst_no, error);
goto line_based;
}
/* Save the vectors */
for (i = 0; i < bfad->nvec; i++) {
bfa_trc(bfad, msix_entries[i].vector);
bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
}
bfa_msix_init(&bfad->bfa, bfad->nvec);
bfad->bfad_flags |= BFAD_MSIX_ON;
return error;
}
line_based:
error = 0;
if (request_irq
(bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
BFAD_DRIVER_NAME, bfad) != 0) {
/* Enable interrupt handler failed */
return 1;
}
return error;
}
void
bfad_remove_intr(struct bfad_s *bfad)
{
int i;
if (bfad->bfad_flags & BFAD_MSIX_ON) {
for (i = 0; i < bfad->nvec; i++)
free_irq(bfad->msix_tab[i].msix.vector,
&bfad->msix_tab[i]);
pci_disable_msix(bfad->pcidev);
bfad->bfad_flags &= ~BFAD_MSIX_ON;
} else {
free_irq(bfad->pcidev->irq, bfad);
}
}
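
The deleted bfad_intr.c above (bfad_im.h now declares bfad_intx(), so the handlers evidently move into another file in this merge) follows the usual pattern of trying MSI-X first and falling back to a legacy line-based handler if pci_enable_msix() fails. A stripped-down, single-vector sketch of that pattern is below; the example_* names are invented, and pci_enable_msix() is used because it was the PCI API at the time of this merge.

#include <linux/interrupt.h>
#include <linux/pci.h>

#define EXAMPLE_NVEC	1

struct example_dev {
	struct pci_dev *pdev;
	struct msix_entry msix[EXAMPLE_NVEC];
	bool msix_on;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the hardware event here */
	return IRQ_HANDLED;
}

static int example_setup_intr(struct example_dev *dev)
{
	int i, err;

	for (i = 0; i < EXAMPLE_NVEC; i++)
		dev->msix[i].entry = i;

	err = pci_enable_msix(dev->pdev, dev->msix, EXAMPLE_NVEC);
	if (!err) {
		err = request_irq(dev->msix[0].vector, example_isr, 0,
				  "example", dev);
		if (!err) {
			dev->msix_on = true;
			return 0;
		}
		pci_disable_msix(dev->pdev);
	}

	/* fall back to the legacy (INTx) line-based interrupt */
	return request_irq(dev->pdev->irq, example_isr, IRQF_SHARED,
			   "example", dev);
}

static void example_remove_intr(struct example_dev *dev)
{
	if (dev->msix_on) {
		free_irq(dev->msix[0].vector, dev);
		pci_disable_msix(dev->pdev);
	} else {
		free_irq(dev->pdev->irq, dev);
	}
}

As the removed comment notes, a positive return from pci_enable_msix() only reports how many vectors are actually available; since the driver has no way to fold its events onto fewer vectors, it gives up on MSI-X and uses the INTx line rather than retrying with a smaller count.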
