Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (104 commits)
  [SCSI] fcoe: fix configuration problems
  [SCSI] cxgb3i: fix select/depend problem
  [SCSI] fcoe: fix incorrect use of struct module
  [SCSI] cxgb3i: remove use of skb->sp
  [SCSI] cxgb3i: Add cxgb3i iSCSI driver.
  [SCSI] zfcp: Remove unnecessary warning message
  [SCSI] zfcp: Add support for unchained FSF requests
  [SCSI] zfcp: Remove busid macro
  [SCSI] zfcp: remove DID_DID flag
  [SCSI] zfcp: Simplify mask lookups for incoming RSCNs
  [SCSI] zfcp: Remove initial device data from zfcp_data
  [SCSI] zfcp: fix compile warning
  [SCSI] zfcp: Remove adapter list
  [SCSI] zfcp: Simplify SBAL allocation to fix sparse warnings
  [SCSI] zfcp: register with SCSI layer on ccw registration
  [SCSI] zfcp: Fix message line break
  [SCSI] qla2xxx: changes in multiq code
  [SCSI] eata: fix the data buffer accessors conversion regression
  [SCSI] ibmvfc: Improve async event handling
  [SCSI] lpfc : correct printk types on PPC compiles
  ...
Linus Torvalds 2008-12-30 17:43:10 -08:00
Parents: f54a6ec0fd fb5edd020f
Commit: 590cf28580
150 changed files with 28780 additions and 5674 deletions


@ -0,0 +1,85 @@
Chelsio S3 iSCSI Driver for Linux
Introduction
============
The Chelsio T3 ASIC based adapters (S310, S320, S302, S304, Mezz cards, and
other products in the series) support iSCSI acceleration and iSCSI Direct Data
Placement (DDP), where the hardware handles the expensive byte-touching
operations, such as CRC computation and verification, and DMAs the data
directly to its final host memory destination:
- iSCSI PDU digest generation and verification
On transmitting, Chelsio S3 h/w computes and inserts the Header and
Data digest into the PDUs.
On receiving, Chelsio S3 h/w computes and verifies the Header and
Data digest of the PDUs.
- Direct Data Placement (DDP)
S3 h/w can directly place the iSCSI Data-In or Data-Out PDU's
payload into pre-posted final destination host-memory buffers based
on the Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
in Data-Out PDUs.
- PDU Transmit and Recovery
On transmitting, S3 h/w accepts the complete PDU (header + data)
from the host driver, computes and inserts the digests, decomposes
the PDU into multiple TCP segments if necessary, and transmits all
the TCP segments onto the wire. It handles TCP retransmission if
needed.
On receiving, S3 h/w recovers the iSCSI PDU by reassembling TCP
segments, separating the header and data, calculating and verifying
the digests, then forwards the header to the host. The payload data,
if possible, will be directly placed into the pre-posted host DDP
buffer. Otherwise, the payload data will be sent to the host too.
The cxgb3i driver interfaces with the open-iscsi initiator and provides iSCSI
acceleration through Chelsio hardware wherever applicable.
Using the cxgb3i Driver
=======================
The following steps need to be taken to accelerate the open-iscsi initiator:
1. Load the cxgb3i driver: "modprobe cxgb3i"
The cxgb3i module registers a new transport class "cxgb3i" with open-iscsi.
* when recompiling the kernel, the cxgb3i selection is located at
Device Drivers
SCSI device support --->
[*] SCSI low-level drivers --->
<M> Chelsio S3xx iSCSI support
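As a quick sanity check, the transport classes currently registered with
open-iscsi can be listed from sysfs after the module is loaded:

    modprobe cxgb3i
    ls /sys/class/iscsi_transport/    # should now include "cxgb3i"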
2. Create an interface file located under /etc/iscsi/ifaces/ for the new
transport class "cxgb3i".
The content of the file should be in the following format:
iface.transport_name = cxgb3i
iface.net_ifacename = <ethX>
iface.ipaddress = <iscsi ip address>
* if iface.ipaddress is specified, <iscsi ip address> needs to be either the
same as ethX's IP address or an address on the same subnet. Make
sure the IP address is unique in the network.
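For example, an interface file /etc/iscsi/ifaces/cxgb3i.eth2 (the file name,
interface name, and IP address here are only placeholders) could look like:

    iface.transport_name = cxgb3i
    iface.net_ifacename = eth2
    iface.ipaddress = 192.168.1.10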
3. Edit /etc/iscsi/iscsid.conf
The default setting for MaxRecvDataSegmentLength (131072) is too big;
set "node.conn[0].iscsi.MaxRecvDataSegmentLength" to a value no
bigger than 15360 (for example, 8192):
node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192
* The login would fail for a normal session if MaxRecvDataSegmentLength is
too big. An error message in the format of
"cxgb3i: ERR! MaxRecvSegmentLength <X> too big. Need to be <= <Y>."
would be logged to dmesg.
4. To direct open-iscsi traffic through cxgb3i's accelerated path, the
"-I <iface file name>" option needs to be specified with most iscsiadm
commands. <iface file name> is the transport interface file created in
step 2.
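For example, discovery and login through the accelerated path could look like
the following, where the target portal 192.168.1.100:3260 and the iface file
name cxgb3i.eth2 are placeholders:

    iscsiadm -m discovery -t sendtargets -p 192.168.1.100:3260 -I cxgb3i.eth2
    iscsiadm -m node -p 192.168.1.100:3260 -I cxgb3i.eth2 --login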


@ -517,7 +517,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
sensebuf, (10*HZ), 5, 0);
sensebuf, (10*HZ), 5, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;
@ -603,7 +603,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
sensebuf, (10*HZ), 5, 0);
sensebuf, (10*HZ), 5, 0, NULL);
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
u8 *desc = sensebuf + 8;


@ -119,6 +119,14 @@ error:
iscsi_conn_failure(conn, rc);
}
static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
task->hdr_max = sizeof(iser_task->desc.iscsi_header);
return 0;
}
/**
* iscsi_iser_task_init - Initialize task
@ -180,25 +188,26 @@ static int
iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_data hdr;
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
int error = 0;
/* Send data-out PDUs while there's still unsolicited data to send */
while (task->unsol_count > 0) {
iscsi_prep_unsolicit_data_pdu(task, &hdr);
while (iscsi_task_has_unsol_data(task)) {
iscsi_prep_data_out_pdu(task, r2t, &hdr);
debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
hdr.itt, task->data_count);
hdr.itt, r2t->data_count);
/* the buffer description has been passed with the command */
/* Send the command */
error = iser_send_data_out(conn, task, &hdr);
if (error) {
task->unsol_datasn--;
r2t->datasn--;
goto iscsi_iser_task_xmit_unsol_data_exit;
}
task->unsol_count -= task->data_count;
r2t->sent += r2t->data_count;
debug_scsi("Need to send %d more as data-out PDUs\n",
task->unsol_count);
r2t->data_length - r2t->sent);
}
iscsi_iser_task_xmit_unsol_data_exit:
@ -220,7 +229,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
task->itt, scsi_bufflen(task->sc),
task->imm_count, task->unsol_count);
task->imm_count, task->unsol_r2t.data_length);
}
debug_scsi("task deq [cid %d itt 0x%x]\n",
@ -235,7 +244,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
}
/* Send unsolicited data-out PDU(s) if necessary */
if (task->unsol_count)
if (iscsi_task_has_unsol_data(task))
error = iscsi_iser_task_xmit_unsol_data(conn, task);
iscsi_iser_task_xmit_exit:
@ -244,13 +253,15 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
return error;
}
static void
iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
/* mgmt tasks do not need special cleanup */
if (!task->sc)
/*
* mgmt tasks do not need special cleanup and we do not
* allocate anything in the init task callout
*/
if (!task->sc || task->state == ISCSI_TASK_PENDING)
return;
if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@ -391,9 +402,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct Scsi_Host *shost;
int i;
struct iscsi_task *task;
struct iscsi_iser_task *iser_task;
struct iser_conn *ib_conn;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
@ -430,13 +438,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
session = cls_session->dd_data;
shost->can_queue = session->scsi_cmds_max;
/* libiscsi setup itts, data and pool so just set desc fields */
for (i = 0; i < session->cmds_max; i++) {
task = session->cmds[i];
iser_task = task->dd_data;
task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
task->hdr_max = sizeof(iser_task->desc.iscsi_header);
}
return cls_session;
remove_host:
@ -652,6 +653,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.init_task = iscsi_iser_task_init,
.xmit_task = iscsi_iser_task_xmit,
.cleanup_task = iscsi_iser_cleanup_task,
.alloc_pdu = iscsi_iser_pdu_alloc,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,


@ -353,8 +353,7 @@ int iser_send_command(struct iscsi_conn *conn,
unsigned long edtl;
int err = 0;
struct iser_data_buf *data_buf;
struct iscsi_cmd *hdr = task->hdr;
struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
struct scsi_cmnd *sc = task->sc;
if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@ -393,7 +392,7 @@ int iser_send_command(struct iscsi_conn *conn,
err = iser_prepare_write_cmd(task,
task->imm_count,
task->imm_count +
task->unsol_count,
task->unsol_r2t.data_length,
edtl);
if (err)
goto send_command_error;


@ -952,7 +952,6 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_free_msg_frame - Place MPT request frame back on FreeQ.
* @handle: Handle of registered MPT protocol driver
* @ioc: Pointer to MPT adapter structure
* @mf: Pointer to MPT request frame
*
@ -4563,7 +4562,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
failcnt++;
hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
/* don't overflow our IOC hs_reply[] buffer! */
if (u16cnt < sizeof(ioc->hs_reply) / sizeof(ioc->hs_reply[0]))
if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
hs_reply[u16cnt] = hword;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
}
@ -5422,7 +5421,6 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
/**
* mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
* @ioc: Pointer to an Adapter Structure
* @portnum: IOC port number
*
* Return:
* 0 on success
@ -6939,7 +6937,6 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
/**
* mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @mr: Pointer to MPT reply frame
* @log_info: U32 LogInfo word from the IOC
*
* Refer to lsi/sp_log.h.
@ -7176,7 +7173,7 @@ union loginfo_type {
sas_loginfo.loginfo = log_info;
if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) &&
(sas_loginfo.dw.originator < sizeof(originator_str)/sizeof(char*)))
(sas_loginfo.dw.originator < ARRAY_SIZE(originator_str)))
return;
originator_desc = originator_str[sas_loginfo.dw.originator];
@ -7185,21 +7182,21 @@ union loginfo_type {
case 0: /* IOP */
if (sas_loginfo.dw.code <
sizeof(iop_code_str)/sizeof(char*))
ARRAY_SIZE(iop_code_str))
code_desc = iop_code_str[sas_loginfo.dw.code];
break;
case 1: /* PL */
if (sas_loginfo.dw.code <
sizeof(pl_code_str)/sizeof(char*))
ARRAY_SIZE(pl_code_str))
code_desc = pl_code_str[sas_loginfo.dw.code];
break;
case 2: /* IR */
if (sas_loginfo.dw.code >=
sizeof(ir_code_str)/sizeof(char*))
ARRAY_SIZE(ir_code_str))
break;
code_desc = ir_code_str[sas_loginfo.dw.code];
if (sas_loginfo.dw.subcode >=
sizeof(raid_sub_code_str)/sizeof(char*))
ARRAY_SIZE(raid_sub_code_str))
break;
if (sas_loginfo.dw.code == 0)
sub_code_desc =


@ -2399,9 +2399,14 @@ config CHELSIO_T1_1G
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
are using only 10G cards say 'N' here.
config CHELSIO_T3_DEPENDS
tristate
depends on PCI && INET
default y
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on PCI && INET
depends on CHELSIO_T3_DEPENDS
select FW_LOADER
select INET_LRO
help


@ -34,13 +34,12 @@
#define ZFCP_BUS_ID_SIZE 20
static char *device;
MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");
module_param(device, charp, 0400);
static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");
static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
@ -73,46 +72,7 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
return 1;
}
static int __init zfcp_device_setup(char *devstr)
{
char *token;
char *str;
if (!devstr)
return 0;
/* duplicate devstr and keep the original for sysfs presentation*/
str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
if (!str)
return 0;
strcpy(str, devstr);
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0,
(unsigned long long *) &zfcp_data.init_wwpn))
goto err_out;
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0,
(unsigned long long *) &zfcp_data.init_fcp_lun))
goto err_out;
kfree(str);
return 1;
err_out:
kfree(str);
pr_err("%s is not a valid SCSI device\n", devstr);
return 0;
}
static void __init zfcp_init_device_configure(void)
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct zfcp_adapter *adapter;
struct zfcp_port *port;
@ -120,17 +80,17 @@ static void __init zfcp_init_device_configure(void)
down(&zfcp_data.config_sema);
read_lock_irq(&zfcp_data.config_lock);
adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid);
adapter = zfcp_get_adapter_by_busid(busid);
if (adapter)
zfcp_adapter_get(adapter);
read_unlock_irq(&zfcp_data.config_lock);
if (!adapter)
goto out_adapter;
port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
if (IS_ERR(port))
goto out_port;
unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
unit = zfcp_unit_enqueue(port, lun);
if (IS_ERR(unit))
goto out_unit;
up(&zfcp_data.config_sema);
@ -160,6 +120,42 @@ static struct kmem_cache *zfcp_cache_create(int size, char *name)
return kmem_cache_create(name , size, align, 0, NULL);
}
static void __init zfcp_init_device_setup(char *devstr)
{
char *token;
char *str;
char busid[ZFCP_BUS_ID_SIZE];
u64 wwpn, lun;
/* duplicate devstr and keep the original for sysfs presentation*/
str = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
if (!str)
return;
strcpy(str, devstr);
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
strncpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
goto err_out;
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
goto err_out;
kfree(str);
zfcp_init_device_configure(busid, wwpn, lun);
return;
err_out:
kfree(str);
pr_err("%s is not a valid SCSI device\n", devstr);
}
static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
@ -181,7 +177,6 @@ static int __init zfcp_module_init(void)
zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
sema_init(&zfcp_data.config_sema, 1);
rwlock_init(&zfcp_data.config_lock);
@ -203,10 +198,9 @@ static int __init zfcp_module_init(void)
goto out_ccw_register;
}
if (zfcp_device_setup(device))
zfcp_init_device_configure();
goto out;
if (init_device)
zfcp_init_device_setup(init_device);
return 0;
out_ccw_register:
misc_deregister(&zfcp_cfdc_misc);
@ -527,14 +521,11 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto sysfs_failed;
write_lock_irq(&zfcp_data.config_lock);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_fc_nameserver_init(adapter);
return 0;
if (!zfcp_adapter_scsi_register(adapter))
return 0;
sysfs_failed:
zfcp_adapter_debug_unregister(adapter);
@ -573,14 +564,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
return;
zfcp_adapter_debug_unregister(adapter);
/* remove specified adapter data structure from list */
write_lock_irq(&zfcp_data.config_lock);
list_del(&adapter->list);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_qdio_free(adapter);
zfcp_free_low_mem_buffers(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);


@ -106,10 +106,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
if (retval)
goto out;
retval = zfcp_adapter_scsi_register(adapter);
if (retval)
goto out_scsi_register;
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
adapter->req_no = 0;
@ -123,8 +119,6 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
flush_work(&adapter->scan_work);
return 0;
out_scsi_register:
zfcp_erp_thread_kill(adapter);
out:
up(&zfcp_data.config_sema);
return retval;


@ -85,20 +85,9 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
{
struct zfcp_adapter *adapter = NULL, *cur_adapter;
struct ccw_dev_id dev_id;
read_lock_irq(&zfcp_data.config_lock);
list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) {
ccw_device_get_id(cur_adapter->ccw_device, &dev_id);
if (dev_id.devno == devno) {
adapter = cur_adapter;
zfcp_adapter_get(adapter);
break;
}
}
read_unlock_irq(&zfcp_data.config_lock);
return adapter;
char busid[9];
snprintf(busid, sizeof(busid), "0.0.%04x", devno);
return zfcp_get_adapter_by_busid(busid);
}
static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)


@ -522,7 +522,7 @@ static const char *zfcp_rec_dbf_ids[] = {
[29] = "link down",
[30] = "link up status read",
[31] = "open port failed",
[32] = "open port failed",
[32] = "",
[33] = "close port",
[34] = "open unit failed",
[35] = "exclusive open unit failed",
@ -936,6 +936,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
rct->reason_code = hdr->reason_code;
rct->expl = hdr->reason_code_expl;
rct->vendor_unique = hdr->vendor_unique;
rct->max_res_size = hdr->max_res_size;
rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
ZFCP_DBF_SAN_MAX_PAYLOAD);
debug_event(adapter->san_dbf, level, r, sizeof(*r));
@ -1043,6 +1044,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
} else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
@ -1249,7 +1251,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
char dbf_name[DEBUG_MAX_NAME_LEN];
/* debug feature area which records recovery activity */
sprintf(dbf_name, "zfcp_%s_rec", zfcp_get_busid_by_adapter(adapter));
sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_rec_dbf_record));
if (!adapter->rec_dbf)
@ -1259,7 +1261,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->rec_dbf, 3);
/* debug feature area which records HBA (FSF and QDIO) conditions */
sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_hba_dbf_record));
if (!adapter->hba_dbf)
@ -1269,7 +1271,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->hba_dbf, 3);
/* debug feature area which records SAN command failures and recovery */
sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_san_dbf_record));
if (!adapter->san_dbf)
@ -1279,7 +1281,7 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
debug_set_level(adapter->san_dbf, 6);
/* debug feature area which records SCSI command failures and recovery */
sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
sizeof(struct zfcp_scsi_dbf_record));
if (!adapter->scsi_dbf)


@ -171,6 +171,7 @@ struct zfcp_san_dbf_record_ct_response {
u8 reason_code;
u8 expl;
u8 vendor_unique;
u16 max_res_size;
u32 len;
} __attribute__ ((packed));


@ -159,20 +159,6 @@ struct fcp_rscn_element {
u32 nport_did:24;
} __attribute__((packed));
#define ZFCP_PORT_ADDRESS 0x0
#define ZFCP_AREA_ADDRESS 0x1
#define ZFCP_DOMAIN_ADDRESS 0x2
#define ZFCP_FABRIC_ADDRESS 0x3
#define ZFCP_PORTS_RANGE_PORT 0xFFFFFF
#define ZFCP_PORTS_RANGE_AREA 0xFFFF00
#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
#define ZFCP_PORTS_RANGE_FABRIC 0x000000
#define ZFCP_NO_PORTS_PER_AREA 0x100
#define ZFCP_NO_PORTS_PER_DOMAIN 0x10000
#define ZFCP_NO_PORTS_PER_FABRIC 0x1000000
/* see fc-ph */
struct fcp_logo {
u32 command;
@ -211,7 +197,6 @@ struct zfcp_ls_adisc {
#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
#define ZFCP_CT_GID_PN 0x0121
#define ZFCP_CT_GPN_FT 0x0172
#define ZFCP_CT_MAX_SIZE 0x1020
#define ZFCP_CT_ACCEPT 0x8002
#define ZFCP_CT_REJECT 0x8001
@ -258,7 +243,6 @@ struct zfcp_ls_adisc {
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
#define ZFCP_STATUS_PORT_DID_DID 0x00000002
#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
@ -340,8 +324,6 @@ struct ct_iu_gid_pn_resp {
* @wka_port: port where the request is sent to
* @req: scatter-gather list for request
* @resp: scatter-gather list for response
* @req_count: number of elements in request scatter-gather list
* @resp_count: number of elements in response scatter-gather list
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @timeout: FSF timeout for this request
@ -352,8 +334,6 @@ struct zfcp_send_ct {
struct zfcp_wka_port *wka_port;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
void (*handler)(unsigned long);
unsigned long handler_data;
int timeout;
@ -378,8 +358,6 @@ struct zfcp_gid_pn_data {
* @d_id: destination id of port where request is sent to
* @req: scatter-gather list for request
* @resp: scatter-gather list for response
* @req_count: number of elements in request scatter-gather list
* @resp_count: number of elements in response scatter-gather list
* @handler: handler function (called for response to the request)
* @handler_data: data passed to handler function
* @completion: completion for synchronization purposes
@ -392,8 +370,6 @@ struct zfcp_send_els {
u32 d_id;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
void (*handler)(unsigned long);
unsigned long handler_data;
struct completion *completion;
@ -451,7 +427,6 @@ struct zfcp_latencies {
};
struct zfcp_adapter {
struct list_head list; /* list of adapters */
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
refcount drop to zero */
@ -593,16 +568,11 @@ struct zfcp_fsf_req {
struct zfcp_data {
struct scsi_host_template scsi_host_template;
struct scsi_transport_template *scsi_transport_template;
struct list_head adapter_list_head; /* head of adapter list */
rwlock_t config_lock; /* serialises changes
to adapter/port/unit
lists */
struct semaphore config_sema; /* serialises configuration
changes */
atomic_t loglevel; /* current loglevel */
char init_busid[20];
u64 init_wwpn;
u64 init_fcp_lun;
struct kmem_cache *fsf_req_qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
@ -623,8 +593,6 @@ struct zfcp_fsf_req_qtcb {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
#define zfcp_get_busid_by_adapter(adapter) (dev_name(&adapter->ccw_device->dev))
/*
* Helper functions for request ID management.
*/


@ -840,7 +840,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return ZFCP_ERP_FAILED;
}
port->d_id = adapter->peer_d_id;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
return zfcp_erp_port_strategy_open_port(act);
}
@ -871,12 +870,12 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_CLOSING:
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
if (!port->d_id) {
queue_work(zfcp_data.work_queue, &port->gid_pn_work);
return ZFCP_ERP_CONTINUES;
}
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
if (!port->d_id) {
if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
zfcp_erp_port_failed(port, 26, NULL);
return ZFCP_ERP_EXIT;
@ -888,7 +887,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_OPENING:
/* D_ID might have changed during open */
if (p_status & ZFCP_STATUS_COMMON_OPEN) {
if (p_status & ZFCP_STATUS_PORT_DID_DID)
if (port->d_id)
return ZFCP_ERP_SUCCEEDED;
else {
act->step = ZFCP_ERP_STEP_PORT_CLOSING;
@ -1385,6 +1384,7 @@ static int zfcp_erp_thread(void *data)
struct list_head *next;
struct zfcp_erp_action *act;
unsigned long flags;
int ignore;
daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
/* Block all signals */
@ -1407,7 +1407,7 @@ static int zfcp_erp_thread(void *data)
}
zfcp_rec_dbf_event_thread_lock(4, adapter);
down_interruptible(&adapter->erp_ready_sem);
ignore = down_interruptible(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock(5, adapter);
}


@ -11,6 +11,20 @@
#include "zfcp_ext.h"
enum rscn_address_format {
RSCN_PORT_ADDRESS = 0x0,
RSCN_AREA_ADDRESS = 0x1,
RSCN_DOMAIN_ADDRESS = 0x2,
RSCN_FABRIC_ADDRESS = 0x3,
};
static u32 rscn_range_mask[] = {
[RSCN_PORT_ADDRESS] = 0xFFFFFF,
[RSCN_AREA_ADDRESS] = 0xFFFF00,
[RSCN_DOMAIN_ADDRESS] = 0xFF0000,
[RSCN_FABRIC_ADDRESS] = 0x000000,
};
struct ct_iu_gpn_ft_req {
struct ct_hdr header;
u8 flags;
@ -26,9 +40,12 @@ struct gpn_ft_resp_acc {
u64 wwpn;
} __attribute__ ((packed));
#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
/ sizeof(struct gpn_ft_resp_acc))
#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
/ sizeof(struct gpn_ft_resp_acc))
#define ZFCP_GPN_FT_BUFFERS 4
#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
- sizeof(struct ct_hdr))
#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
struct ct_iu_gpn_ft_resp {
@ -160,22 +177,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
fcp_rscn_element++;
switch (fcp_rscn_element->addr_format) {
case ZFCP_PORT_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_PORT;
break;
case ZFCP_AREA_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_AREA;
break;
case ZFCP_DOMAIN_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_DOMAIN;
break;
case ZFCP_FABRIC_ADDRESS:
range_mask = ZFCP_PORTS_RANGE_FABRIC;
break;
default:
continue;
}
range_mask = rscn_range_mask[fcp_rscn_element->addr_format];
_zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element);
}
schedule_work(&fsf_req->adapter->scan_work);
@ -266,7 +268,6 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
return;
/* looks like a valid d_id */
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
}
int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
@ -284,8 +285,6 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
gid_pn->ct.req = &gid_pn->req;
gid_pn->ct.resp = &gid_pn->resp;
gid_pn->ct.req_count = 1;
gid_pn->ct.resp_count = 1;
sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
sizeof(struct ct_iu_gid_pn_req));
sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
@ -297,7 +296,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
init_completion(&compl_rec.done);
@ -407,8 +406,6 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
sizeof(struct zfcp_ls_adisc));
adisc->els.req_count = 1;
adisc->els.resp_count = 1;
adisc->els.adapter = adapter;
adisc->els.port = port;
adisc->els.d_id = port->d_id;
@ -448,17 +445,17 @@ void zfcp_test_link(struct zfcp_port *port)
zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
}
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
{
struct scatterlist *sg = &gpn_ft->sg_req;
kfree(sg_virt(sg)); /* free request buffer */
zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
kfree(gpn_ft);
}
static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
{
struct zfcp_gpn_ft *gpn_ft;
struct ct_iu_gpn_ft_req *req;
@ -475,8 +472,8 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
}
sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
zfcp_free_sg_env(gpn_ft);
if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
zfcp_free_sg_env(gpn_ft, buf_num);
gpn_ft = NULL;
}
out:
@ -485,7 +482,8 @@ out:
static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
struct zfcp_adapter *adapter)
struct zfcp_adapter *adapter,
int max_bytes)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@ -498,8 +496,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
req->header.options = ZFCP_CT_SYNCHRONOUS;
req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
(ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
req->header.max_res_size = max_bytes / 4;
req->flags = 0;
req->domain_id_scope = 0;
req->area_id_scope = 0;
@ -512,8 +509,6 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
ct->timeout = 10;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
ct->req_count = 1;
ct->resp_count = ZFCP_GPN_FT_BUFFERS;
init_completion(&compl_rec.done);
compl_rec.handler = NULL;
@ -540,7 +535,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
zfcp_port_dequeue(port);
}
static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct scatterlist *sg = gpn_ft->sg_resp;
@ -560,13 +555,17 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
return -EIO;
}
if (hdr->max_res_size)
if (hdr->max_res_size) {
dev_warn(&adapter->ccw_device->dev,
"The name server reported %d words residual data\n",
hdr->max_res_size);
return -E2BIG;
}
down(&zfcp_data.config_sema);
/* first entry is the header */
for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES && !last; x++) {
for (x = 1; x < max_entries && !last; x++) {
if (x % (ZFCP_GPN_FT_ENTRIES + 1))
acc++;
else
@ -589,7 +588,6 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
}
port = zfcp_port_enqueue(adapter, acc->wwpn,
ZFCP_STATUS_PORT_DID_DID |
ZFCP_STATUS_COMMON_NOESC, d_id);
if (IS_ERR(port))
ret = PTR_ERR(port);
@ -612,6 +610,12 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
{
int ret, i;
struct zfcp_gpn_ft *gpn_ft;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
return 0;
@ -620,23 +624,23 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
if (ret)
return ret;
gpn_ft = zfcp_alloc_sg_env();
gpn_ft = zfcp_alloc_sg_env(buf_num);
if (!gpn_ft) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < 3; i++) {
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
if (!ret) {
ret = zfcp_scan_eval_gpn_ft(gpn_ft);
ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
zfcp_free_sg_env(gpn_ft);
zfcp_free_sg_env(gpn_ft, buf_num);
out:
zfcp_wka_port_put(&adapter->nsp);
return ret;


@ -644,38 +644,38 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
{
struct zfcp_qdio_queue *req_q = &adapter->req_q;
spin_lock_bh(&adapter->req_q_lock);
if (atomic_read(&req_q->count))
if (atomic_read(&adapter->req_q.count) > 0)
return 1;
spin_unlock_bh(&adapter->req_q_lock);
atomic_inc(&adapter->qdio_outb_full);
return 0;
}
static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
{
unsigned int count = atomic_read(&adapter->req_q.count);
if (!count)
atomic_inc(&adapter->qdio_outb_full);
return count > 0;
}
static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
__releases(&adapter->req_q_lock)
__acquires(&adapter->req_q_lock)
{
struct zfcp_qdio_queue *req_q = &adapter->req_q;
long ret;
if (atomic_read(&req_q->count) <= -REQUEST_LIST_SIZE)
return -EIO;
if (atomic_read(&req_q->count) > 0)
return 0;
atomic_dec(&req_q->count);
spin_unlock_bh(&adapter->req_q_lock);
ret = wait_event_interruptible_timeout(adapter->request_wq,
zfcp_fsf_sbal_check(adapter), 5 * HZ);
atomic_read(&req_q->count) >= 0,
5 * HZ);
spin_lock_bh(&adapter->req_q_lock);
atomic_inc(&req_q->count);
if (ret > 0)
return 0;
if (!ret)
atomic_inc(&adapter->qdio_outb_full);
spin_lock_bh(&adapter->req_q_lock);
return -EIO;
}
@ -1013,12 +1013,29 @@ skip_fsfstatus:
send_ct->handler(send_ct->handler_data);
}
static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp, int max_sbals)
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
int max_sbals)
{
struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
u32 feat = req->adapter->adapter_features;
int bytes;
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
!sg_is_last(sg_req) || !sg_is_last(sg_resp))
return -EOPNOTSUPP;
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
sbale[2].addr = sg_virt(sg_req);
sbale[2].length = sg_req->length;
sbale[3].addr = sg_virt(sg_resp);
sbale[3].length = sg_resp->length;
sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
@ -1060,8 +1077,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
goto out;
}
ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
FSF_MAX_SBALS_PER_REQ);
ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
FSF_MAX_SBALS_PER_REQ);
if (ret)
goto failed_send;
@ -1171,7 +1188,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
goto out;
}
ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
if (ret)
goto failed_send;
@ -1406,13 +1423,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_SQ_NO_RETRY_POSSIBLE:
dev_warn(&req->adapter->ccw_device->dev,
"Remote port 0x%016Lx could not be opened\n",
(unsigned long long)port->wwpn);
zfcp_erp_port_failed(port, 32, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
@ -1440,10 +1451,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* Alternately, an ADISC/PDISC ELS should suffice, as well.
*/
plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
if (req->qtcb->bottom.support.els1_length >=
FSF_PLOGI_MIN_LEN) {
if (plogi->serv_param.wwpn != port->wwpn)
atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
&port->status);
port->d_id = 0;
else {
port->wwnn = plogi->serv_param.wwnn;
zfcp_fc_plogi_evaluate(port, plogi);
@ -1907,7 +1918,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"Shared read-write access not "
"supported (unit 0x%016Lx, port "
"0x%016Lx\n)",
"0x%016Lx)\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 36, req);


@ -164,6 +164,7 @@
#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
@ -322,6 +323,7 @@ struct fsf_nport_serv_param {
u8 vendor_version_level[16];
} __attribute__ ((packed));
#define FSF_PLOGI_MIN_LEN 112
struct fsf_plogi {
u32 code;
struct fsf_nport_serv_param serv_param;


@ -112,7 +112,7 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
* corruption and must stop the machine immediately.
*/
panic("error: unknown request id (%lx) on adapter %s.\n",
req_id, zfcp_get_busid_by_adapter(adapter));
req_id, dev_name(&adapter->ccw_device->dev));
zfcp_reqlist_remove(adapter, fsf_req);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
@ -392,7 +392,7 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
init_data->cdev = adapter->ccw_device;
init_data->q_format = QDIO_ZFCP_QFMT;
memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8);
memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
ASCEBC(init_data->adapter_name, 8);
init_data->qib_param_field_format = 0;
init_data->qib_param_field = NULL;


@ -352,6 +352,8 @@ config ISCSI_TCP
http://open-iscsi.org
source "drivers/scsi/cxgb3i/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
@ -603,6 +605,19 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
config LIBFC
tristate "LibFC module"
select SCSI_FC_ATTRS
---help---
Fibre Channel library module
config FCOE
tristate "FCoE module"
depends on PCI
select LIBFC
---help---
Fibre Channel over Ethernet module
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
@ -1357,6 +1372,13 @@ config SCSI_LPFC
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
config SCSI_LPFC_DEBUG_FS
bool "Emulex LightPulse Fibre Channel debugfs Support"
depends on SCSI_LPFC && DEBUG_FS
help
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI


@ -36,7 +36,9 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
@ -124,6 +126,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_ARM) += arm/


@ -30,7 +30,7 @@
* $Log: NCR5380.c,v $
* Revision 1.10 1998/9/2 Alan Cox
* (alan@redhat.com)
* (alan@lxorguk.ukuu.org.uk)
* Fixed up the timer lockups reported so far. Things still suck. Looking
* forward to 2.3 and per device request queues. Then it'll be possible to
* SMP thread this beast and improve life no end.


@ -54,7 +54,7 @@
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
* 14/06/07 Alan Cox <alan@redhat.com>
* 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Grand cleanup and Linuxisation
*/


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
@ -90,14 +90,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
dma_addr_t daddr;
if (size > 2048) {
retval = -EINVAL;
goto cleanup;
}
kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
if (!kfib) {
retval = -ENOMEM;
goto cleanup;
}
/* Highjack the hw_fib */
hw_fib = fibptr->hw_fib_va;
hw_fib_pa = fibptr->hw_fib_pa;
fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
fibptr->hw_fib_va = kfib;
fibptr->hw_fib_pa = daddr;
memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
memcpy(kfib, hw_fib, dev->max_fib_size);
}


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.


@ -13425,8 +13425,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
}
boardp->asc_n_io_port = pci_resource_len(pdev, 1);
boardp->ioremap_addr = ioremap(pci_resource_start(pdev, 1),
boardp->asc_n_io_port);
boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
if (!boardp->ioremap_addr) {
shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
"returned NULL\n",

Просмотреть файл

@ -22,7 +22,7 @@
* aha1740_makecode may still need even more work
* if it doesn't work for your devices, take a look.
*
* Reworked for new_eh and new locking by Alan Cox <alan@redhat.com>
* Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Converted to EISA and generic DMA APIs by Marc Zyngier
* <maz@wild-wind.fr.eu.org>, 4/2003.


@ -235,7 +235,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
uint32_t intmask_org;
int i, j;
acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
acb->pmuA = pci_ioremap_bar(pdev, 0);
if (!acb->pmuA) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
acb->host->host_no);
@ -329,13 +329,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
acb->pmuB = reg;
mem_base0 = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
mem_base0 = pci_ioremap_bar(pdev, 0);
if (!mem_base0)
goto out;
mem_base1 = ioremap(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
mem_base1 = pci_ioremap_bar(pdev, 2);
if (!mem_base1) {
iounmap(mem_base0);
goto out;


@ -1,8 +1,8 @@
/*
* Copyright (C) 1997 Wu Ching Chen
* 2.1.x update (C) 1998 Krzysztof G. Baranowski
* 2.5.x update (C) 2002 Red Hat <alan@redhat.com>
* 2.6.x update (C) 2004 Red Hat <alan@redhat.com>
* 2.5.x update (C) 2002 Red Hat
* 2.6.x update (C) 2004 Red Hat
*
* Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
*


@ -190,7 +190,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES);
MAX_RETRIES, NULL);
dprintk("result: 0x%x\n",result);
if (driver_byte(result) & DRIVER_SENSE) {


@ -0,0 +1,4 @@
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o


@ -0,0 +1,7 @@
config SCSI_CXGB3_ISCSI
tristate "Chelsio S3xx iSCSI support"
depends on CHELSIO_T3_DEPENDS
select CHELSIO_T3
select SCSI_ISCSI_ATTRS
---help---
This driver supports iSCSI offload for the Chelsio S3 series devices.


@ -0,0 +1,139 @@
/*
* cxgb3i.h: Chelsio S3xx iSCSI driver.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#ifndef __CXGB3I_H__
#define __CXGB3I_H__
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/scatterlist.h>
#include <scsi/libiscsi_tcp.h>
/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i_offload.h"
#include "cxgb3i_ddp.h"
#define CXGB3I_SCSI_QDEPTH_DFLT 128
#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
#define CXGB3I_MAX_LUN 512
#define ISCSI_PDU_NONPAYLOAD_MAX \
(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
struct cxgb3i_adapter;
struct cxgb3i_hba;
struct cxgb3i_endpoint;
/**
* struct cxgb3i_hba - cxgb3i iscsi structure (per port)
*
* @snic: cxgb3i adapter containing this port
* @ndev: pointer to netdev structure
* @shost: pointer to scsi host structure
*/
struct cxgb3i_hba {
struct cxgb3i_adapter *snic;
struct net_device *ndev;
struct Scsi_Host *shost;
};
/**
* struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
*
* @listhead: list head to link elements
* @lock: lock for this structure
* @tdev: pointer to t3cdev used by cxgb3 driver
* @pdev: pointer to pci dev
* @hba_cnt: # of hbas (the same as # of ports)
* @hba: all the hbas on this adapter
* @tx_max_size: max. tx packet size supported
* @rx_max_size: max. rx packet size supported
* @tag_format: ddp tag format settings
*/
struct cxgb3i_adapter {
struct list_head list_head;
spinlock_t lock;
struct t3cdev *tdev;
struct pci_dev *pdev;
unsigned char hba_cnt;
struct cxgb3i_hba *hba[MAX_NPORTS];
unsigned int tx_max_size;
unsigned int rx_max_size;
struct cxgb3i_tag_format tag_format;
};
/**
* struct cxgb3i_conn - cxgb3i iscsi connection
*
* @listhead: list head to link elements
* @cep: pointer to iscsi_endpoint structure
* @conn: pointer to iscsi_conn structure
* @hba: pointer to the hba this conn. is going through
* @task_idx_bits: # of bits needed for session->cmds_max
*/
struct cxgb3i_conn {
struct list_head list_head;
struct cxgb3i_endpoint *cep;
struct iscsi_conn *conn;
struct cxgb3i_hba *hba;
unsigned int task_idx_bits;
};
/**
* struct cxgb3i_endpoint - iscsi tcp endpoint
*
* @c3cn: the h/w tcp connection representation
* @hba: pointer to the hba this conn. is going through
* @cconn: pointer to the associated cxgb3i iscsi connection
*/
struct cxgb3i_endpoint {
struct s3_conn *c3cn;
struct cxgb3i_hba *hba;
struct cxgb3i_conn *cconn;
};
int cxgb3i_iscsi_init(void);
void cxgb3i_iscsi_cleanup(void);
struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
void cxgb3i_adapter_remove(struct t3cdev *);
int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);
struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
struct net_device *);
void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
int cxgb3i_pdu_init(void);
void cxgb3i_pdu_cleanup(void);
void cxgb3i_conn_cleanup_task(struct iscsi_task *);
int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
#endif


@ -0,0 +1,770 @@
/*
* cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#include <linux/skbuff.h>
/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i_ddp.h"
#define DRV_MODULE_NAME "cxgb3i_ddp"
#define DRV_MODULE_VERSION "1.0.0"
#define DRV_MODULE_RELDATE "Dec. 1, 2008"
static char version[] =
"Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif
/*
* iSCSI Direct Data Placement
*
* T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
* pre-posted final destination host-memory buffers based on the Initiator
* Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
*
* The host memory address is programmed into h/w in the format of pagepod
* entries.
* The location of the pagepod entry is encoded into ddp tag which is used or
* is the base for ITT/TTT.
*/
#define DDP_PGIDX_MAX 4
#define DDP_THRESHOLD 2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;
static LIST_HEAD(cxgb3i_ddp_list);
static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
/*
* functions to program the pagepod in h/w
*/
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
req->wr.wr_lo = 0;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
V_ULPTX_CMD(ULP_MEM_WRITE));
req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}
static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
unsigned int idx, unsigned int npods,
struct cxgb3i_gather_list *gl)
{
unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
int i;
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
struct sk_buff *skb = ddp->gl_skb[idx];
struct pagepod *ppod;
int j, pidx;
/* hold on to the skb until we clear the ddp mapping */
skb_get(skb);
ulp_mem_io_set_hdr(skb, pm_addr);
ppod = (struct pagepod *)
(skb->head + sizeof(struct ulp_mem_io));
memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod));
for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
ppod->addr[j] = pidx < gl->nelem ?
cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(ddp->tdev, skb);
}
return 0;
}
static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
unsigned int npods)
{
unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
int i;
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
struct sk_buff *skb = ddp->gl_skb[idx];
ddp->gl_skb[idx] = NULL;
memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(ddp->tdev, skb);
}
return 0;
}
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
int start, int max, int count,
struct cxgb3i_gather_list *gl)
{
unsigned int i, j;
spin_lock(&ddp->map_lock);
for (i = start; i <= max;) {
for (j = 0; j < count; j++) {
if (ddp->gl_map[i + j])
break;
}
if (j == count) {
for (j = 0; j < count; j++)
ddp->gl_map[i + j] = gl;
spin_unlock(&ddp->map_lock);
return i;
}
i += j + 1;
}
spin_unlock(&ddp->map_lock);
return -EBUSY;
}
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
int start, int count)
{
spin_lock(&ddp->map_lock);
memset(&ddp->gl_map[start], 0,
count * sizeof(struct cxgb3i_gather_list *));
spin_unlock(&ddp->map_lock);
}
static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
int idx, int count)
{
int i;
for (i = 0; i < count; i++, idx++)
if (ddp->gl_skb[idx]) {
kfree_skb(ddp->gl_skb[idx]);
ddp->gl_skb[idx] = NULL;
}
}
static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
int count, gfp_t gfp)
{
int i;
for (i = 0; i < count; i++) {
struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
PPOD_SIZE, gfp);
if (skb) {
ddp->gl_skb[idx + i] = skb;
skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
} else {
ddp_free_gl_skb(ddp, idx, i);
return -ENOMEM;
}
}
return 0;
}
/**
* cxgb3i_ddp_find_page_index - return ddp page index for a given page size.
* @pgsz: page size
* return the ddp page index, if no match is found return DDP_PGIDX_MAX.
*/
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
int i;
for (i = 0; i < DDP_PGIDX_MAX; i++) {
if (pgsz == (1UL << ddp_page_shift[i]))
return i;
}
ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
return DDP_PGIDX_MAX;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
static inline void ddp_gl_unmap(struct pci_dev *pdev,
struct cxgb3i_gather_list *gl)
{
int i;
for (i = 0; i < gl->nelem; i++)
pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
PCI_DMA_FROMDEVICE);
}
static inline int ddp_gl_map(struct pci_dev *pdev,
struct cxgb3i_gather_list *gl)
{
int i;
for (i = 0; i < gl->nelem; i++) {
gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
goto unmap;
}
return i;
unmap:
if (i) {
unsigned int nelem = gl->nelem;
gl->nelem = i;
ddp_gl_unmap(pdev, gl);
gl->nelem = nelem;
}
return -ENOMEM;
}
/**
* cxgb3i_ddp_make_gl - build ddp page buffer list
* @xferlen: total buffer length
* @sgl: page buffer scatter-gather list
* @sgcnt: # of page buffers
* @pdev: pci_dev, used for pci map
* @gfp: allocation mode
*
* construct a ddp page buffer list from the scsi scattergather list.
* coalesce buffers as much as possible, and obtain dma addresses for
* each page.
*
* Return the cxgb3i_gather_list constructed from the page buffers if the
* memory can be used for ddp. Return NULL otherwise.
*/
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
struct scatterlist *sgl,
unsigned int sgcnt,
struct pci_dev *pdev,
gfp_t gfp)
{
struct cxgb3i_gather_list *gl;
struct scatterlist *sg = sgl;
struct page *sgpage = sg_page(sg);
unsigned int sglen = sg->length;
unsigned int sgoffset = sg->offset;
unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
PAGE_SHIFT;
int i = 1, j = 0;
if (xferlen < DDP_THRESHOLD) {
ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
xferlen, DDP_THRESHOLD);
return NULL;
}
gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
gfp);
if (!gl)
return NULL;
gl->pages = (struct page **)&gl->phys_addr[npages];
gl->length = xferlen;
gl->offset = sgoffset;
gl->pages[0] = sgpage;
sg = sg_next(sg);
while (sg) {
struct page *page = sg_page(sg);
if (sgpage == page && sg->offset == sgoffset + sglen)
sglen += sg->length;
else {
/* make sure the sgl is suitable for ddp:
* each chunk has the same page size, and
* all of the middle pages are used completely
*/
if ((j && sgoffset) ||
((i != sgcnt - 1) &&
((sglen + sgoffset) & ~PAGE_MASK)))
goto error_out;
j++;
/* gl->nelem is not set until the scan completes, so bound the
* page index against npages directly */
if (j == npages || sg->offset)
goto error_out;
gl->pages[j] = page;
sglen = sg->length;
sgoffset = sg->offset;
sgpage = page;
}
i++;
sg = sg_next(sg);
}
gl->nelem = ++j;
if (ddp_gl_map(pdev, gl) < 0)
goto error_out;
return gl;
error_out:
kfree(gl);
return NULL;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
/**
* cxgb3i_ddp_release_gl - release a page buffer list
* @gl: a ddp page buffer list
* @pdev: pci_dev used for pci_unmap
* free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
*/
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
struct pci_dev *pdev)
{
ddp_gl_unmap(pdev, gl);
kfree(gl);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
/**
* cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
* @tdev: t3cdev adapter
* @tid: connection id
* @tformat: tag format
* @tagp: the s/w tag, if ddp setup is successful, it will be updated with
* ddp/hw tag
* @gl: the page memory list
* @gfp: allocation mode
*
* ddp setup for a given page buffer list and construct the ddp tag.
* return 0 on success, < 0 otherwise.
*/
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
struct cxgb3i_tag_format *tformat, u32 *tagp,
struct cxgb3i_gather_list *gl, gfp_t gfp)
{
struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
struct pagepod_hdr hdr;
unsigned int npods;
int idx = -1, idx_max;
int err = -ENOMEM;
u32 sw_tag = *tagp;
u32 tag;
if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
gl->length < DDP_THRESHOLD) {
ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
page_idx, gl->length, DDP_THRESHOLD);
return -EINVAL;
}
npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
idx_max = ddp->nppods - npods + 1;
if (ddp->idx_last == ddp->nppods)
idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
else {
idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
idx_max, npods, gl);
if (idx < 0 && ddp->idx_last >= npods)
idx = ddp_find_unused_entries(ddp, 0,
ddp->idx_last - npods + 1,
npods, gl);
}
if (idx < 0) {
ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
gl->length, gl->nelem, npods);
return idx;
}
err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
if (err < 0)
goto unmark_entries;
tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
tag |= idx << PPOD_IDX_SHIFT;
hdr.rsvd = 0;
hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
hdr.maxoffset = htonl(gl->length);
hdr.pgoffset = htonl(gl->offset);
err = set_ddp_map(ddp, &hdr, idx, npods, gl);
if (err < 0)
goto free_gl_skb;
ddp->idx_last = idx;
ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
idx, npods);
*tagp = tag;
return 0;
free_gl_skb:
ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
ddp_unmark_entries(ddp, idx, npods);
return err;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
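/*
 * Typical use (illustrative sketch; the real caller is
 * cxgb3i_reserve_itt() in cxgb3i_iscsi.c, and tdev/tid/tformat/sw_tag
 * below are placeholders):
 *
 *	u32 tag = sw_tag;
 *	struct cxgb3i_gather_list *gl;
 *
 *	gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, GFP_ATOMIC);
 *	if (!gl || cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag,
 *					  gl, GFP_ATOMIC) < 0)
 *		tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
 *
 * releasing gl via cxgb3i_ddp_release_gl() when only the reserve step
 * fails, and tearing the mapping down later with cxgb3i_ddp_tag_release(),
 * which also frees gl.
 */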
/**
* cxgb3i_ddp_tag_release - release a ddp tag
* @tdev: t3cdev adapter
* @tag: ddp tag
* ddp cleanup for a given ddp tag and release all the resources held
*/
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
u32 idx;
if (!ddp) {
ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
return;
}
idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
if (idx < ddp->nppods) {
struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
unsigned int npods;
if (!gl) {
ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
tag, idx);
return;
}
npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
tag, idx, npods);
clear_ddp_map(ddp, idx, npods);
ddp_unmark_entries(ddp, idx, npods);
cxgb3i_ddp_release_gl(gl, ddp->pdev);
} else
ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
tag, idx, ddp->nppods);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
int reply)
{
struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
GFP_KERNEL);
struct cpl_set_tcb_field *req;
u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
if (!skb)
return -ENOMEM;
/* set up ulp submode and page size */
req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = V_NO_REPLY(reply ? 0 : 1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
req->val = cpu_to_be64(val << 28);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(tdev, skb);
return 0;
}
/**
* cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
* @tdev: t3cdev adapter
* @tid: connection id
* @reply: request reply from h/w
* set up the ddp page size based on the host PAGE_SIZE for a connection
* identified by tid
*/
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
int reply)
{
return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
/**
* cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
* @tdev: t3cdev adapter
* @tid: connection id
* @reply: request reply from h/w
* @pgsz: ddp page size
* set up the ddp page size for a connection identified by tid
*/
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
int reply, unsigned long pgsz)
{
int pgidx = cxgb3i_ddp_find_page_index(pgsz);
return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
/**
* cxgb3i_setup_conn_digest - setup conn. digest setting
* @tdev: t3cdev adapter
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
* @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
int hcrc, int dcrc, int reply)
{
struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
GFP_KERNEL);
struct cpl_set_tcb_field *req;
u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
if (!skb)
return -ENOMEM;
/* set up ulp submode and page size */
req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = V_NO_REPLY(reply ? 0 : 1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
req->val = cpu_to_be64(val << 24);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(tdev, skb);
return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
static int ddp_init(struct t3cdev *tdev)
{
struct cxgb3i_ddp_info *ddp;
struct ulp_iscsi_info uinfo;
unsigned int ppmax, bits;
int i, err;
static int vers_printed;
if (!vers_printed) {
printk(KERN_INFO "%s", version);
vers_printed = 1;
}
err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
if (err < 0) {
ddp_log_error("%s, failed to get iscsi param err=%d.\n",
tdev->name, err);
return err;
}
ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
bits = __ilog2_u32(ppmax) + 1;
if (bits > PPOD_IDX_MAX_SIZE)
bits = PPOD_IDX_MAX_SIZE;
ppmax = (1 << (bits - 1)) - 1;
ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
ppmax *
(sizeof(struct cxgb3i_gather_list *) +
sizeof(struct sk_buff *)),
GFP_KERNEL);
if (!ddp) {
ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
tdev->name, ppmax);
return 0;
}
ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
ppmax *
sizeof(struct cxgb3i_gather_list *));
spin_lock_init(&ddp->map_lock);
ddp->tdev = tdev;
ddp->pdev = uinfo.pdev;
ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
ddp->llimit = uinfo.llimit;
ddp->ulimit = uinfo.ulimit;
ddp->nppods = ppmax;
ddp->idx_last = ppmax;
ddp->idx_bits = bits;
ddp->idx_mask = (1 << bits) - 1;
ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
for (i = 0; i < DDP_PGIDX_MAX; i++)
uinfo.pgsz_factor[i] = ddp_page_order[i];
uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
if (err < 0) {
ddp_log_warn("%s unable to set iscsi param err=%d, "
"ddp disabled.\n", tdev->name, err);
goto free_ddp_map;
}
tdev->ulp_iscsi = ddp;
/* add to the list */
write_lock(&cxgb3i_ddp_rwlock);
list_add_tail(&ddp->list, &cxgb3i_ddp_list);
write_unlock(&cxgb3i_ddp_rwlock);
ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
"pkt %u,%u.\n",
ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
ddp->idx_mask, ddp->rsvd_tag_mask,
ddp->max_txsz, ddp->max_rxsz);
return 0;
free_ddp_map:
cxgb3i_free_big_mem(ddp);
return err;
}
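/*
 * Worked example of the sizing above (hypothetical numbers): a 1MB pagepod
 * region yields ppmax = 1MB >> PPOD_SIZE_SHIFT = 16384 entries, so
 * bits = ilog2(16384) + 1 = 15 <= PPOD_IDX_MAX_SIZE, and ppmax is then
 * clipped to (1 << 14) - 1 = 16383 usable pods. At 4 pages per pod on a
 * 4KB-page host, that covers roughly 256MB of concurrently mapped buffers.
 */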
/**
* cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
* @tdev: t3cdev adapter
* @tformat: tag format
* @txsz: max tx pkt size, filled in by this func.
* @rxsz: max rx pkt size, filled in by this func.
* initialize the ddp pagepod manager for a given adapter if needed and
* setup the tag format for a given iscsi entity
*/
int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
struct cxgb3i_tag_format *tformat,
unsigned int *txsz, unsigned int *rxsz)
{
struct cxgb3i_ddp_info *ddp;
if (!tformat)
return -EINVAL;
if (!tdev->ulp_iscsi) {
int err = ddp_init(tdev);
if (err < 0)
return err;
}
ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
/* ddp_init() can return 0 with ddp left disabled; don't dereference NULL */
if (!ddp)
return -EINVAL;
tformat->rsvd_bits = ddp->idx_bits;
tformat->rsvd_shift = PPOD_IDX_SHIFT;
tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
tformat->sw_bits, tformat->rsvd_bits,
tformat->rsvd_shift, tformat->rsvd_mask);
*txsz = ddp->max_txsz;
*rxsz = ddp->max_rxsz;
ddp_log_info("ddp max pkt size: %u, %u.\n",
ddp->max_txsz, ddp->max_rxsz);
return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
static void ddp_release(struct cxgb3i_ddp_info *ddp)
{
int i = 0;
struct t3cdev *tdev = ddp->tdev;
tdev->ulp_iscsi = NULL;
while (i < ddp->nppods) {
struct cxgb3i_gather_list *gl = ddp->gl_map[i];
if (gl) {
int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
>> PPOD_PAGES_SHIFT;
kfree(gl);
ddp_free_gl_skb(ddp, i, npods);
} else
i++;
}
cxgb3i_free_big_mem(ddp);
}
/**
* cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
* @tdev: t3cdev adapter
* release all the resources held by the ddp pagepod manager for a given
* adapter if needed
*/
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
{
struct cxgb3i_ddp_info *ddp, *found = NULL;
/* remove from the list; the loop cursor is not NULL when no entry
* matches, so track the match explicitly */
write_lock(&cxgb3i_ddp_rwlock);
list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
if (ddp->tdev == tdev) {
list_del(&ddp->list);
found = ddp;
break;
}
}
write_unlock(&cxgb3i_ddp_rwlock);
if (found)
ddp_release(found);
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
/**
* cxgb3i_ddp_init_module - module init entry point
* initialize any driver wide global data structures
*/
static int __init cxgb3i_ddp_init_module(void)
{
page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
PAGE_SIZE, page_idx);
return 0;
}
/**
* cxgb3i_ddp_exit_module - module cleanup/exit entry point
* go through the ddp list and release any resource held.
*/
static void __exit cxgb3i_ddp_exit_module(void)
{
struct cxgb3i_ddp_info *ddp, *tmp;
/* release any remaining ddp managers; use the _safe iterator since
* ddp_release() frees the entry while the list is being walked */
write_lock(&cxgb3i_ddp_rwlock);
list_for_each_entry_safe(ddp, tmp, &cxgb3i_ddp_list, list) {
list_del(&ddp->list);
ddp_release(ddp);
}
write_unlock(&cxgb3i_ddp_rwlock);
}
module_init(cxgb3i_ddp_init_module);
module_exit(cxgb3i_ddp_exit_module);


@@ -0,0 +1,306 @@
/*
* cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#ifndef __CXGB3I_ULP2_DDP_H__
#define __CXGB3I_ULP2_DDP_H__
/**
* struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
*
* @sw_bits: # of bits used by iscsi software layer
* @rsvd_bits: # of bits used by h/w
* @rsvd_shift: h/w bits shift left
* @rsvd_mask: reserved bit mask
*/
struct cxgb3i_tag_format {
unsigned char sw_bits;
unsigned char rsvd_bits;
unsigned char rsvd_shift;
unsigned char filler[1];
u32 rsvd_mask;
};
/**
* struct cxgb3i_gather_list - cxgb3i direct data placement memory
*
* @tag: ddp tag
* @length: total data buffer length
* @offset: initial offset to the 1st page
* @nelem: # of pages
* @pages: page pointers
* @phys_addr: physical address
*/
struct cxgb3i_gather_list {
u32 tag;
unsigned int length;
unsigned int offset;
unsigned int nelem;
struct page **pages;
dma_addr_t phys_addr[0];
};
/**
* struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
*
* @list: list head to link elements
* @tdev: pointer to t3cdev used by cxgb3 driver
* @max_txsz: max tx packet size for ddp
* @max_rxsz: max rx packet size for ddp
* @llimit: lower bound of the page pod memory
* @ulimit: upper bound of the page pod memory
* @nppods: # of page pod entries
* @idx_last: page pod entry last used
* @idx_bits: # of bits the pagepod index would take
* @idx_mask: pagepod index mask
* @rsvd_tag_mask: tag mask
* @map_lock: lock to synchronize access to the page pod map
* @gl_map: ddp memory gather list
* @gl_skb: skb used to program the pagepod
*/
struct cxgb3i_ddp_info {
struct list_head list;
struct t3cdev *tdev;
struct pci_dev *pdev;
unsigned int max_txsz;
unsigned int max_rxsz;
unsigned int llimit;
unsigned int ulimit;
unsigned int nppods;
unsigned int idx_last;
unsigned char idx_bits;
unsigned char filler[3];
u32 idx_mask;
u32 rsvd_tag_mask;
spinlock_t map_lock;
struct cxgb3i_gather_list **gl_map;
struct sk_buff **gl_skb;
};
#define ULP2_MAX_PKT_SIZE 16224
#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
#define PPOD_PAGES_MAX 4
#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
/*
* struct pagepod_hdr, pagepod - pagepod format
*/
struct pagepod_hdr {
u32 vld_tid;
u32 pgsz_tag_clr;
u32 maxoffset;
u32 pgoffset;
u64 rsvd;
};
struct pagepod {
struct pagepod_hdr hdr;
u64 addr[PPOD_PAGES_MAX + 1];
};
#define PPOD_SIZE sizeof(struct pagepod) /* 64 */
#define PPOD_SIZE_SHIFT 6
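/*
 * Size check: struct pagepod_hdr is 4 * 4 + 8 = 24 bytes and
 * addr[PPOD_PAGES_MAX + 1] adds 5 * 8 = 40 bytes, so PPOD_SIZE is 64 and
 * matches PPOD_SIZE_SHIFT (64 == 1 << 6). The extra 5th address slot lets
 * each pod repeat the first page of the following pod, as programmed in
 * set_ddp_map().
 */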
#define PPOD_COLOR_SHIFT 0
#define PPOD_COLOR_SIZE 6
#define PPOD_COLOR_MASK ((1 << PPOD_COLOR_SIZE) - 1)
#define PPOD_IDX_SHIFT PPOD_COLOR_SIZE
#define PPOD_IDX_MAX_SIZE 24
#define S_PPOD_TID 0
#define M_PPOD_TID 0xFFFFFF
#define V_PPOD_TID(x) ((x) << S_PPOD_TID)
#define S_PPOD_VALID 24
#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID)
#define F_PPOD_VALID V_PPOD_VALID(1U)
#define S_PPOD_COLOR 0
#define M_PPOD_COLOR 0x3F
#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
#define S_PPOD_TAG 6
#define M_PPOD_TAG 0xFFFFFF
#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
#define S_PPOD_PGSZ 30
#define M_PPOD_PGSZ 0x3
#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
/*
* large memory chunk allocation/release
* use vmalloc() if kmalloc() fails
*/
static inline void *cxgb3i_alloc_big_mem(unsigned int size,
gfp_t gfp)
{
void *p = kmalloc(size, gfp);
if (!p)
p = vmalloc(size);
if (p)
memset(p, 0, size);
return p;
}
static inline void cxgb3i_free_big_mem(void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
}
/*
* cxgb3i ddp tag are 32 bits, it consists of reserved bits used by h/w and
* non-reserved bits that can be used by the iscsi s/w.
* The reserved bits are identified by the rsvd_bits and rsvd_shift fields
* in struct cxgb3i_tag_format.
*
* The uppermost reserved bit can be used to check if a tag is a ddp tag:
* if the bit is 0, the tag is a valid ddp tag
*/
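/*
 * Worked example (hypothetical widths): with rsvd_bits = 10 and
 * rsvd_shift = 6, a 32-bit ddp tag is laid out as
 *
 *	[31..16] upper s/w bits | [15..6] h/w (reserved) bits | [5..0] low s/w bits
 *
 * cxgb3i_ddp_tag_base() below spreads a s/w tag around the reserved field,
 * cxgb3i_set_non_ddp_tag() forces bit 15 (the uppermost reserved bit) to 1
 * so the result can never be mistaken for a ddp tag, and
 * cxgb3i_tag_nonrsvd_bits() reassembles the original s/w tag from either
 * form.
 */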
/**
* cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
* @tformat: tag format information
* @tag: tag to be checked
*
* return true if the tag is a ddp tag, false otherwise.
*/
static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
{
return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
}
/**
* cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for
* the reserved/hw bits
* @tformat: tag format information
* @sw_tag: s/w tag to be checked
*
* return true if the tag is a ddp tag, false otherwise.
*/
static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
u32 sw_tag)
{
sw_tag >>= (32 - tformat->rsvd_bits);
return !sw_tag;
}
/**
* cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
* @tformat: tag format information
* @sw_tag: s/w tag to be checked
*
* insert 1 at the uppermost reserved bit to mark it as an invalid ddp tag.
*/
static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
u32 sw_tag)
{
unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
u32 mask = (1 << shift) - 1;
if (sw_tag && (sw_tag & ~mask)) {
u32 v1 = sw_tag & ((1 << shift) - 1);
u32 v2 = (sw_tag >> (shift - 1)) << shift;
return v2 | v1 | 1 << shift;
}
return sw_tag | 1 << shift;
}
/**
* cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not
* used.
* @tformat: tag format information
* @sw_tag: s/w tag to be checked
*/
static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
u32 sw_tag)
{
u32 mask = (1 << tformat->rsvd_shift) - 1;
if (sw_tag && (sw_tag & ~mask)) {
u32 v1 = sw_tag & mask;
u32 v2 = sw_tag >> tformat->rsvd_shift;
v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
return v2 | v1;
}
return sw_tag;
}
/**
* cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
* @tformat: tag format information
* @tag: tag to be checked
*
* return the reserved bits in the tag
*/
static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
u32 tag)
{
if (cxgb3i_is_ddp_tag(tformat, tag))
return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
return 0;
}
/**
* cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
* @tformat: tag format information
* @tag: tag to be checked
*
* return the non-reserved bits in the tag.
*/
static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
u32 tag)
{
unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
u32 v1, v2;
if (cxgb3i_is_ddp_tag(tformat, tag)) {
v1 = tag & ((1 << tformat->rsvd_shift) - 1);
v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
} else {
u32 mask = (1 << shift) - 1;
tag &= ~(1 << shift);
v1 = tag & mask;
v2 = (tag >> 1) & ~mask;
}
return v1 | v2;
}
int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
struct cxgb3i_tag_format *, u32 *tag,
struct cxgb3i_gather_list *, gfp_t gfp);
void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
struct scatterlist *sgl,
unsigned int sgcnt,
struct pci_dev *pdev,
gfp_t gfp);
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
struct pci_dev *pdev);
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
int reply);
int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
unsigned long pgsz);
int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
int hcrc, int dcrc, int reply);
int cxgb3i_ddp_find_page_index(unsigned long pgsz);
int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
unsigned int *txsz, unsigned int *rxsz);
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
#endif


@@ -0,0 +1,107 @@
/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#include "cxgb3i.h"
#define DRV_MODULE_NAME "cxgb3i"
#define DRV_MODULE_VERSION "1.0.0"
#define DRV_MODULE_RELDATE "Jun. 1, 2008"
static char version[] =
"Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static void open_s3_dev(struct t3cdev *);
static void close_s3_dev(struct t3cdev *);
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
static struct cxgb3_client t3c_client = {
.name = "iscsi_cxgb3",
.handlers = cxgb3i_cpl_handlers,
.add = open_s3_dev,
.remove = close_s3_dev,
};
/**
* open_s3_dev - register with cxgb3 LLD
* @t3dev: cxgb3 adapter instance
*/
static void open_s3_dev(struct t3cdev *t3dev)
{
static int vers_printed;
if (!vers_printed) {
printk(KERN_INFO "%s", version);
vers_printed = 1;
}
cxgb3i_sdev_add(t3dev, &t3c_client);
cxgb3i_adapter_add(t3dev);
}
/**
* close_s3_dev - de-register with cxgb3 LLD
* @t3dev: cxgb3 adapter instance
*/
static void close_s3_dev(struct t3cdev *t3dev)
{
cxgb3i_adapter_remove(t3dev);
cxgb3i_sdev_remove(t3dev);
}
/**
* cxgb3i_init_module - module init entry point
*
* initialize any driver wide global data structures and register itself
* with the cxgb3 module
*/
static int __init cxgb3i_init_module(void)
{
int err;
err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
if (err < 0)
return err;
err = cxgb3i_iscsi_init();
if (err < 0)
return err;
err = cxgb3i_pdu_init();
if (err < 0)
return err;
cxgb3_register_client(&t3c_client);
return 0;
}
/**
* cxgb3i_exit_module - module cleanup/exit entry point
*
* go through the driver hba list and, for each hba, release any resources
* held, then unregister the iscsi transport and the cxgb3 client.
*/
static void __exit cxgb3i_exit_module(void)
{
cxgb3_unregister_client(&t3c_client);
cxgb3i_pdu_cleanup();
cxgb3i_iscsi_cleanup();
cxgb3i_sdev_cleanup();
}
module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);


@@ -0,0 +1,951 @@
/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
* Copyright (c) 2008 Mike Christie
* Copyright (c) 2008 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#include <linux/inet.h>
#include <linux/crypto.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include "cxgb3i.h"
#include "cxgb3i_pdu.h"
#ifdef __DEBUG_CXGB3I_TAG__
#define cxgb3i_tag_debug cxgb3i_log_debug
#else
#define cxgb3i_tag_debug(fmt...)
#endif
#ifdef __DEBUG_CXGB3I_API__
#define cxgb3i_api_debug cxgb3i_log_debug
#else
#define cxgb3i_api_debug(fmt...)
#endif
/*
* align pdu size to multiple of 512 for better performance
*/
#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
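/*
 * e.g. align_pdu_size(n) with n = 8800 leaves n = 8704 (17 * 512): the low
 * 9 bits are cleared, rounding the size down to a 512-byte multiple.
 */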
static struct scsi_transport_template *cxgb3i_scsi_transport;
static struct scsi_host_template cxgb3i_host_template;
static struct iscsi_transport cxgb3i_iscsi_transport;
static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;
static LIST_HEAD(cxgb3i_snic_list);
static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
/**
* cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
* @t3dev: t3cdev adapter
* return the resulting cxgb3i_adapter struct
*/
struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
{
struct cxgb3i_adapter *snic;
struct adapter *adapter = tdev2adap(t3dev);
int i;
snic = kzalloc(sizeof(*snic), GFP_KERNEL);
if (!snic) {
cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
return NULL;
}
spin_lock_init(&snic->lock);
snic->tdev = t3dev;
snic->pdev = adapter->pdev;
snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
&snic->tx_max_size,
&snic->rx_max_size) < 0)
goto free_snic;
for_each_port(adapter, i) {
snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
if (!snic->hba[i])
goto ulp_cleanup;
}
snic->hba_cnt = adapter->params.nports;
/* add to the list */
write_lock(&cxgb3i_snic_rwlock);
list_add_tail(&snic->list_head, &cxgb3i_snic_list);
write_unlock(&cxgb3i_snic_rwlock);
return snic;
ulp_cleanup:
cxgb3i_adapter_ddp_cleanup(t3dev);
free_snic:
kfree(snic);
return NULL;
}
/**
* cxgb3i_adapter_remove - release all the resources held and cleanup any
* h/w settings
* @t3dev: t3cdev adapter
*/
void cxgb3i_adapter_remove(struct t3cdev *t3dev)
{
int i;
struct cxgb3i_adapter *snic, *found = NULL;
/* remove from the list; the loop cursor is not NULL when no entry
* matches, so track the match explicitly */
write_lock(&cxgb3i_snic_rwlock);
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
if (snic->tdev == t3dev) {
list_del(&snic->list_head);
found = snic;
break;
}
}
write_unlock(&cxgb3i_snic_rwlock);
if (found) {
for (i = 0; i < found->hba_cnt; i++) {
if (found->hba[i]) {
cxgb3i_hba_host_remove(found->hba[i]);
found->hba[i] = NULL;
}
}
/* release ddp resources */
cxgb3i_adapter_ddp_cleanup(found->tdev);
kfree(found);
}
}
/**
* cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure for a given
* net_device
* @ndev: the net_device to look up
*/
struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
{
struct cxgb3i_adapter *snic;
int i;
read_lock(&cxgb3i_snic_rwlock);
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
for (i = 0; i < snic->hba_cnt; i++) {
if (snic->hba[i]->ndev == ndev) {
read_unlock(&cxgb3i_snic_rwlock);
return snic->hba[i];
}
}
}
read_unlock(&cxgb3i_snic_rwlock);
return NULL;
}
/**
* cxgb3i_hba_host_add - register a new host with scsi/iscsi
* @snic: the cxgb3i adapter
* @ndev: associated net_device
*/
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
struct net_device *ndev)
{
struct cxgb3i_hba *hba;
struct Scsi_Host *shost;
int err;
shost = iscsi_host_alloc(&cxgb3i_host_template,
sizeof(struct cxgb3i_hba),
CXGB3I_SCSI_QDEPTH_DFLT);
if (!shost) {
cxgb3i_log_info("iscsi_host_alloc failed.\n");
return NULL;
}
shost->transportt = cxgb3i_scsi_transport;
shost->max_lun = CXGB3I_MAX_LUN;
shost->max_id = CXGB3I_MAX_TARGET;
shost->max_channel = 0;
shost->max_cmd_len = 16;
hba = iscsi_host_priv(shost);
hba->snic = snic;
hba->ndev = ndev;
hba->shost = shost;
pci_dev_get(snic->pdev);
err = iscsi_host_add(shost, &snic->pdev->dev);
if (err) {
cxgb3i_log_info("iscsi_host_add failed.\n");
goto pci_dev_put;
}
cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
shost, hba, shost->host_no);
return hba;
pci_dev_put:
pci_dev_put(snic->pdev);
scsi_host_put(shost);
return NULL;
}
/**
* cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
* @hba: the cxgb3i hba
*/
void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
{
cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
hba->shost, hba, hba->shost->host_no);
iscsi_host_remove(hba->shost);
pci_dev_put(hba->snic->pdev);
iscsi_host_free(hba->shost);
}
/**
* cxgb3i_ep_connect - establish TCP connection to target portal
* @dst_addr: target IP address
* @non_blocking: blocking or non-blocking call
*
* Initiates a TCP/IP connection to the dst_addr
*/
static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
int non_blocking)
{
struct iscsi_endpoint *ep;
struct cxgb3i_endpoint *cep;
struct cxgb3i_hba *hba;
struct s3_conn *c3cn = NULL;
int err = 0;
c3cn = cxgb3i_c3cn_create();
if (!c3cn) {
cxgb3i_log_info("ep connect OOM.\n");
err = -ENOMEM;
goto release_conn;
}
err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
if (err < 0) {
cxgb3i_log_info("ep connect failed.\n");
goto release_conn;
}
hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
if (!hba) {
err = -ENOSPC;
cxgb3i_log_info("NOT going through cxgbi device.\n");
goto release_conn;
}
if (c3cn_is_closing(c3cn)) {
err = -ENOSPC;
cxgb3i_log_info("ep connect unable to connect.\n");
goto release_conn;
}
ep = iscsi_create_endpoint(sizeof(*cep));
if (!ep) {
err = -ENOMEM;
cxgb3i_log_info("iscsi alloc ep, OOM.\n");
goto release_conn;
}
cep = ep->dd_data;
cep->c3cn = c3cn;
cep->hba = hba;
cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
ep, cep, c3cn, hba);
return ep;
release_conn:
cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
if (c3cn)
cxgb3i_c3cn_release(c3cn);
return ERR_PTR(err);
}
/**
* cxgb3i_ep_poll - polls for TCP connection establishment
* @ep: TCP connection (endpoint) handle
* @timeout_ms: timeout value in milliseconds
*
* polls for TCP connect request to complete
*/
static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct cxgb3i_endpoint *cep = ep->dd_data;
struct s3_conn *c3cn = cep->c3cn;
if (!c3cn_is_established(c3cn))
return 0;
cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
return 1;
}
/**
* cxgb3i_ep_disconnect - teardown TCP connection
* @ep: TCP connection (endpoint) handle
*
* teardown TCP connection
*/
static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
{
struct cxgb3i_endpoint *cep = ep->dd_data;
struct cxgb3i_conn *cconn = cep->cconn;
cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
if (cconn && cconn->conn) {
/*
* stop the xmit path so the xmit_pdu function is
* not being called
*/
iscsi_suspend_tx(cconn->conn);
write_lock_bh(&cep->c3cn->callback_lock);
cep->c3cn->user_data = NULL;
cconn->cep = NULL;
write_unlock_bh(&cep->c3cn->callback_lock);
}
cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
ep, cep, cep->c3cn);
cxgb3i_c3cn_release(cep->c3cn);
iscsi_destroy_endpoint(ep);
}
/**
* cxgb3i_session_create - create a new iscsi session
* @ep: endpoint returned by cxgb3i_ep_connect()
* @cmds_max: max # of commands
* @qdepth: scsi queue depth
* @initial_cmdsn: initial iscsi CMDSN for this session
* @host_no: pointer to return host no
*
* Creates a new iSCSI session
*/
static struct iscsi_cls_session *
cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
u32 initial_cmdsn, u32 *host_no)
{
struct cxgb3i_endpoint *cep;
struct cxgb3i_hba *hba;
struct Scsi_Host *shost;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
if (!ep) {
cxgb3i_log_error("%s, missing endpoint.\n", __func__);
return NULL;
}
cep = ep->dd_data;
hba = cep->hba;
shost = hba->shost;
cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
BUG_ON(hba != iscsi_host_priv(shost));
*host_no = shost->host_no;
cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
cmds_max,
sizeof(struct iscsi_tcp_task),
initial_cmdsn, ISCSI_MAX_TARGET);
if (!cls_session)
return NULL;
session = cls_session->dd_data;
if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
return cls_session;
remove_session:
iscsi_session_teardown(cls_session);
return NULL;
}
/**
* cxgb3i_session_destroy - destroys iscsi session
* @cls_session: pointer to iscsi cls session
*
* Destroys an iSCSI session instance and releases all the resources held
*/
static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
{
cxgb3i_api_debug("sess 0x%p.\n", cls_session);
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
}
/**
* cxgb3i_conn_max_xmit_dlength -- check the max. xmit pdu segment size,
* reduce it to be within the hardware limit if needed
* @conn: iscsi connection
*/
static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
cconn->hba->snic->tx_max_size -
ISCSI_PDU_NONPAYLOAD_MAX);
if (conn->max_xmit_dlength)
conn->max_xmit_dlength = min_t(unsigned int,
conn->max_xmit_dlength, max);
else
conn->max_xmit_dlength = max;
align_pdu_size(conn->max_xmit_dlength);
cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
conn, conn->max_xmit_dlength);
return 0;
}
/**
* cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size against
* the hardware limit
* @conn: iscsi connection
* return 0 if the value is valid, < 0 otherwise.
*/
static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
cconn->hba->snic->rx_max_size -
ISCSI_PDU_NONPAYLOAD_MAX);
align_pdu_size(max);
if (conn->max_recv_dlength) {
if (conn->max_recv_dlength > max) {
cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
" Need to be <= %u.\n",
conn->max_recv_dlength, max);
return -EINVAL;
}
conn->max_recv_dlength = min_t(unsigned int,
conn->max_recv_dlength, max);
align_pdu_size(conn->max_recv_dlength);
} else
conn->max_recv_dlength = max;
cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
conn, conn->max_recv_dlength);
return 0;
}
/**
* cxgb3i_conn_create - create iscsi connection instance
* @cls_session: pointer to iscsi cls session
* @cid: iscsi cid
*
* Creates a new iSCSI connection instance for a given session
*/
static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
*cls_session, u32 cid)
{
struct iscsi_cls_conn *cls_conn;
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
struct cxgb3i_conn *cconn;
cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
cconn = tcp_conn->dd_data;
cconn->conn = conn;
return cls_conn;
}
/**
* cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
* @cls_session: pointer to iscsi cls session
* @cls_conn: pointer to iscsi cls conn
* @transport_eph: 64-bit EP handle
* @is_leading: leading connection on this session?
*
* Binds together an iSCSI session, an iSCSI connection and a
* TCP connection. This routine returns an error code if the TCP
* connection does not belong to the device the iSCSI sess/conn is bound to
*/
static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
u64 transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
struct cxgb3i_adapter *snic;
struct iscsi_endpoint *ep;
struct cxgb3i_endpoint *cep;
struct s3_conn *c3cn;
int err;
ep = iscsi_lookup_endpoint(transport_eph);
if (!ep)
return -EINVAL;
/* setup ddp pagesize */
cep = ep->dd_data;
c3cn = cep->c3cn;
snic = cep->hba->snic;
err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
if (err < 0)
return err;
cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
ep, cls_session, cls_conn);
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
return -EINVAL;
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
conn->session->cmds_max, cconn->task_idx_bits);
read_lock(&c3cn->callback_lock);
c3cn->user_data = conn;
cconn->hba = cep->hba;
cconn->cep = cep;
cep->cconn = cconn;
read_unlock(&c3cn->callback_lock);
cxgb3i_conn_max_xmit_dlength(conn);
cxgb3i_conn_max_recv_dlength(conn);
spin_lock_bh(&conn->session->lock);
sprintf(conn->portal_address, NIPQUAD_FMT,
NIPQUAD(c3cn->daddr.sin_addr.s_addr));
conn->portal_port = ntohs(c3cn->daddr.sin_port);
spin_unlock_bh(&conn->session->lock);
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
return 0;
}
/**
* cxgb3i_conn_get_param - return iscsi connection parameter to caller
* @cls_conn: pointer to iscsi cls conn
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iSCSI connection parameters
*/
static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
int len;
cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
spin_lock_bh(&conn->session->lock);
len = sprintf(buf, "%hu\n", conn->portal_port);
spin_unlock_bh(&conn->session->lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
len = sprintf(buf, "%s\n", conn->portal_address);
spin_unlock_bh(&conn->session->lock);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
return len;
}
/**
* cxgb3i_conn_set_param - set iscsi connection parameter
* @cls_conn: pointer to iscsi cls conn
* @param: parameter type identifier
* @buf: buffer pointer
* @buflen: buffer length
*
* set iSCSI connection parameters
*/
static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
struct cxgb3i_adapter *snic = cconn->hba->snic;
struct s3_conn *c3cn = cconn->cep->c3cn;
int value, err = 0;
switch (param) {
case ISCSI_PARAM_HDRDGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->hdrdgst_en)
err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
conn->hdrdgst_en,
conn->datadgst_en, 0);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
conn->hdrdgst_en,
conn->datadgst_en, 0);
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &value);
if (value <= 0 || !is_power_of_2(value))
return -EINVAL;
if (session->max_r2t == value)
break;
iscsi_tcp_r2tpool_free(session);
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && iscsi_tcp_r2tpool_alloc(session))
return -ENOMEM;
break;
case ISCSI_PARAM_MAX_RECV_DLENGTH:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err)
err = cxgb3i_conn_max_recv_dlength(conn);
break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err)
err = cxgb3i_conn_max_xmit_dlength(conn);
break;
default:
return iscsi_set_param(cls_conn, param, buf, buflen);
}
return err;
}
/**
* cxgb3i_host_set_param - configure host (adapter) related parameters
* @shost: scsi host pointer
* @param: parameter type identifier
* @buf: buffer pointer
*/
static int cxgb3i_host_set_param(struct Scsi_Host *shost,
enum iscsi_host_param param,
char *buf, int buflen)
{
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
{
__be32 addr = in_aton(buf);
cxgb3i_set_private_ipv4addr(hba->ndev, addr);
return 0;
}
case ISCSI_HOST_PARAM_HWADDRESS:
case ISCSI_HOST_PARAM_NETDEV_NAME:
/* ignore */
return 0;
default:
return iscsi_host_set_param(shost, param, buf, buflen);
}
}
/**
* cxgb3i_host_get_param - returns host (adapter) related parameters
* @shost: scsi host pointer
* @param: parameter type identifier
* @buf: buffer pointer
*/
static int cxgb3i_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
int len = 0;
cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
break;
case ISCSI_HOST_PARAM_NETDEV_NAME:
len = sprintf(buf, "%s\n", hba->ndev->name);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
{
__be32 addr;
addr = cxgb3i_get_private_ipv4addr(hba->ndev);
len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
break;
}
default:
return iscsi_host_get_param(shost, param, buf);
}
return len;
}
/**
* cxgb3i_conn_get_stats - returns iSCSI stats
* @cls_conn: pointer to iscsi cls conn
* @stats: pointer to iscsi statistic struct
*/
static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
stats->dataout_pdus = conn->dataout_pdus_cnt;
stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
stats->datain_pdus = conn->datain_pdus_cnt;
stats->r2t_pdus = conn->r2t_pdus_cnt;
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
stats->digest_err = 0;
stats->timeout_err = 0;
stats->custom_length = 1;
strcpy(stats->custom[0].desc, "eh_abort_cnt");
stats->custom[0].value = conn->eh_abort_cnt;
}
/**
* cxgb3i_parse_itt - get the idx and age bits from a given tag
* @conn: iscsi connection
* @itt: itt tag
* @idx: task index, filled in by this function
* @age: session age, filled in by this function
*/
static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
int *idx, int *age)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
struct cxgb3i_adapter *snic = cconn->hba->snic;
u32 tag = ntohl((__force u32) itt);
u32 sw_bits;
sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
if (idx)
*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
if (age)
*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
age ? *age : 0xFF);
}
/**
* cxgb3i_reserve_itt - generate tag for a given task
* @task: iscsi task
* @hdr_itt: tag, filled in by this function
*
* Try to set up ddp for a scsi read task.
*/
int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
struct scsi_cmnd *sc = task->sc;
struct iscsi_conn *conn = task->conn;
struct iscsi_session *sess = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
struct cxgb3i_adapter *snic = cconn->hba->snic;
struct cxgb3i_tag_format *tformat = &snic->tag_format;
u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
u32 tag;
int err = -EINVAL;
if (sc &&
(scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
cxgb3i_sw_tag_usable(tformat, sw_tag)) {
struct s3_conn *c3cn = cconn->cep->c3cn;
struct cxgb3i_gather_list *gl;
gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
scsi_in(sc)->table.sgl,
scsi_in(sc)->table.nents,
snic->pdev,
GFP_ATOMIC);
if (gl) {
tag = sw_tag;
err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
tformat, &tag,
gl, GFP_ATOMIC);
if (err < 0)
cxgb3i_ddp_release_gl(gl, snic->pdev);
}
}
if (err < 0)
tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
/* the itt needs to be sent in big-endian order */
*hdr_itt = (__force itt_t)htonl(tag);
cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
tag, *hdr_itt, task->itt, sess->age);
return 0;
}
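/*
 * Tag round trip (illustrative, hypothetical values): with
 * task_idx_bits = 7, session age 0x3 and task->itt 0x15,
 * sw_tag = (0x3 << 7) | 0x15 = 0x195. Whether or not ddp setup folds a pod
 * index into the reserved field, cxgb3i_parse_itt() above recovers
 * idx = 0x15 and age = 0x3 from the non-reserved bits.
 */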
/**
* cxgb3i_release_itt - release the tag for a given task
* @task: iscsi task
* @hdr_itt: tag
*
* If the tag is a ddp tag, release the ddp setup.
*/
void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
struct scsi_cmnd *sc = task->sc;
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
struct cxgb3i_adapter *snic = cconn->hba->snic;
struct cxgb3i_tag_format *tformat = &snic->tag_format;
u32 tag = ntohl((__force u32)hdr_itt);
cxgb3i_tag_debug("release tag 0x%x.\n", tag);
if (sc &&
(scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
cxgb3i_is_ddp_tag(tformat, tag))
cxgb3i_ddp_tag_release(snic->tdev, tag);
}
/**
* cxgb3i_host_template -- Scsi_Host_Template structure
* used when registering with the scsi mid layer
*/
static struct scsi_host_template cxgb3i_host_template = {
.module = THIS_MODULE,
.name = "Chelsio S3xx iSCSI Initiator",
.proc_name = "cxgb3i",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_target_reset,
.use_clustering = DISABLE_CLUSTERING,
.this_id = -1,
};
static struct iscsi_transport cxgb3i_iscsi_transport = {
.owner = THIS_MODULE,
.name = "cxgb3i",
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
CAP_PADDING_OFFLOAD,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN |
ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN |
ISCSI_MAX_R2T |
ISCSI_IMM_DATA_EN |
ISCSI_FIRST_BURST |
ISCSI_MAX_BURST |
ISCSI_PDU_INORDER_EN |
ISCSI_DATASEQ_INORDER_EN |
ISCSI_ERL |
ISCSI_CONN_PORT |
ISCSI_CONN_ADDRESS |
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
ISCSI_TARGET_NAME | ISCSI_TPGT |
ISCSI_USERNAME | ISCSI_PASSWORD |
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
ISCSI_LU_RESET_TMO |
ISCSI_PING_TMO | ISCSI_RECV_TMO |
ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
.get_host_param = cxgb3i_host_get_param,
.set_host_param = cxgb3i_host_set_param,
/* session management */
.create_session = cxgb3i_session_create,
.destroy_session = cxgb3i_session_destroy,
.get_session_param = iscsi_session_get_param,
/* connection management */
.create_conn = cxgb3i_conn_create,
.bind_conn = cxgb3i_conn_bind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
.get_conn_param = cxgb3i_conn_get_param,
.set_param = cxgb3i_conn_set_param,
.get_stats = cxgb3i_conn_get_stats,
/* pdu xmit req. from user space */
.send_pdu = iscsi_conn_send_pdu,
/* task */
.init_task = iscsi_tcp_task_init,
.xmit_task = iscsi_tcp_task_xmit,
.cleanup_task = cxgb3i_conn_cleanup_task,
/* pdu */
.alloc_pdu = cxgb3i_conn_alloc_pdu,
.init_pdu = cxgb3i_conn_init_pdu,
.xmit_pdu = cxgb3i_conn_xmit_pdu,
.parse_pdu_itt = cxgb3i_parse_itt,
/* TCP connect/disconnect */
.ep_connect = cxgb3i_ep_connect,
.ep_poll = cxgb3i_ep_poll,
.ep_disconnect = cxgb3i_ep_disconnect,
/* Error recovery timeout call */
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
int cxgb3i_iscsi_init(void)
{
sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
ISCSI_ITT_MASK, sw_tag_idx_bits,
ISCSI_AGE_MASK, sw_tag_age_bits);
cxgb3i_scsi_transport =
iscsi_register_transport(&cxgb3i_iscsi_transport);
if (!cxgb3i_scsi_transport) {
cxgb3i_log_error("Could not register cxgb3i transport.\n");
return -ENODEV;
}
cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
return 0;
}
void cxgb3i_iscsi_cleanup(void)
{
if (cxgb3i_scsi_transport) {
cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
cxgb3i_scsi_transport);
iscsi_unregister_transport(&cxgb3i_iscsi_transport);
}
}

Diff for this file is not shown because of its size.


@@ -0,0 +1,231 @@
/*
* cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
*
* Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*
* Written by: Dimitris Michailidis (dm@chelsio.com)
* Karen Xie (kxie@chelsio.com)
*/
#ifndef _CXGB3I_OFFLOAD_H
#define _CXGB3I_OFFLOAD_H
#include <linux/skbuff.h>
#include <net/tcp.h>
#include "common.h"
#include "adapter.h"
#include "t3cdev.h"
#include "cxgb3_offload.h"
#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
#define cxgb3i_log_debug(fmt, args...) \
printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
/**
* struct s3_conn - an iscsi tcp connection structure
*
* @dev: net device of the connection
* @cdev: adapter t3cdev for net device
* @flags: see c3cn_flags below
* @tid: connection id assigned by the h/w
* @qset: queue set used by connection
* @mss_idx: Maximum Segment Size table index
* @l2t: ARP resolution entry for offload packets
* @wr_max: maximum in-flight writes
* @wr_avail: number of writes available
* @wr_unacked: writes since last request for completion notification
* @wr_pending_head: head of pending write queue
* @wr_pending_tail: tail of pending write queue
* @cpl_close: skb for cpl_close_req
* @cpl_abort_req: skb for cpl_abort_req
* @cpl_abort_rpl: skb for cpl_abort_rpl
* @lock: connection status lock
* @refcnt: reference count on connection
* @state: connection state
* @saddr: source ip/port address
* @daddr: destination ip/port address
* @dst_cache: reference to destination route
* @receive_queue: received PDUs
* @write_queue: un-pushed pending writes
* @retry_timer: retry timer for various operations
* @err: connection error status
* @callback_lock: lock for opaque user context
* @user_data: opaque user context
* @rcv_nxt: next receive seq. #
* @copied_seq: head of yet unread data
* @rcv_wup: rcv_nxt on last window update sent
* @snd_nxt: next sequence we send
* @snd_una: first byte we want an ack for
* @write_seq: tail+1 of data held in send buffer
*/
struct s3_conn {
struct net_device *dev;
struct t3cdev *cdev;
unsigned long flags;
int tid;
int qset;
int mss_idx;
struct l2t_entry *l2t;
int wr_max;
int wr_avail;
int wr_unacked;
struct sk_buff *wr_pending_head;
struct sk_buff *wr_pending_tail;
struct sk_buff *cpl_close;
struct sk_buff *cpl_abort_req;
struct sk_buff *cpl_abort_rpl;
spinlock_t lock;
atomic_t refcnt;
volatile unsigned int state;
struct sockaddr_in saddr;
struct sockaddr_in daddr;
struct dst_entry *dst_cache;
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
int err;
rwlock_t callback_lock;
void *user_data;
u32 rcv_nxt;
u32 copied_seq;
u32 rcv_wup;
u32 snd_nxt;
u32 snd_una;
u32 write_seq;
};
/*
* connection state
*/
enum conn_states {
C3CN_STATE_CONNECTING = 1,
C3CN_STATE_ESTABLISHED,
C3CN_STATE_ACTIVE_CLOSE,
C3CN_STATE_PASSIVE_CLOSE,
C3CN_STATE_CLOSE_WAIT_1,
C3CN_STATE_CLOSE_WAIT_2,
C3CN_STATE_ABORTING,
C3CN_STATE_CLOSED,
};
static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
{
return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
}
static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
{
return c3cn->state == C3CN_STATE_ESTABLISHED;
}
/*
* Connection flags -- many to track some close related events.
*/
enum c3cn_flags {
C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
};
/**
* cxgb3i_sdev_data - Per adapter data.
* Linked off of each Ethernet device port on the adapter.
* Also available via the t3cdev structure since we have pointers to our port
* net_device's there ...
*
* @list: list head to link elements
* @cdev: t3cdev adapter
* @client: CPL client pointer
* @ports: array of adapter ports
* @sport_map_next: next index into the port map
* @sport_map: source port map
*/
struct cxgb3i_sdev_data {
struct list_head list;
struct t3cdev *cdev;
struct cxgb3_client *client;
struct adap_ports ports;
unsigned int sport_map_next;
unsigned long sport_map[0];
};
#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
void cxgb3i_sdev_cleanup(void);
int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
void cxgb3i_sdev_remove(struct t3cdev *);
struct s3_conn *cxgb3i_c3cn_create(void);
int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
void cxgb3i_c3cn_release(struct s3_conn *);
/**
* cxgb3_skb_cb - control block for received pdu state and ULP mode management.
*
* @flags: see C3CB_FLAG_* below
* @ulp_mode: ULP mode/submode of sk_buff
* @seq: tcp sequence number
* @ddigest: pdu data digest
* @pdulen: recovered pdu length
* @wr_data: scratch area for tx wr
*/
struct cxgb3_skb_cb {
__u8 flags;
__u8 ulp_mode;
__u32 seq;
__u32 ddigest;
__u32 pdulen;
struct sk_buff *wr_data;
};
#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest)
#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data)
enum c3cb_flags {
C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
};
/**
* sge_opaque_hdr -
* Opaque version of structure the SGE stores at skb->head of TX_DATA packets
* and for which we must reserve space.
*/
struct sge_opaque_hdr {
void *dev;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
};
/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
/*
* get and set private ip for iscsi traffic
*/
#define cxgb3i_get_private_ipv4addr(ndev) \
(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
#define cxgb3i_set_private_ipv4addr(ndev, addr) \
(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
/* max. connections per adapter */
#define CXGB3I_MAX_CONN 16384
#endif /* _CXGB3_OFFLOAD_H */


@@ -0,0 +1,402 @@
/*
* cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
* Copyright (c) 2008 Mike Christie
* Copyright (c) 2008 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include "cxgb3i.h"
#include "cxgb3i_pdu.h"
#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif
#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif
static struct page *pad_page;
/*
* pdu receive, interact with libiscsi_tcp
*/
static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
unsigned int offset, int offloaded)
{
int status = 0;
int bytes_read;
bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
switch (status) {
case ISCSI_TCP_CONN_ERR:
return -EIO;
case ISCSI_TCP_SUSPENDED:
/* no transfer - just have caller flush queue */
return bytes_read;
case ISCSI_TCP_SKB_DONE:
/*
* pdus should always fit in the skb and we should get
* segment done notification.
*/
iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
return -EFAULT;
case ISCSI_TCP_SEGMENT_DONE:
return bytes_read;
default:
iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
"status %d\n", status);
return -EINVAL;
}
}
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
struct sk_buff *skb)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
bool offloaded = false;
unsigned int offset;
int rc;
cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
conn, skb, skb->len, skb_ulp_mode(skb));
if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
return -EIO;
}
if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
return -EIO;
}
if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
return -EIO;
}
/* iscsi hdr */
rc = read_pdu_skb(conn, skb, 0, 0);
if (rc <= 0)
return rc;
if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
return 0;
offset = rc;
if (conn->hdrdgst_en)
offset += ISCSI_DIGEST_SIZE;
/* iscsi data */
if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
"itt 0x%x.\n",
skb,
tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
tcp_conn->in.datalen,
ntohl(tcp_conn->in.hdr->itt));
offloaded = 1;
} else {
cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
"itt 0x%x.\n",
skb,
tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
tcp_conn->in.datalen,
ntohl(tcp_conn->in.hdr->itt));
offset += sizeof(struct cpl_iscsi_hdr_norss);
}
rc = read_pdu_skb(conn, skb, offset, offloaded);
if (rc < 0)
return rc;
else
return 0;
}
/*
* pdu transmit, interact with libiscsi_tcp
*/
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
u8 submode = 0;
if (hcrc)
submode |= 1;
if (dcrc)
submode |= 2;
skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
}
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
struct iscsi_tcp_task *tcp_task = task->dd_data;
/* never reached the xmit task callout */
if (tcp_task->dd_data)
kfree_skb(tcp_task->dd_data);
tcp_task->dd_data = NULL;
	/* MNC - do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu was never called on the task? */
cxgb3i_release_itt(task, task->hdr_itt);
iscsi_tcp_cleanup_task(task);
}
/*
 * We do not support AHS yet
*/
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct sk_buff *skb;
task->hdr = NULL;
	/* always allocate room for AHS */
skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
TX_HEADER_LEN, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
task, opcode, skb);
tcp_task->dd_data = skb;
skb_reserve(skb, TX_HEADER_LEN);
task->hdr = (struct iscsi_hdr *)skb->data;
task->hdr_max = sizeof(struct iscsi_hdr);
/* data_out uses scsi_cmd's itt */
if (opcode != ISCSI_OP_SCSI_DATA_OUT)
cxgb3i_reserve_itt(task, &task->hdr->itt);
return 0;
}
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
unsigned int count)
{
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct sk_buff *skb = tcp_task->dd_data;
struct iscsi_conn *conn = task->conn;
struct page *pg;
unsigned int datalen = count;
int i, padlen = iscsi_padding(count);
skb_frag_t *frag;
cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
task, task->sc, offset, count, skb);
skb_put(skb, task->hdr_len);
tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
if (!count)
return 0;
if (task->sc) {
struct scatterlist *sg;
struct scsi_data_buffer *sdb;
unsigned int sgoffset = offset;
struct page *sgpg;
unsigned int sglen;
sdb = scsi_out(task->sc);
sg = sdb->table.sgl;
for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
i, sg_page(sg), sg->length, sg->offset);
if (sgoffset < sg->length)
break;
sgoffset -= sg->length;
}
sgpg = sg_page(sg);
sglen = sg->length - sgoffset;
do {
int j = skb_shinfo(skb)->nr_frags;
unsigned int copy;
if (!sglen) {
sg = sg_next(sg);
sgpg = sg_page(sg);
sgoffset = 0;
sglen = sg->length;
++i;
}
copy = min(sglen, datalen);
if (j && skb_can_coalesce(skb, j, sgpg,
sg->offset + sgoffset)) {
skb_shinfo(skb)->frags[j - 1].size += copy;
} else {
get_page(sgpg);
skb_fill_page_desc(skb, j, sgpg,
sg->offset + sgoffset, copy);
}
sgoffset += copy;
sglen -= copy;
datalen -= copy;
} while (datalen);
} else {
pg = virt_to_page(task->data);
while (datalen) {
i = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[i];
get_page(pg);
frag->page = pg;
frag->page_offset = 0;
frag->size = min((unsigned int)PAGE_SIZE, datalen);
skb_shinfo(skb)->nr_frags++;
datalen -= frag->size;
pg++;
}
}
if (padlen) {
i = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[i];
frag->page = pad_page;
frag->page_offset = 0;
frag->size = padlen;
skb_shinfo(skb)->nr_frags++;
}
datalen = count + padlen;
skb->data_len += datalen;
skb->truesize += datalen;
skb->len += datalen;
return 0;
}
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct sk_buff *skb = tcp_task->dd_data;
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
struct cxgb3i_conn *cconn = tcp_conn->dd_data;
unsigned int datalen;
int err;
if (!skb)
return 0;
datalen = skb->data_len;
tcp_task->dd_data = NULL;
err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
task, skb, skb->len, skb->data_len, err);
if (err > 0) {
int pdulen = err;
if (task->conn->hdrdgst_en)
pdulen += ISCSI_DIGEST_SIZE;
if (datalen && task->conn->datadgst_en)
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
return 0;
}
if (err < 0 && err != -EAGAIN) {
kfree_skb(skb);
cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
}
/* reset skb to send when we are called again */
tcp_task->dd_data = skb;
return -EAGAIN;
}
int cxgb3i_pdu_init(void)
{
pad_page = alloc_page(GFP_KERNEL);
if (!pad_page)
return -ENOMEM;
memset(page_address(pad_page), 0, PAGE_SIZE);
return 0;
}
void cxgb3i_pdu_cleanup(void)
{
if (pad_page) {
__free_page(pad_page);
pad_page = NULL;
}
}
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
struct sk_buff *skb;
unsigned int read = 0;
struct iscsi_conn *conn = c3cn->user_data;
int err = 0;
cxgb3i_rx_debug("cn 0x%p.\n", c3cn);
read_lock(&c3cn->callback_lock);
if (unlikely(!conn || conn->suspend_rx)) {
cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
conn, conn ? conn->id : 0xFF,
conn ? conn->suspend_rx : 0xFF);
read_unlock(&c3cn->callback_lock);
return;
}
skb = skb_peek(&c3cn->receive_queue);
while (!err && skb) {
__skb_unlink(skb, &c3cn->receive_queue);
read += skb_ulp_pdulen(skb);
err = cxgb3i_conn_read_pdu_skb(conn, skb);
__kfree_skb(skb);
skb = skb_peek(&c3cn->receive_queue);
}
read_unlock(&c3cn->callback_lock);
if (c3cn) {
c3cn->copied_seq += read;
cxgb3i_c3cn_rx_credits(c3cn, read);
}
conn->rxdata_octets += read;
}
void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
struct iscsi_conn *conn = c3cn->user_data;
cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
if (conn) {
cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
}
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
struct iscsi_conn *conn;
read_lock(&c3cn->callback_lock);
conn = c3cn->user_data;
if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
read_unlock(&c3cn->callback_lock);
}


@ -0,0 +1,59 @@
/*
* cxgb3i_ulp2.h: Chelsio S3xx iSCSI driver.
*
* Copyright (c) 2008 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
#ifndef __CXGB3I_ULP2_PDU_H__
#define __CXGB3I_ULP2_PDU_H__
struct cpl_iscsi_hdr_norss {
union opcode_tid ot;
u16 pdu_len_ddp;
u16 len;
u32 seq;
u16 urg;
u8 rsvd;
u8 status;
};
struct cpl_rx_data_ddp_norss {
union opcode_tid ot;
u16 urg;
u16 len;
u32 seq;
u32 nxt_seq;
u32 ulp_crc;
u32 ddp_status;
};
#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
#define ULP2_FLAG_DATA_READY 0x1
#define ULP2_FLAG_DATA_DDPED 0x2
#define ULP2_FLAG_HCRC_ERROR 0x10
#define ULP2_FLAG_DCRC_ERROR 0x20
#define ULP2_FLAG_PAD_ERROR 0x40
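The RX_DDP_STATUS_* shifts describe the hardware's ddp_status word, and the ULP2_FLAG_* bits are the driver's summary of it. A sketch of the assumed mapping (illustrative only, not the patch's actual decode path):

static inline __u8 example_ddp_status_to_flags(__u32 ddp_status)
{
	__u8 flags = 0;

	if (ddp_status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
		flags |= ULP2_FLAG_HCRC_ERROR;	/* header digest bad */
	if (ddp_status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
		flags |= ULP2_FLAG_DCRC_ERROR;	/* data digest bad */
	if (ddp_status & (1 << RX_DDP_STATUS_PAD_SHIFT))
		flags |= ULP2_FLAG_PAD_ERROR;	/* pad length wrong */
	if (ddp_status & (1 << RX_DDP_STATUS_DDP_SHIFT))
		flags |= ULP2_FLAG_DATA_DDPED;	/* payload was placed */
	return flags;
}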
void cxgb3i_conn_closing(struct s3_conn *);
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
#endif


@ -24,6 +24,7 @@
#include <scsi/scsi_dh.h>
#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5
/*
* LSI mode page stuff
@ -386,6 +387,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
struct c9_inquiry *inqp;
h->lun_state = RDAC_LUN_UNOWNED;
h->state = RDAC_STATE_ACTIVE;
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c9;
@ -477,21 +479,27 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
int err = SCSI_DH_RES_TEMP_UNAVAIL;
int err, retry_cnt = RDAC_RETRY_COUNT;
retry:
err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = rdac_failover_get(sdev, h);
if (!rq)
goto done;
sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
err = blk_execute_rq(q, NULL, rq, 1);
if (err != SCSI_DH_OK)
blk_put_request(rq);
if (err != SCSI_DH_OK) {
err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
}
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
blk_put_request(rq);
done:
return err;
}
@ -594,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"SUN", "LCSM100_F"},
{"DELL", "MD3000"},
{"DELL", "MD3000i"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{NULL, NULL},
};


@ -1626,8 +1626,15 @@ static void map_dma(unsigned int i, struct hostdata *ha)
cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
count = scsi_dma_map(SCpnt);
BUG_ON(count < 0);
if (!scsi_sg_count(SCpnt)) {
cpp->data_len = 0;
return;
}
count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
pci_dir);
BUG_ON(!count);
scsi_for_each_sg(SCpnt, sg, count, k) {
cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
@ -1655,7 +1662,9 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
scsi_dma_unmap(SCpnt);
if (scsi_sg_count(SCpnt))
pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
pci_dir);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;


@ -14,8 +14,8 @@
* neuffer@goofy.zdv.uni-mainz.de *
* a.arnold@kfa-juelich.de *
* *
* Updated 2002 by Alan Cox <alan@redhat.com> for Linux *
* 2.5.x and the newer locking and error handling *
* Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for *
* Linux 2.5.x and the newer locking and error handling *
* *
* This program is free software; you can redistribute it *
* and/or modify it under the terms of the GNU General *


@ -1453,7 +1453,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
offset = 0;
if (offset) {
int rounded_up, one_clock;
int one_clock;
if (period > esp->max_period) {
period = offset = 0;
@ -1463,9 +1463,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
goto do_reject;
one_clock = esp->ccycle / 1000;
rounded_up = (period << 2);
rounded_up = (rounded_up + one_clock - 1) / one_clock;
stp = rounded_up;
stp = DIV_ROUND_UP(period << 2, one_clock);
if (stp && esp->rev >= FAS236) {
if (stp >= 50)
stp--;


@ -0,0 +1,8 @@
# $Id: Makefile
obj-$(CONFIG_FCOE) += fcoe.o
fcoe-y := \
libfcoe.o \
fcoe_sw.o \
fc_transport_fcoe.o


@ -0,0 +1,446 @@
/*
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/pci.h>
#include <scsi/libfcoe.h>
#include <scsi/fc_transport_fcoe.h>
/* internal fcoe transport */
struct fcoe_transport_internal {
struct fcoe_transport *t;
struct net_device *netdev;
struct list_head list;
};
/* fcoe transports list and its lock */
static LIST_HEAD(fcoe_transports);
static DEFINE_MUTEX(fcoe_transports_lock);
/**
* fcoe_transport_default - returns ptr to the default transport fcoe_sw
**/
struct fcoe_transport *fcoe_transport_default(void)
{
return &fcoe_sw_transport;
}
/**
 * fcoe_transport_pcidev - get the pci dev from a netdev
 * @netdev: the netdev that the pci dev will be retrieved from
 *
 * Returns: NULL or the corresponding pci_dev
**/
struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
{
if (!netdev->dev.parent)
return NULL;
return to_pci_dev(netdev->dev.parent);
}
/**
 * fcoe_transport_device_lookup - check if a netdev is managed by the
 * transport
 * @t: the transport to be checked
 * @netdev: the netdev to look up
 *
 * Walks the transport's device list for an entry matching the netdev.
 *
 * Returns: the matching fcoe_transport_internal, or NULL if not found
**/
static struct fcoe_transport_internal *fcoe_transport_device_lookup(
struct fcoe_transport *t, struct net_device *netdev)
{
struct fcoe_transport_internal *ti;
	/* search the transport's device list for this netdev */
mutex_lock(&t->devlock);
list_for_each_entry(ti, &t->devlist, list) {
if (ti->netdev == netdev) {
mutex_unlock(&t->devlock);
return ti;
}
}
mutex_unlock(&t->devlock);
return NULL;
}
/**
 * fcoe_transport_device_add - assign a transport to a device
 * @t: the transport to attach
 * @netdev: the netdev the transport is to be attached to
 *
 * Adds the netdev to the transport's device list so that the transport
 * manages it from now on.
 *
 * Returns: 0 for success
**/
static int fcoe_transport_device_add(struct fcoe_transport *t,
struct net_device *netdev)
{
struct fcoe_transport_internal *ti;
ti = fcoe_transport_device_lookup(t, netdev);
if (ti) {
printk(KERN_DEBUG "fcoe_transport_device_add:"
"device %s is already added to transport %s\n",
netdev->name, t->name);
return -EEXIST;
}
/* allocate an internal struct to host the netdev and the list */
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return -ENOMEM;
ti->t = t;
ti->netdev = netdev;
INIT_LIST_HEAD(&ti->list);
dev_hold(ti->netdev);
mutex_lock(&t->devlock);
list_add(&ti->list, &t->devlist);
mutex_unlock(&t->devlock);
printk(KERN_DEBUG "fcoe_transport_device_add:"
"device %s added to transport %s\n",
netdev->name, t->name);
return 0;
}
/**
* fcoe_transport_device_remove - remove a device from its transport
 * @t: the transport managing the netdev
 * @netdev: the netdev to be removed
 *
 * This removes the device from the transport so the given transport will
 * not manage this device any more
*
* Returns: 0 for success
**/
static int fcoe_transport_device_remove(struct fcoe_transport *t,
struct net_device *netdev)
{
struct fcoe_transport_internal *ti;
ti = fcoe_transport_device_lookup(t, netdev);
if (!ti) {
printk(KERN_DEBUG "fcoe_transport_device_remove:"
"device %s is not managed by transport %s\n",
netdev->name, t->name);
return -ENODEV;
}
mutex_lock(&t->devlock);
list_del(&ti->list);
mutex_unlock(&t->devlock);
printk(KERN_DEBUG "fcoe_transport_device_remove:"
"device %s removed from transport %s\n",
netdev->name, t->name);
dev_put(ti->netdev);
kfree(ti);
return 0;
}
/**
 * fcoe_transport_device_remove_all - remove all devices from a transport
 * @t: the transport whose device list is to be emptied
 *
 * Detaches every device from the given transport.
**/
static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
{
struct fcoe_transport_internal *ti, *tmp;
mutex_lock(&t->devlock);
	list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
		list_del(&ti->list);
		dev_put(ti->netdev);	/* drop the reference taken on add */
		kfree(ti);
	}
mutex_unlock(&t->devlock);
}
/**
* fcoe_transport_match - use the bus device match function to match the hw
* @t: the fcoe transport
 * @netdev: the netdev to check against
 *
 * This function is used to check if the given transport wants to manage the
 * input netdev. If the transport implements the match function, it will be
 * called; otherwise we just compare the PCI vendor and device id.
 *
 * Returns: true if the transport matches the netdev
**/
static bool fcoe_transport_match(struct fcoe_transport *t,
struct net_device *netdev)
{
/* match transport by vendor and device id */
struct pci_dev *pci;
pci = fcoe_transport_pcidev(netdev);
if (pci) {
printk(KERN_DEBUG "fcoe_transport_match:"
"%s:%x:%x -- %s:%x:%x\n",
t->name, t->vendor, t->device,
netdev->name, pci->vendor, pci->device);
/* if transport supports match */
if (t->match)
return t->match(netdev);
/* else just compare the vendor and device id: pci only */
return (t->vendor == pci->vendor) && (t->device == pci->device);
}
return false;
}
/**
 * fcoe_transport_lookup - find the transport that manages a netdev
 * @netdev: the netdev to find a transport for
 *
 * Matches the netdev against each registered transport's parent device
 * (pci) vendor and device id.
 *
 * Returns: the matching transport, or the default sw transport (fcoe_sw)
 * if no registered transport matches
**/
static struct fcoe_transport *fcoe_transport_lookup(
struct net_device *netdev)
{
struct fcoe_transport *t;
mutex_lock(&fcoe_transports_lock);
list_for_each_entry(t, &fcoe_transports, list) {
if (fcoe_transport_match(t, netdev)) {
mutex_unlock(&fcoe_transports_lock);
return t;
}
}
mutex_unlock(&fcoe_transports_lock);
printk(KERN_DEBUG "fcoe_transport_lookup:"
"use default transport for %s\n", netdev->name);
return fcoe_transport_default();
}
/**
* fcoe_transport_register - adds a fcoe transport to the fcoe transports list
* @t: ptr to the fcoe transport to be added
*
* Returns: 0 for success
**/
int fcoe_transport_register(struct fcoe_transport *t)
{
struct fcoe_transport *tt;
/* TODO - add fcoe_transport specific initialization here */
mutex_lock(&fcoe_transports_lock);
list_for_each_entry(tt, &fcoe_transports, list) {
if (tt == t) {
mutex_unlock(&fcoe_transports_lock);
return -EEXIST;
}
}
list_add_tail(&t->list, &fcoe_transports);
mutex_unlock(&fcoe_transports_lock);
mutex_init(&t->devlock);
INIT_LIST_HEAD(&t->devlist);
printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_transport_register);
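For context, a minimal sketch of how an offload driver might plug into this registration API. Everything below is hypothetical: the names, PCI ids, and callback bodies are made up and are not part of the patch.

static int example_create(struct net_device *netdev)
{
	return 0;	/* would set up the HBA on this netdev */
}

static int example_destroy(struct net_device *netdev)
{
	return 0;	/* would tear the HBA down again */
}

static struct fcoe_transport example_fcoe_transport = {
	.name	 = "example_fcoe",
	.vendor	 = 0x1234,		/* assumed PCI vendor id */
	.device	 = 0x5678,		/* assumed PCI device id */
	.create	 = example_create,
	.destroy = example_destroy,
};

/* typically called from the offload driver's module init */
static int __init example_fcoe_init(void)
{
	return fcoe_transport_register(&example_fcoe_transport);
}

On module exit the driver would call fcoe_transport_unregister() with the same struct.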
/**
 * fcoe_transport_unregister - remove the transport from the fcoe transports list
* @t: ptr to the fcoe transport to be removed
*
* Returns: 0 for success
**/
int fcoe_transport_unregister(struct fcoe_transport *t)
{
struct fcoe_transport *tt, *tmp;
mutex_lock(&fcoe_transports_lock);
list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
if (tt == t) {
list_del(&t->list);
mutex_unlock(&fcoe_transports_lock);
fcoe_transport_device_remove_all(t);
printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
t->name);
return 0;
}
}
mutex_unlock(&fcoe_transports_lock);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
/**
 * fcoe_load_transport_driver - load an offload driver by alias name
 * @netdev: the target net device
 *
 * Requests an offload driver module as the fcoe transport; if that fails,
 * the caller falls back to the SW HBA (fcoe_sw) as the transport.
 *
 * TODO -
 * 1. supports only PCI devices
 * 2. needs a fix for VLAN and bonding
 * 3. a pure hw fcoe hba may not have a netdev
*
* Returns: 0 for success
**/
int fcoe_load_transport_driver(struct net_device *netdev)
{
struct pci_dev *pci;
struct device *dev = netdev->dev.parent;
if (fcoe_transport_lookup(netdev)) {
/* load default transport */
printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
netdev->name);
return -EEXIST;
}
	if (dev->bus != &pci_bus_type) {
		printk(KERN_DEBUG "fcoe: only PCI devices are supported\n");
		return -ENODEV;
	}
	pci = to_pci_dev(dev);
printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
pci->vendor, pci->device);
return request_module("fcoe-pci-0x%04x-0x%04x",
pci->vendor, pci->device);
}
EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
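For the request_module() call above to resolve, an offload driver must advertise a matching module alias; a hedged example with made-up PCI ids:

/* hypothetical ids: lets request_module("fcoe-pci-0x1234-0x5678") find us */
MODULE_ALIAS("fcoe-pci-0x1234-0x5678");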
/**
* fcoe_transport_attach - load transport to fcoe
* @netdev: the netdev the transport to be attached to
*
 * This looks for an existing offload driver; if none is found, it falls
 * back to the default sw hba (fcoe_sw) as the fcoe transport.
*
* Returns: 0 for success
**/
int fcoe_transport_attach(struct net_device *netdev)
{
struct fcoe_transport *t;
/* find the corresponding transport */
t = fcoe_transport_lookup(netdev);
	if (!t) {
		printk(KERN_DEBUG "fcoe_transport_attach:"
		       "no transport found for %s\n", netdev->name);
		return -ENODEV;
	}
/* add to the transport */
if (fcoe_transport_device_add(t, netdev)) {
printk(KERN_DEBUG "fcoe_transport_attach"
		       ":failed to add %s to transport %s\n",
netdev->name, t->name);
return -EIO;
}
/* transport create function */
if (t->create)
t->create(netdev);
printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
t->name, netdev->name);
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_transport_attach);
/**
* fcoe_transport_release - unload transport from fcoe
* @netdev: the net device on which fcoe is to be released
*
* Returns: 0 for success
**/
int fcoe_transport_release(struct net_device *netdev)
{
struct fcoe_transport *t;
/* find the corresponding transport */
t = fcoe_transport_lookup(netdev);
	if (!t) {
		printk(KERN_DEBUG "fcoe_transport_release:"
		       "no transport found for %s\n", netdev->name);
		return -ENODEV;
	}
/* remove the device from the transport */
if (fcoe_transport_device_remove(t, netdev)) {
printk(KERN_DEBUG "fcoe_transport_release:"
"failed to add %s to tramsport %s\n",
netdev->name, t->name);
return -EIO;
}
/* transport destroy function */
if (t->destroy)
t->destroy(netdev);
printk(KERN_DEBUG "fcoe_transport_release:"
"device %s dettached from transport %s\n",
netdev->name, t->name);
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_transport_release);
/**
* fcoe_transport_init - initializes fcoe transport layer
*
 * This prepares the fcoe transport layer
 *
 * Returns: 0 for success
**/
int __init fcoe_transport_init(void)
{
INIT_LIST_HEAD(&fcoe_transports);
mutex_init(&fcoe_transports_lock);
return 0;
}
/**
* fcoe_transport_exit - cleans up the fcoe transport layer
 * This cleans up the fcoe transport layer, removing every transport on the
 * list; note that the transport's destroy function is not called here.
 *
 * Returns: 0 for success
**/
int __exit fcoe_transport_exit(void)
{
struct fcoe_transport *t, *tmp;
mutex_lock(&fcoe_transports_lock);
list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
list_del(&t->list);
mutex_unlock(&fcoe_transports_lock);
fcoe_transport_device_remove_all(t);
mutex_lock(&fcoe_transports_lock);
}
mutex_unlock(&fcoe_transports_lock);
return 0;
}

drivers/scsi/fcoe/fcoe_sw.c (new file, 494 lines)

@ -0,0 +1,494 @@
/*
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include <scsi/fc_transport_fcoe.h>
#define FCOE_SW_VERSION "0.1"
#define FCOE_SW_NAME "fcoesw"
#define FCOE_SW_VENDOR "Open-FCoE.org"
#define FCOE_MAX_LUN 255
#define FCOE_MAX_FCP_TARGET 256
#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
static struct scsi_transport_template *scsi_transport_fcoe_sw;
struct fc_function_template fcoe_sw_transport_function = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
.get_host_port_state = fc_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.show_host_fabric_name = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.show_starget_port_id = 1,
.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_fc_host_stats = fc_get_host_stats,
.issue_fc_host_lip = fcoe_reset,
.terminate_rport_io = fc_rport_terminate_io,
};
static struct scsi_host_template fcoe_sw_shost_template = {
.module = THIS_MODULE,
.name = "FCoE Driver",
.proc_name = FCOE_SW_NAME,
.queuecommand = fc_queuecommand,
.eh_abort_handler = fc_eh_abort,
.eh_device_reset_handler = fc_eh_device_reset,
.eh_host_reset_handler = fc_eh_host_reset,
.slave_alloc = fc_slave_alloc,
.change_queue_depth = fc_change_queue_depth,
.change_queue_type = fc_change_queue_type,
.this_id = -1,
.cmd_per_lun = 32,
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
};
/*
* fcoe_sw_lport_config - sets up the fc_lport
* @lp: ptr to the fc_lport
*
* Returns: 0 for success
*
*/
static int fcoe_sw_lport_config(struct fc_lport *lp)
{
int i = 0;
lp->link_status = 0;
lp->max_retry_count = 3;
lp->e_d_tov = 2 * 1000; /* FC-FS default */
lp->r_a_tov = 2 * 2 * 1000;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
/*
* allocate per cpu stats block
*/
for_each_online_cpu(i)
lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
GFP_KERNEL);
/* lport fc_lport related configuration */
fc_lport_config(lp);
return 0;
}
/*
* fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
* related properties
* @lp : ptr to the fc_lport
* @netdev : ptr to the associated netdevice struct
*
* Must be called after fcoe_sw_lport_config() as it will use lport mutex
*
* Returns : 0 for success
*
*/
static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
{
u32 mfs;
u64 wwnn, wwpn;
struct fcoe_softc *fc;
u8 flogi_maddr[ETH_ALEN];
/* Setup lport private data to point to fcoe softc */
fc = lport_priv(lp);
fc->lp = lp;
fc->real_dev = netdev;
fc->phys_dev = netdev;
/* Require support for get_pauseparam ethtool op. */
if (netdev->priv_flags & IFF_802_1Q_VLAN)
fc->phys_dev = vlan_dev_real_dev(netdev);
	/* bonding devices are not supported */
if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
(fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
(fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
return -EOPNOTSUPP;
}
/*
* Determine max frame size based on underlying device and optional
* user-configured limit. If the MFS is too low, fcoe_link_ok()
* will return 0, so do this first.
*/
mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
sizeof(struct fcoe_crc_eof));
if (fc_set_mfs(lp, mfs))
return -EINVAL;
lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
if (!fcoe_link_ok(lp))
lp->link_status |= FC_LINK_UP;
/* offload features support */
if (fc->real_dev->features & NETIF_F_SG)
lp->sg_supp = 1;
skb_queue_head_init(&fc->fcoe_pending_queue);
/* setup Source Mac Address */
memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
fc->real_dev->addr_len);
wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
fc_set_wwnn(lp, wwnn);
/* XXX - 3rd arg needs to be vlan id */
wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
fc_set_wwpn(lp, wwpn);
/*
* Add FCoE MAC address as second unicast MAC address
* or enter promiscuous mode if not capable of listening
* for multiple unicast MACs.
*/
rtnl_lock();
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
rtnl_unlock();
/*
* setup the receive function from ethernet driver
* on the ethertype for the given device
*/
fc->fcoe_packet_type.func = fcoe_rcv;
fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
fc->fcoe_packet_type.dev = fc->real_dev;
dev_add_pack(&fc->fcoe_packet_type);
return 0;
}
/*
* fcoe_sw_shost_config - sets up fc_lport->host
* @lp : ptr to the fc_lport
* @shost : ptr to the associated scsi host
* @dev : device associated to scsi host
*
 * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
*
* Returns : 0 for success
*
*/
static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
struct device *dev)
{
int rc = 0;
/* lport scsi host config */
lp->host = shost;
lp->host->max_lun = FCOE_MAX_LUN;
lp->host->max_id = FCOE_MAX_FCP_TARGET;
lp->host->max_channel = 0;
lp->host->transportt = scsi_transport_fcoe_sw;
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lp->host, dev);
if (rc) {
FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
return rc;
}
sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
FCOE_SW_NAME, FCOE_SW_VERSION,
fcoe_netdev(lp)->name);
return 0;
}
/*
* fcoe_sw_em_config - allocates em for this lport
 * @lp: the port the em is to be allocated for
*
* Returns : 0 on success
*/
static inline int fcoe_sw_em_config(struct fc_lport *lp)
{
BUG_ON(lp->emp);
lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
FCOE_MIN_XID, FCOE_MAX_XID);
if (!lp->emp)
return -ENOMEM;
return 0;
}
/*
* fcoe_sw_destroy - FCoE software HBA tear-down function
* @netdev: ptr to the associated net_device
*
 * Returns: 0 on success
*/
static int fcoe_sw_destroy(struct net_device *netdev)
{
int cpu;
struct fc_lport *lp = NULL;
struct fcoe_softc *fc;
u8 flogi_maddr[ETH_ALEN];
BUG_ON(!netdev);
printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
netdev->name);
lp = fcoe_hostlist_lookup(netdev);
if (!lp)
return -ENODEV;
fc = fcoe_softc(lp);
/* Logout of the fabric */
fc_fabric_logoff(lp);
/* Remove the instance from fcoe's list */
fcoe_hostlist_remove(lp);
/* Don't listen for Ethernet packets anymore */
dev_remove_pack(&fc->fcoe_packet_type);
/* Cleanup the fc_lport */
fc_lport_destroy(lp);
fc_fcp_destroy(lp);
/* Detach from the scsi-ml */
fc_remove_host(lp->host);
scsi_remove_host(lp->host);
/* There are no more rports or I/O, free the EM */
if (lp->emp)
fc_exch_mgr_free(lp->emp);
/* Delete secondary MAC addresses */
rtnl_lock();
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
rtnl_unlock();
	/* Free the per-CPU receive threads */
fcoe_percpu_clean(lp);
/* Free existing skbs */
fcoe_clean_pending_queue(lp);
/* Free memory used by statistical counters */
for_each_online_cpu(cpu)
kfree(lp->dev_stats[cpu]);
/* Release the net_device and Scsi_Host */
dev_put(fc->real_dev);
scsi_host_put(lp->host);
return 0;
}
static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
};
/*
* fcoe_sw_create - this function creates the fcoe interface
 * @netdev: pointer to the associated netdevice
*
* Creates fc_lport struct and scsi_host for lport, configures lport
* and starts fabric login.
*
* Returns : 0 on success
*/
static int fcoe_sw_create(struct net_device *netdev)
{
int rc;
struct fc_lport *lp = NULL;
struct fcoe_softc *fc;
struct Scsi_Host *shost;
BUG_ON(!netdev);
printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
netdev->name);
lp = fcoe_hostlist_lookup(netdev);
if (lp)
return -EEXIST;
shost = fcoe_host_alloc(&fcoe_sw_shost_template,
sizeof(struct fcoe_softc));
if (!shost) {
FC_DBG("Could not allocate host structure\n");
return -ENOMEM;
}
lp = shost_priv(shost);
fc = lport_priv(lp);
/* configure fc_lport, e.g., em */
rc = fcoe_sw_lport_config(lp);
if (rc) {
FC_DBG("Could not configure lport\n");
goto out_host_put;
}
/* configure lport network properties */
rc = fcoe_sw_netdev_config(lp, netdev);
if (rc) {
FC_DBG("Could not configure netdev for lport\n");
goto out_host_put;
}
/* configure lport scsi host properties */
rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
if (rc) {
FC_DBG("Could not configure shost for lport\n");
goto out_host_put;
}
/* lport exch manager allocation */
rc = fcoe_sw_em_config(lp);
if (rc) {
FC_DBG("Could not configure em for lport\n");
goto out_host_put;
}
/* Initialize the library */
rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
if (rc) {
FC_DBG("Could not configure libfc for lport!\n");
goto out_lp_destroy;
}
/* add to lports list */
fcoe_hostlist_add(lp);
lp->boot_time = jiffies;
fc_fabric_login(lp);
dev_hold(netdev);
return rc;
out_lp_destroy:
fc_exch_mgr_free(lp->emp); /* Free the EM */
out_host_put:
scsi_host_put(lp->host);
return rc;
}
/*
* fcoe_sw_match - the fcoe sw transport match function
*
* Returns : false always
*/
static bool fcoe_sw_match(struct net_device *netdev)
{
/* FIXME - for sw transport, always return false */
return false;
}
/* the sw hba fcoe transport */
struct fcoe_transport fcoe_sw_transport = {
.name = "fcoesw",
.create = fcoe_sw_create,
.destroy = fcoe_sw_destroy,
.match = fcoe_sw_match,
.vendor = 0x0,
.device = 0xffff,
};
/*
* fcoe_sw_init - registers fcoe_sw_transport
*
* Returns : 0 on success
*/
int __init fcoe_sw_init(void)
{
/* attach to scsi transport */
scsi_transport_fcoe_sw =
fc_attach_transport(&fcoe_sw_transport_function);
if (!scsi_transport_fcoe_sw) {
printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
return -ENODEV;
}
/* register sw transport */
fcoe_transport_register(&fcoe_sw_transport);
return 0;
}
/*
* fcoe_sw_exit - unregisters fcoe_sw_transport
*
* Returns : 0 on success
*/
int __exit fcoe_sw_exit(void)
{
	/* detach the transport */
fc_release_transport(scsi_transport_fcoe_sw);
fcoe_transport_unregister(&fcoe_sw_transport);
return 0;
}

drivers/scsi/fcoe/libfcoe.c (new file, 1510 lines)

(Diff not shown because of its large size.)


@ -3,7 +3,7 @@
* Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
* Shared IRQ supported added 7/7/2001 Alan Cox <alan@redhat.com>
* Shared IRQ supported added 7/7/2001 Alan Cox <alan@lxorguk.ukuu.org.uk>
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the


@ -10,7 +10,7 @@
See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
updates, info and ADF-files for adapters supported by this driver.
Alan Cox <alan@redhat.com>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Updated for Linux 2.5.45 to use the new error handler, cleaned up the
lock macros and did a few unavoidable locking tweaks, plus one locking
fix in the irq and completion path.


@ -121,6 +121,7 @@ static const struct {
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
@ -278,13 +279,6 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
rsp->data.info.rsp_code))
return DID_ERROR << 16;
if (!vfc_cmd->status) {
if (rsp->flags & FCP_RESID_OVER)
return rsp->scsi_status | (DID_ERROR << 16);
else
return rsp->scsi_status | (DID_OK << 16);
}
err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
if (err >= 0)
return rsp->scsi_status | (cmd_status[err].result << 16);
@ -503,6 +497,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_NONE:
default:
@ -566,7 +561,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
struct ibmvfc_target *tgt;
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
@ -765,6 +760,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
if (evt->eh_comp)
complete(evt->eh_comp);
ibmvfc_free_event(evt);
}
@ -847,11 +845,12 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
vhost->delay_init = 1;
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
} else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
__ibmvfc_reset_host(vhost);
else
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@ -1252,6 +1251,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
evt->sync_iu = NULL;
evt->crq.format = format;
evt->done = done;
evt->eh_comp = NULL;
}
/**
@ -1381,6 +1381,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
add_timer(&evt->timer);
}
mb();
if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
list_del(&evt->queue);
del_timer(&evt->timer);
@ -1477,6 +1479,11 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
ibmvfc_reinit_host(evt->vhost);
if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
cmnd->result = (DID_ERROR << 16);
ibmvfc_log_error(evt);
}
@ -1489,6 +1496,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
if (evt->eh_comp)
complete(evt->eh_comp);
ibmvfc_free_event(evt);
}
@ -1627,7 +1637,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt;
struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
int rsp_rc = -EBUSY;
@ -1789,7 +1799,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
struct ibmvfc_tmf *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
@ -1827,7 +1838,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
int_to_scsilun(sdev->lun, &tmf->lun);
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
tmf->my_cancel_key = (unsigned long)starget->hostdata;
evt->sync_iu = &rsp;
init_completion(&evt->comp);
@ -1858,6 +1869,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
return 0;
}
/**
* ibmvfc_match_target - Match function for specified target
* @evt: ibmvfc event struct
* @device: device to match (starget)
*
* Returns:
* 1 if event matches starget / 0 if event does not match starget
**/
static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
{
if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
return 1;
return 0;
}
/**
* ibmvfc_match_lun - Match function for specified LUN
* @evt: ibmvfc event struct
* @device: device to match (sdev)
*
* Returns:
* 1 if event matches sdev / 0 if event does not match sdev
**/
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
{
if (evt->cmnd && evt->cmnd->device == device)
return 1;
return 0;
}
/**
* ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct
* @device: device to match (starget or sdev)
* @match: match function
*
* Returns:
* SUCCESS / FAILED
**/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
int (*match) (struct ibmvfc_event *, void *))
{
struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
signed long timeout = init_timeout * HZ;
ENTER;
do {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->sent, queue) {
if (match(evt, device)) {
evt->eh_comp = &comp;
wait++;
}
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait) {
timeout = wait_for_completion_timeout(&comp, timeout);
if (!timeout) {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->sent, queue) {
if (match(evt, device)) {
evt->eh_comp = NULL;
wait++;
}
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait)
dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
LEAVE;
return wait ? FAILED : SUCCESS;
}
}
} while (wait);
LEAVE;
return SUCCESS;
}
/**
* ibmvfc_eh_abort_handler - Abort a command
* @cmd: scsi command to abort
@ -1867,29 +1963,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
**/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
struct ibmvfc_event *evt, *pos;
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, abort_rc;
unsigned long flags;
int rc = FAILED;
ENTER;
ibmvfc_wait_while_resetting(vhost);
cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
abort_rc = ibmvfc_abort_task_set(cmd->device);
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
abort_rc = ibmvfc_abort_task_set(sdev);
if (!cancel_rc && !abort_rc) {
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == cmd->device)
ibmvfc_fail_request(evt, DID_ABORT);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
return SUCCESS;
}
if (!cancel_rc && !abort_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
LEAVE;
return FAILED;
return rc;
}
/**
@ -1901,29 +1989,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
**/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
struct ibmvfc_event *evt, *pos;
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, reset_rc;
unsigned long flags;
int rc = FAILED;
ENTER;
ibmvfc_wait_while_resetting(vhost);
cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
if (!cancel_rc && !reset_rc) {
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == cmd->device)
ibmvfc_fail_request(evt, DID_ABORT);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
return SUCCESS;
}
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
LEAVE;
return FAILED;
return rc;
}
/**
@ -1959,31 +2039,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
struct scsi_target *starget = scsi_target(cmd->device);
struct ibmvfc_event *evt, *pos;
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
int reset_rc;
int rc = FAILED;
unsigned long cancel_rc = 0;
unsigned long flags;
ENTER;
ibmvfc_wait_while_resetting(vhost);
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
if (!cancel_rc && !reset_rc) {
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
ibmvfc_fail_request(evt, DID_ABORT);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
return SUCCESS;
}
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
LEAVE;
return FAILED;
return rc;
}
/**
@ -2013,23 +2085,18 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct scsi_target *starget = to_scsi_target(&rport->dev);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
struct ibmvfc_event *evt, *pos;
unsigned long cancel_rc = 0;
unsigned long abort_rc = 0;
unsigned long flags;
int rc = FAILED;
ENTER;
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
if (!cancel_rc && !abort_rc) {
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
ibmvfc_fail_request(evt, DID_ABORT);
}
spin_unlock_irqrestore(shost->host_lock, flags);
} else
if (!cancel_rc && !abort_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
LEAVE;
}
@ -2089,15 +2156,17 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_LINK_UP:
case IBMVFC_AE_RESUME:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
ibmvfc_init_host(vhost, 1);
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
case IBMVFC_AE_SCN_FABRIC:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
ibmvfc_init_host(vhost, 1);
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
case IBMVFC_AE_ELS_LOGO:
case IBMVFC_AE_ELS_PRLO:
@ -2262,6 +2331,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
return 0;
}
/**
* ibmvfc_target_alloc - Setup the target's task set value
* @starget: struct scsi_target
*
* Set the target's task set value so that error handling works as
* expected.
*
* Returns:
 *	0 on success
**/
static int ibmvfc_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
starget->hostdata = (void *)(unsigned long)vhost->task_set++;
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
/**
* ibmvfc_slave_configure - Configure the device
* @sdev: struct scsi_device device to configure
@ -2541,6 +2632,7 @@ static struct scsi_host_template driver_template = {
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
.slave_alloc = ibmvfc_slave_alloc,
.slave_configure = ibmvfc_slave_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
.change_queue_type = ibmvfc_change_queue_type,
@ -2637,7 +2729,7 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
} else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
crq->valid = 0;
async->valid = 0;
} else
done = 1;
}
@ -2669,7 +2761,7 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
wake_up(&tgt->vhost->work_wait_q);
} else
@ -2708,6 +2800,8 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
rsp->status, rsp->error, status);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@ -2802,6 +2896,8 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@ -3093,6 +3189,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@ -3423,6 +3521,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
default:
break;
@ -3519,7 +3618,13 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
case IBMVFC_HOST_ACTION_INIT:
BUG_ON(vhost->state != IBMVFC_INITIALIZING);
vhost->job_step(vhost);
if (vhost->delay_init) {
vhost->delay_init = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ssleep(15);
return;
} else
vhost->job_step(vhost);
break;
case IBMVFC_HOST_ACTION_QUERY:
list_for_each_entry(tgt, &vhost->targets, queue)
@ -3538,6 +3643,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
break;
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
@ -3553,8 +3659,17 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
}
if (vhost->state == IBMVFC_INITIALIZING) {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_discover_targets;
if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
return;
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_discover_targets;
}
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@ -3577,14 +3692,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
}
}
if (!ibmvfc_dev_init_to_do(vhost)) {
ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
return;
}
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
break;
case IBMVFC_HOST_ACTION_TGT_ADD:
list_for_each_entry(tgt, &vhost->targets, queue) {
@ -3592,16 +3701,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
return;
} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
tgt->rport = NULL;
list_del(&tgt->queue);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
}
}


@ -29,11 +29,11 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.2"
#define IBMVFC_DRIVER_DATE "(August 14, 2008)"
#define IBMVFC_DRIVER_VERSION "1.0.4"
#define IBMVFC_DRIVER_DATE "(November 14, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
#define IBMVFC_INIT_TIMEOUT 120
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
@ -43,7 +43,8 @@
#define IBMVFC_MAX_DISC_THREADS 4
#define IBMVFC_TGT_MEMPOOL_SZ 64
#define IBMVFC_MAX_CMDS_PER_LUN 64
#define IBMVFC_MAX_INIT_RETRIES 3
#define IBMVFC_MAX_HOST_INIT_RETRIES 6
#define IBMVFC_MAX_TGT_INIT_RETRIES 3
#define IBMVFC_DEV_LOSS_TMO (5 * 60)
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
@ -109,6 +110,7 @@ enum ibmvfc_vios_errors {
IBMVFC_TRANS_CANCELLED = 0x0006,
IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
IBMVFC_PLOGI_REQUIRED = 0x0010,
IBMVFC_COMMAND_FAILED = 0x8000,
};
@ -337,7 +339,6 @@ struct ibmvfc_tmf {
#define IBMVFC_TMF_LUA_VALID 0x40
u32 cancel_key;
u32 my_cancel_key;
#define IBMVFC_TMF_CANCEL_KEY 0x80000000
u32 pad;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
@ -524,10 +525,10 @@ enum ibmvfc_async_event {
};
struct ibmvfc_crq {
u8 valid;
u8 format;
volatile u8 valid;
volatile u8 format;
u8 reserved[6];
u64 ioba;
volatile u64 ioba;
}__attribute__((packed, aligned (8)));
struct ibmvfc_crq_queue {
@ -537,13 +538,13 @@ struct ibmvfc_crq_queue {
};
struct ibmvfc_async_crq {
u8 valid;
volatile u8 valid;
u8 pad[3];
u32 pad2;
u64 event;
u64 scsi_id;
u64 wwpn;
u64 node_name;
volatile u64 event;
volatile u64 scsi_id;
volatile u64 wwpn;
volatile u64 node_name;
u64 reserved;
}__attribute__((packed, aligned (8)));
@ -606,6 +607,7 @@ struct ibmvfc_event {
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
struct completion comp;
struct completion *eh_comp;
struct timer_list timer;
};
@ -626,6 +628,7 @@ enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_TGT_DEL,
IBMVFC_HOST_ACTION_ALLOC_TGTS,
IBMVFC_HOST_ACTION_TGT_INIT,
IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
IBMVFC_HOST_ACTION_TGT_ADD,
};
@ -671,6 +674,7 @@ struct ibmvfc_host {
int discovery_threads;
int client_migrated;
int reinit;
int delay_init;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
@ -700,7 +704,7 @@ struct ibmvfc_host {
#define ibmvfc_log(vhost, level, ...) \
do { \
if (level >= (vhost)->log_level) \
if ((vhost)->log_level >= level) \
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)


@ -107,7 +107,7 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
/* ------------------------------------------------------------
@ -1657,7 +1657,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
vdev->dev.driver_data = NULL;
driver_template.can_queue = max_requests;
driver_template.can_queue = max_requests - 2;
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
if (!host) {
dev_err(&vdev->dev, "couldn't allocate host data\n");


@ -107,7 +107,7 @@
* this thing into as good a shape as possible, and I'm positive
* there are lots of lurking bugs and "Stupid Places".
*
* Updated for Linux 2.5 by Alan Cox <alan@redhat.com>
* Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Using new_eh handler
* - Hopefully got all the locking right again
* See "FIXME" notes for items that could do with more work


@ -4,7 +4,7 @@
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
* Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2007 Red Hat <alan@redhat.com>
* Copyright (c) 2007 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@ -4,7 +4,7 @@
* Copyright (c) 1994-1998 Initio Corporation
* All rights reserved.
*
* Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
* Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@ -5389,9 +5389,9 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock_irq(ioa_cfg->host->host_lock);
spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
spin_lock_irq(ioa_cfg->host->host_lock);
spin_lock(ioa_cfg->host->host_lock);
if (!ioa_cfg->allow_cmds)
scsi_block_requests(ioa_cfg->host);
@ -7473,7 +7473,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto out_scsi_host_put;
}
ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,


@ -19,7 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Alan Cox <alan@redhat.com> - Removed several careless u32/dma_addr_t errors
* Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
* that broke 64bit platforms.
*/

Diff for this file is not shown because of its large size.


@ -19,67 +19,27 @@
* See the file COPYING included with this distribution for more details.
*/
#ifndef ISCSI_TCP_H
#define ISCSI_TCP_H
#ifndef ISCSI_SW_TCP_H
#define ISCSI_SW_TCP_H
#include <scsi/libiscsi.h>
#include <scsi/libiscsi_tcp.h>
struct crypto_hash;
struct socket;
struct iscsi_tcp_conn;
struct iscsi_segment;
typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
struct iscsi_segment *);
struct iscsi_segment {
unsigned char *data;
unsigned int size;
unsigned int copied;
unsigned int total_size;
unsigned int total_copied;
struct hash_desc *hash;
unsigned char recv_digest[ISCSI_DIGEST_SIZE];
unsigned char digest[ISCSI_DIGEST_SIZE];
unsigned int digest_len;
struct scatterlist *sg;
void *sg_mapped;
unsigned int sg_offset;
iscsi_segment_done_fn_t *done;
};
/* Socket connection receive helper */
struct iscsi_tcp_recv {
struct iscsi_hdr *hdr;
struct iscsi_segment segment;
/* Allocate buffer for BHS + AHS */
uint32_t hdr_buf[64];
/* copied and flipped values */
int datalen;
};
/* Socket connection send helper */
struct iscsi_tcp_send {
struct iscsi_sw_tcp_send {
struct iscsi_hdr *hdr;
struct iscsi_segment segment;
struct iscsi_segment data_segment;
};
struct iscsi_tcp_conn {
struct iscsi_sw_tcp_conn {
struct iscsi_conn *iscsi_conn;
struct socket *sock;
int stop_stage; /* conn_stop() flag: *
* stop to recover, *
* stop to terminate */
/* control data */
struct iscsi_tcp_recv in; /* TCP receive context */
struct iscsi_tcp_send out; /* TCP send context */
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
@ -93,41 +53,13 @@ struct iscsi_tcp_conn {
uint32_t sendpage_failures_cnt;
uint32_t discontiguous_hdr_cnt;
int error;
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
struct iscsi_data_task {
struct iscsi_data hdr; /* PDU */
char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
};
struct iscsi_r2t_info {
__be32 ttt; /* copied from R2T */
__be32 exp_statsn; /* copied from R2T */
uint32_t data_length; /* copied from R2T */
uint32_t data_offset; /* copied from R2T */
int sent; /* R2T sequence progress */
int data_count; /* DATA-Out payload progress */
int solicit_datasn;
struct iscsi_data_task dtask; /* Data-Out header buf */
};
struct iscsi_tcp_task {
struct iscsi_hdr_buff {
struct iscsi_cmd cmd_hdr;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
struct iscsi_sw_tcp_hdrbuf {
struct iscsi_hdr hdrbuf;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
ISCSI_DIGEST_SIZE];
} hdr;
int sent;
uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
int data_offset;
struct iscsi_r2t_info *r2t; /* in progress R2T */
struct iscsi_pool r2tpool;
struct kfifo *r2tqueue;
struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
};
#endif /* ISCSI_H */
#endif /* ISCSI_SW_TCP_H */


@ -0,0 +1,12 @@
# $Id: Makefile
obj-$(CONFIG_LIBFC) += libfc.o
libfc-objs := \
fc_disc.o \
fc_exch.o \
fc_elsct.o \
fc_frame.o \
fc_lport.o \
fc_rport.o \
fc_fcp.o


@ -0,0 +1,845 @@
/*
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Target Discovery
*
* This block discovers all FC-4 remote ports, including FCP initiators. It
* also handles RSCN events and re-discovery if necessary.
*/
/*
* DISC LOCKING
*
* The disc mutex can be held when acquiring rport locks, but may not
* be held when acquiring the lport lock. Refer to fc_lport.c for more
* details.
*/
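In other words, the locking comment establishes a hierarchy: the lport lock may not be taken while the disc mutex is held, but rport locks may. A toy illustration of the allowed nesting, with plain pthread mutexes standing in for the three locks:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the lport lock, disc mutex and an rport lock. */
static pthread_mutex_t lport = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t disc  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rport = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	/* Allowed: disc may be held while taking an rport lock. */
	pthread_mutex_lock(&disc);
	pthread_mutex_lock(&rport);
	printf("disc -> rport: ok\n");
	pthread_mutex_unlock(&rport);
	pthread_mutex_unlock(&disc);

	/* Allowed: lport first, then disc. */
	pthread_mutex_lock(&lport);
	pthread_mutex_lock(&disc);
	printf("lport -> disc: ok\n");
	pthread_mutex_unlock(&disc);
	pthread_mutex_unlock(&lport);

	/* Forbidden by the comment above: taking lport while holding disc
	 * inverts the order and can deadlock against the sequence above. */
	return 0;
}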
#include <linux/timer.h>
#include <linux/err.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/libfc.h>
#define FC_DISC_RETRY_LIMIT 3 /* max retries */
#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
#define FC_DISC_DELAY 3
static int fc_disc_debug;
#define FC_DEBUG_DISC(fmt...) \
do { \
if (fc_disc_debug) \
FC_DBG(fmt); \
} while (0)
static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
struct fc_rport_identifiers *);
static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
static void fc_disc_done(struct fc_disc *);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_lookup_rport - lookup a remote port by port_id
* @lport: Fibre Channel host port instance
* @port_id: remote port port_id to match
*/
struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
u32 port_id)
{
const struct fc_disc *disc = &lport->disc;
struct fc_rport *rport, *found = NULL;
struct fc_rport_libfc_priv *rdata;
int disc_found = 0;
list_for_each_entry(rdata, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
if (rport->port_id == port_id) {
disc_found = 1;
found = rport;
break;
}
}
if (!disc_found)
found = NULL;
return found;
}
/**
* fc_disc_stop_rports - delete all the remote ports associated with the lport
* @disc: The discovery job to stop rports on
*
* Locking Note: This function expects that the lport mutex is locked before
* calling it.
*/
void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata, *next;
lport = disc->lport;
mutex_lock(&disc->disc_mutex);
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_rport_callback - Event handler for rport events
* @lport: The lport which is receiving the event
* @rport: The rport which the event has occurred on
* @event: The event that occurred
*
* Locking Note: The rport lock should not be held when calling
* this function.
*/
static void fc_disc_rport_callback(struct fc_lport *lport,
struct fc_rport *rport,
enum fc_rport_event event)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_disc *disc = &lport->disc;
int found = 0;
FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
rport->port_id);
if (event == RPORT_EV_CREATED) {
if (disc) {
found = 1;
mutex_lock(&disc->disc_mutex);
list_add_tail(&rdata->peers, &disc->rports);
mutex_unlock(&disc->disc_mutex);
}
}
if (!found)
FC_DEBUG_DISC("The rport (%6x) is not maintained "
"by the discovery layer\n", rport->port_id);
}
/**
* fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
* @sp: Current sequence of the RSCN exchange
* @fp: RSCN Frame
* @lport: Fibre Channel host port instance
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata;
struct fc_els_rscn *rp;
struct fc_els_rscn_page *pp;
struct fc_seq_els_data rjt_data;
unsigned int len;
int redisc = 0;
enum fc_els_rscn_ev_qual ev_qual;
enum fc_els_rscn_addr_fmt fmt;
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
lport = disc->lport;
FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
fc_host_port_id(lport->host));
/* make sure the frame contains an RSCN message */
rp = fc_frame_payload_get(fp, sizeof(*rp));
if (!rp)
goto reject;
/* make sure the page length is as expected (4 bytes) */
if (rp->rscn_page_len != sizeof(*pp))
goto reject;
/* get the RSCN payload length */
len = ntohs(rp->rscn_plen);
if (len < sizeof(*rp))
goto reject;
/* make sure the frame contains the expected payload */
rp = fc_frame_payload_get(fp, len);
if (!rp)
goto reject;
/* payload must be a multiple of the RSCN page size */
len -= sizeof(*rp);
if (len % sizeof(*pp))
goto reject;
for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
ev_qual &= ELS_RSCN_EV_QUAL_MASK;
fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
fmt &= ELS_RSCN_ADDR_FMT_MASK;
/*
* if we get an address format other than port
* (area, domain, fabric), then do a full discovery
*/
switch (fmt) {
case ELS_ADDR_FMT_PORT:
FC_DEBUG_DISC("Port address format for port (%6x)\n",
ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
break;
}
dp->lp = lport;
dp->ids.port_id = ntoh24(pp->rscn_fid);
dp->ids.port_name = -1;
dp->ids.node_name = -1;
dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
list_add_tail(&dp->peers, &disc_ports);
break;
case ELS_ADDR_FMT_AREA:
case ELS_ADDR_FMT_DOM:
case ELS_ADDR_FMT_FAB:
default:
FC_DEBUG_DISC("Address format is (%d)\n", fmt);
redisc = 1;
break;
}
}
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
if (redisc) {
FC_DEBUG_DISC("RSCN received: rediscovering\n");
fc_disc_restart(disc);
} else {
FC_DEBUG_DISC("RSCN received: not rediscovering. "
"redisc %d state %d in_prog %d\n",
redisc, lport->state, disc->pending);
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
if (rport) {
rdata = RPORT_TO_PRIV(rport);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
fc_disc_single(disc, dp);
}
}
fc_frame_free(fp);
return;
reject:
FC_DEBUG_DISC("Received a bad RSCN frame\n");
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
/**
* fc_disc_recv_req - Handle incoming requests
* @sp: Current sequence of the request exchange
* @fp: The frame
* @lport: The FC local port
*
* Locking Note: This function is called from the EM and will lock
* the disc_mutex before calling the handler for the
* request.
*/
static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport)
{
u8 op;
struct fc_disc *disc = &lport->disc;
op = fc_frame_payload_op(fp);
switch (op) {
case ELS_RSCN:
mutex_lock(&disc->disc_mutex);
fc_disc_recv_rscn_req(sp, fp, disc);
mutex_unlock(&disc->disc_mutex);
break;
default:
FC_DBG("Received an unsupported request. opcode (%x)\n", op);
break;
}
}
/**
* fc_disc_restart - Restart discovery
* @lport: FC discovery context
*
* Locking Note: This function expects that the disc mutex
* is already locked.
*/
static void fc_disc_restart(struct fc_disc *disc)
{
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata, *next;
struct fc_lport *lport = disc->lport;
FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
fc_host_port_id(lport->host));
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
disc->requested = 1;
if (!disc->pending)
fc_disc_gpn_ft_req(disc);
}
/**
* fc_disc_start - Fibre Channel Target discovery
* @lport: FC local port
*
* Returns non-zero if discovery cannot be started.
*/
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
enum fc_disc_event),
struct fc_lport *lport)
{
struct fc_rport *rport;
struct fc_rport_identifiers ids;
struct fc_disc *disc = &lport->disc;
/*
* At this point we may have a new disc job or an existing
* one. Either way, let's lock when we make changes to it
* and send the GPN_FT request.
*/
mutex_lock(&disc->disc_mutex);
disc->disc_callback = disc_callback;
/*
* If not ready, or already running discovery, just set request flag.
*/
disc->requested = 1;
if (disc->pending) {
mutex_unlock(&disc->disc_mutex);
return;
}
/*
* Handle point-to-point mode as a simple discovery
* of the remote port. Yucky, yucky, yuck, yuck!
*/
rport = disc->lport->ptp_rp;
if (rport) {
ids.port_id = rport->port_id;
ids.port_name = rport->port_name;
ids.node_name = rport->node_name;
ids.roles = FC_RPORT_ROLE_UNKNOWN;
get_device(&rport->dev);
if (!fc_disc_new_target(disc, rport, &ids)) {
disc->event = DISC_EV_SUCCESS;
fc_disc_done(disc);
}
put_device(&rport->dev);
} else {
fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
}
mutex_unlock(&disc->disc_mutex);
}
static struct fc_rport_operations fc_disc_rport_ops = {
.event_callback = fc_disc_rport_callback,
};
/**
* fc_disc_new_target - Handle new target found by discovery
* @lport: FC local port
* @rport: The previous FC remote port (NULL if new remote port)
* @ids: Identifiers for the new FC remote port
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static int fc_disc_new_target(struct fc_disc *disc,
struct fc_rport *rport,
struct fc_rport_identifiers *ids)
{
struct fc_lport *lport = disc->lport;
struct fc_rport_libfc_priv *rp;
int error = 0;
if (rport && ids->port_name) {
if (rport->port_name == -1) {
/*
* Set WWN and fall through to notify of create.
*/
fc_rport_set_name(rport, ids->port_name,
rport->node_name);
} else if (rport->port_name != ids->port_name) {
/*
* This is a new port with the same FCID as
* a previously-discovered port. Presumably the old
* port logged out and a new port logged in and was
* assigned the same FCID. This should be rare.
* Delete the old one and fall thru to re-create.
*/
fc_disc_del_target(disc, rport);
rport = NULL;
}
}
if (((ids->port_name != -1) || (ids->port_id != -1)) &&
ids->port_id != fc_host_port_id(lport->host) &&
ids->port_name != lport->wwpn) {
if (!rport) {
rport = lport->tt.rport_lookup(lport, ids->port_id);
if (!rport) {
struct fc_disc_port dp;
dp.lp = lport;
dp.ids.port_id = ids->port_id;
dp.ids.port_name = ids->port_name;
dp.ids.node_name = ids->node_name;
dp.ids.roles = ids->roles;
rport = fc_rport_rogue_create(&dp);
}
if (!rport)
error = -ENOMEM;
}
if (rport) {
rp = rport->dd_data;
rp->ops = &fc_disc_rport_ops;
rp->rp_state = RPORT_ST_INIT;
lport->tt.rport_login(rport);
}
}
return error;
}
/**
* fc_disc_del_target - Delete a target
* @disc: FC discovery context
* @rport: The remote port to be removed
*/
static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
{
struct fc_lport *lport = disc->lport;
struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
/**
* fc_disc_done - Discovery has been completed
* @disc: FC discovery context
*/
static void fc_disc_done(struct fc_disc *disc)
{
struct fc_lport *lport = disc->lport;
FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
fc_host_port_id(lport->host));
disc->disc_callback(lport, disc->event);
disc->event = DISC_EV_NONE;
if (disc->requested)
fc_disc_gpn_ft_req(disc);
else
disc->pending = 0;
}
/**
* fc_disc_error - Handle error on dNS request
* @disc: FC discovery context
* @fp: The frame pointer
*/
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
struct fc_lport *lport = disc->lport;
unsigned long delay = 0;
if (fc_disc_debug)
FC_DBG("Error %ld, retries %d/%d\n",
PTR_ERR(fp), disc->retry_count,
FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
* Memory allocation failure, or the exchange timed out,
* retry after delay.
*/
if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
/* go ahead and retry */
if (!fp)
delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
else {
delay = msecs_to_jiffies(lport->e_d_tov);
/* timeout faster first time */
if (!disc->retry_count)
delay /= 4;
}
disc->retry_count++;
schedule_delayed_work(&disc->disc_work, delay);
} else {
/* exceeded retries */
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
}
}
}
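To make the backoff policy above concrete: an allocation failure (fp == NULL) retries after the fixed FC_DISC_RETRY_DELAY, an exchange timeout retries after e_d_tov, quartered on the very first retry, and after FC_DISC_RETRY_LIMIT attempts discovery is declared failed. A standalone sketch of just the delay computation (helper name invented):

#include <stdio.h>

#define FC_DISC_RETRY_LIMIT 3
#define FC_DISC_RETRY_DELAY 500UL	/* msecs */

/* Returns the retry delay in milliseconds, or -1 when retries are
 * exhausted; mirrors the branch structure of fc_disc_error(). */
static long retry_delay_ms(int alloc_failure, unsigned long e_d_tov_ms,
			   int retry_count)
{
	unsigned long delay;

	if (retry_count >= FC_DISC_RETRY_LIMIT)
		return -1;		/* give up: DISC_EV_FAILED */
	if (alloc_failure)
		delay = FC_DISC_RETRY_DELAY;
	else {
		delay = e_d_tov_ms;
		if (retry_count == 0)
			delay /= 4;	/* time out faster the first time */
	}
	return (long)delay;
}

int main(void)
{
	printf("alloc fail, try 0: %ld ms\n", retry_delay_ms(1, 2000, 0));
	printf("timeout,    try 0: %ld ms\n", retry_delay_ms(0, 2000, 0));
	printf("timeout,    try 1: %ld ms\n", retry_delay_ms(0, 2000, 1));
	printf("timeout,    try 3: %ld ms\n", retry_delay_ms(0, 2000, 3));
	return 0;
}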
/**
* fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
* @lport: FC discovery context
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
struct fc_frame *fp;
struct fc_lport *lport = disc->lport;
WARN_ON(!fc_lport_test_ready(lport));
disc->pending = 1;
disc->requested = 0;
disc->buf_len = 0;
disc->seq_count = 0;
fp = fc_frame_alloc(lport,
sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_gid_ft));
if (!fp)
goto err;
if (lport->tt.elsct_send(lport, NULL, fp,
FC_NS_GPN_FT,
fc_disc_gpn_ft_resp,
disc, lport->e_d_tov))
return;
err:
fc_disc_error(disc, fp);
}
/**
* fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
* @lport: Fibre Channel host port instance
* @buf: GPN_FT response buffer
* @len: size of response buffer
*/
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
struct fc_lport *lport;
struct fc_gpn_ft_resp *np;
char *bp;
size_t plen;
size_t tlen;
int error = 0;
struct fc_disc_port dp;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rdata;
lport = disc->lport;
/*
* Handle partial name record left over from previous call.
*/
bp = buf;
plen = len;
np = (struct fc_gpn_ft_resp *)bp;
tlen = disc->buf_len;
if (tlen) {
WARN_ON(tlen >= sizeof(*np));
plen = sizeof(*np) - tlen;
WARN_ON(plen <= 0);
WARN_ON(plen >= sizeof(*np));
if (plen > len)
plen = len;
np = &disc->partial_buf;
memcpy((char *)np + tlen, bp, plen);
/*
* Set bp so that the loop below will advance it to the
* first valid full name element.
*/
bp -= tlen;
len += tlen;
plen += tlen;
disc->buf_len = (unsigned char) plen;
if (plen == sizeof(*np))
disc->buf_len = 0;
}
/*
* Handle full name records, including the one filled from above.
* Normally, np == bp and plen == len, but from the partial case above,
* bp, len describe the overall buffer, and np, plen describe the
* partial buffer, which would usually be full now.
* After the first time through the loop, things return to "normal".
*/
while (plen >= sizeof(*np)) {
dp.lp = lport;
dp.ids.port_id = ntoh24(np->fp_fid);
dp.ids.port_name = ntohll(np->fp_wwpn);
dp.ids.node_name = -1;
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
(dp.ids.port_name != lport->wwpn)) {
rport = fc_rport_rogue_create(&dp);
if (rport) {
rdata = rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
rdata->local_port = lport;
lport->tt.rport_login(rport);
} else
FC_DBG("Failed to allocate memory for "
"the newly discovered port (%6x)\n",
dp.ids.port_id);
}
if (np->fp_flags & FC_NS_FID_LAST) {
disc->event = DISC_EV_SUCCESS;
fc_disc_done(disc);
len = 0;
break;
}
len -= sizeof(*np);
bp += sizeof(*np);
np = (struct fc_gpn_ft_resp *)bp;
plen = len;
}
/*
* Save any partial record at the end of the buffer for next time.
*/
if (error == 0 && len > 0 && len < sizeof(*np)) {
if (np != &disc->partial_buf) {
FC_DEBUG_DISC("Partial buffer remains "
"for discovery by (%6x)\n",
fc_host_port_id(lport->host));
memcpy(&disc->partial_buf, np, len);
}
disc->buf_len = (unsigned char) len;
} else {
disc->buf_len = 0;
}
return error;
}
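The buf_len/partial_buf bookkeeping above is the classic problem of fixed-size records arriving over a byte stream: a GPN_FT entry may be split across two response frames, so the tail of one frame is stashed and completed from the head of the next. A simplified, self-contained model of that reassembly (8-byte records and the printing stand in for real fc_gpn_ft_resp handling):

#include <stdio.h>
#include <string.h>

struct rec { unsigned char b[8]; };	/* stand-in for fc_gpn_ft_resp */

static struct rec partial;
static size_t partial_len;

/* Feed one buffer; emit every complete 8-byte record, keep the tail. */
static void feed(const unsigned char *buf, size_t len)
{
	if (partial_len) {		/* finish the stashed record */
		size_t need = sizeof(partial) - partial_len;

		if (need > len)
			need = len;
		memcpy(partial.b + partial_len, buf, need);
		partial_len += need;
		buf += need;
		len -= need;
		if (partial_len == sizeof(partial)) {
			printf("record: %.8s\n", (const char *)partial.b);
			partial_len = 0;
		}
	}
	while (len >= sizeof(struct rec)) { /* full records in this buffer */
		printf("record: %.8s\n", (const char *)buf);
		buf += sizeof(struct rec);
		len -= sizeof(struct rec);
	}
	memcpy(partial.b, buf, len);	/* stash any tail for next time */
	partial_len = len;
}

int main(void)
{
	feed((const unsigned char *)"AAAAAAAABBB", 11);	  /* full A + 3-byte tail */
	feed((const unsigned char *)"BBBBBCCCCCCCC", 13); /* completes B, full C */
	return 0;
}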
/*
* Handle retry of memory allocation for remote ports.
*/
static void fc_disc_timeout(struct work_struct *work)
{
struct fc_disc *disc = container_of(work,
struct fc_disc,
disc_work.work);
mutex_lock(&disc->disc_mutex);
if (disc->requested && !disc->pending)
fc_disc_gpn_ft_req(disc);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
* @sp: Current sequence of GPN_FT exchange
* @fp: response frame
* @lp_arg: Fibre Channel host port instance
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
void *disc_arg)
{
struct fc_disc *disc = disc_arg;
struct fc_ct_hdr *cp;
struct fc_frame_header *fh;
unsigned int seq_cnt;
void *buf = NULL;
unsigned int len;
int error;
FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
fc_host_port_id(disc->lport->host));
if (IS_ERR(fp)) {
fc_disc_error(disc, fp);
return;
}
WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
fh = fc_frame_header_get(fp);
len = fr_len(fp) - sizeof(*fh);
seq_cnt = ntohs(fh->fh_seq_cnt);
if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
disc->seq_count == 0) {
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp) {
FC_DBG("GPN_FT response too short, len %d\n",
fr_len(fp));
} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
/*
* Accepted. Parse response.
*/
buf = cp + 1;
len -= sizeof(*cp);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DBG("GPN_FT rejected reason %x exp %x "
"(check zoning)\n", cp->ct_reason,
cp->ct_explan);
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
} else {
FC_DBG("GPN_FT unexpected response code %x\n",
ntohs(cp->ct_cmd));
}
} else if (fr_sof(fp) == FC_SOF_N3 &&
seq_cnt == disc->seq_count) {
buf = fh + 1;
} else {
FC_DBG("GPN_FT unexpected frame - out of sequence? "
"seq_cnt %x expected %x sof %x eof %x\n",
seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
}
if (buf) {
error = fc_disc_gpn_ft_parse(disc, buf, len);
if (error)
fc_disc_error(disc, fp);
else
disc->seq_count++;
}
fc_frame_free(fp);
}
/**
* fc_disc_single - Discover the directory information for a single target
* @lport: FC local port
* @dp: The port to rediscover
*
* Locking Note: This function expects that the disc_mutex is locked
* before it is called.
*/
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
struct fc_lport *lport;
struct fc_rport *rport;
struct fc_rport *new_rport;
struct fc_rport_libfc_priv *rdata;
lport = disc->lport;
if (dp->ids.port_id == fc_host_port_id(lport->host))
goto out;
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
if (rport)
fc_disc_del_target(disc, rport);
new_rport = fc_rport_rogue_create(dp);
if (new_rport) {
rdata = new_rport->dd_data;
rdata->ops = &fc_disc_rport_ops;
kfree(dp);
lport->tt.rport_login(new_rport);
}
return;
out:
kfree(dp);
}
/**
* fc_disc_stop - Stop discovery for a given lport
* @lport: The lport that discovery should stop for
*/
void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
if (disc) {
cancel_delayed_work_sync(&disc->disc_work);
fc_disc_stop_rports(disc);
}
}
/**
* fc_disc_stop_final - Stop discovery for a given lport
* @lport: The lport that discovery should stop for
*
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
lport->tt.rport_flush_queue();
}
/**
* fc_disc_init - Initialize the discovery block
* @lport: FC local port
*/
int fc_disc_init(struct fc_lport *lport)
{
struct fc_disc *disc;
if (!lport->tt.disc_start)
lport->tt.disc_start = fc_disc_start;
if (!lport->tt.disc_stop)
lport->tt.disc_stop = fc_disc_stop;
if (!lport->tt.disc_stop_final)
lport->tt.disc_stop_final = fc_disc_stop_final;
if (!lport->tt.disc_recv_req)
lport->tt.disc_recv_req = fc_disc_recv_req;
if (!lport->tt.rport_lookup)
lport->tt.rport_lookup = fc_disc_lookup_rport;
disc = &lport->disc;
INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
mutex_init(&disc->disc_mutex);
INIT_LIST_HEAD(&disc->rports);
disc->lport = lport;
disc->delay = FC_DISC_DELAY;
disc->event = DISC_EV_NONE;
return 0;
}
EXPORT_SYMBOL(fc_disc_init);


@ -0,0 +1,71 @@
/*
* Copyright(c) 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Provide interface to send ELS/CT FC frames
*/
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
/*
* fc_elsct_send - sends ELS/CT frame
*/
static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
struct fc_rport *rport,
struct fc_frame *fp,
unsigned int op,
void (*resp)(struct fc_seq *,
struct fc_frame *fp,
void *arg),
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
u32 did;
enum fc_fh_type fh_type;
int rc;
/* ELS requests */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
else
/* CT requests */
rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
if (rc)
return NULL;
fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
int fc_elsct_init(struct fc_lport *lport)
{
if (!lport->tt.elsct_send)
lport->tt.elsct_send = fc_elsct_send;
return 0;
}
EXPORT_SYMBOL(fc_elsct_init);
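fc_elsct_send dispatches on the opcode range alone: ELS opcodes (ELS_LS_RJT through ELS_AUTH_ELS) are filled by fc_els_fill, anything else is assumed to be a CT request for fc_ct_fill, and both share the same header fill and exchange send. A tiny sketch of that range test (opcode values copied from the FC headers, but treat them as illustrative):

#include <stdio.h>

/* Illustrative opcode values for the sketch. */
enum { ELS_LS_RJT = 0x01, ELS_AUTH_ELS = 0x90, FC_NS_GPN_FT = 0x0172 };

static const char *classify(unsigned int op)
{
	/* Same range test as fc_elsct_send(): ELS opcodes fit in one
	 * octet, CT command codes such as GPN_FT fall outside it. */
	if (op >= ELS_LS_RJT && op <= ELS_AUTH_ELS)
		return "ELS request (fc_els_fill)";
	return "CT request (fc_ct_fill)";
}

int main(void)
{
	printf("0x%x -> %s\n", ELS_LS_RJT, classify(ELS_LS_RJT));
	printf("0x%x -> %s\n", FC_NS_GPN_FT, classify(FC_NS_GPN_FT));
	return 0;
}

This is how the discovery code above reaches the name server: it calls lport->tt.elsct_send(lport, NULL, fp, FC_NS_GPN_FT, fc_disc_gpn_ft_resp, disc, lport->e_d_tov) and lets this helper pick the CT fill path.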

drivers/scsi/libfc/fc_exch.c: new file, 1970 lines. Diff not shown because of its large size.

drivers/scsi/libfc/fc_fcp.c: new file, 2131 lines. Diff not shown because of its large size.


@ -0,0 +1,89 @@
/*
* Copyright(c) 2007 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Frame allocation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <scsi/fc_frame.h>
/*
* Check the CRC in a frame.
*/
u32 fc_frame_crc_check(struct fc_frame *fp)
{
u32 crc;
u32 error;
const u8 *bp;
unsigned int len;
WARN_ON(!fc_frame_is_linear(fp));
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
bp = (const u8 *) fr_hdr(fp);
crc = ~crc32(~0, bp, len);
error = crc ^ fr_crc(fp);
return error;
}
EXPORT_SYMBOL(fc_frame_crc_check);
/*
* Allocate a frame intended to be sent via fcoe_xmit.
* Get an sk_buff for the frame and set the length.
*/
struct fc_frame *__fc_frame_alloc(size_t len)
{
struct fc_frame *fp;
struct sk_buff *skb;
WARN_ON((len % sizeof(u32)) != 0);
len += sizeof(struct fc_frame_header);
skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
if (!skb)
return NULL;
fp = (struct fc_frame *) skb;
fc_frame_init(fp);
skb_reserve(skb, FC_FRAME_HEADROOM);
skb_put(skb, len);
return fp;
}
EXPORT_SYMBOL(__fc_frame_alloc);
struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
{
struct fc_frame *fp;
size_t fill;
fill = payload_len % 4;
if (fill != 0)
fill = 4 - fill;
fp = __fc_frame_alloc(payload_len + fill);
if (fp) {
memset((char *) fr_hdr(fp) + payload_len, 0, fill);
/* trim is OK, we just allocated it so there are no fragments */
skb_trim(fp_skb(fp),
payload_len + sizeof(struct fc_frame_header));
}
return fp;
}
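Both helpers above rely on FC's 4-byte word alignment: fc_frame_crc_check rounds the frame length up so the CRC covers the fill bytes, and fc_frame_alloc_fill over-allocates, zeroes the fill, then trims the skb back to the payload. The rounding arithmetic in isolation:

#include <stdio.h>

/* Round a payload length up to the 4-byte FC word boundary, as
 * fc_frame_alloc_fill() and fc_frame_crc_check() both do. */
static size_t fc_roundup4(size_t payload_len)
{
	return (payload_len + 3) & ~(size_t)3;
}

int main(void)
{
	size_t lens[] = { 0, 1, 4, 13, 1024 };
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("payload %4zu -> allocated %4zu (fill %zu)\n",
		       lens[i], fc_roundup4(lens[i]),
		       fc_roundup4(lens[i]) - lens[i]);
	return 0;
}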

Diffs for the next two files are not shown because of their large size.


@ -88,34 +88,47 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
struct iscsi_data *hdr)
/**
* iscsi_prep_data_out_pdu - initialize Data-Out
* @task: scsi command task
* @r2t: R2T info
* @hdr: iscsi data-out pdu
*
* Notes:
* Initializes a Data-Out within this R2T sequence and finds the
* proper data_offset within this SCSI command.
*
* This function is called with connection lock taken.
**/
void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
struct iscsi_data *hdr)
{
struct iscsi_conn *conn = task->conn;
unsigned int left = r2t->data_length - r2t->sent;
task->hdr_len = sizeof(struct iscsi_data);
memset(hdr, 0, sizeof(struct iscsi_data));
hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
hdr->datasn = cpu_to_be32(task->unsol_datasn);
task->unsol_datasn++;
hdr->ttt = r2t->ttt;
hdr->datasn = cpu_to_be32(r2t->datasn);
r2t->datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
hdr->itt = task->hdr->itt;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
hdr->offset = cpu_to_be32(task->unsol_offset);
if (task->unsol_count > conn->max_xmit_dlength) {
memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
hdr->itt = task->hdr_itt;
hdr->exp_statsn = r2t->exp_statsn;
hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
if (left > conn->max_xmit_dlength) {
hton24(hdr->dlength, conn->max_xmit_dlength);
task->data_count = conn->max_xmit_dlength;
task->unsol_offset += task->data_count;
r2t->data_count = conn->max_xmit_dlength;
hdr->flags = 0;
} else {
hton24(hdr->dlength, task->unsol_count);
task->data_count = task->unsol_count;
hton24(hdr->dlength, left);
r2t->data_count = left;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
}
conn->dataout_pdus_cnt++;
}
EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
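Each call to the function above produces one Data-Out PDU of at most max_xmit_dlength bytes for the current R2T, and the final flag is set on the PDU that exhausts the R2T's data_length; an R2T for N bytes therefore yields ceil(N / max_xmit_dlength) Data-Outs. A toy walk over that sequence (loosely modeled on the function; sent is advanced here for brevity, while the driver advances it as data is actually transmitted):

#include <stdio.h>

struct r2t { unsigned data_length, sent, datasn; };

/* Returns the byte count of the next Data-Out and whether it is final. */
static unsigned next_data_out(struct r2t *r2t, unsigned max_xmit_dlength,
			      int *final)
{
	unsigned left = r2t->data_length - r2t->sent;
	unsigned count = left > max_xmit_dlength ? max_xmit_dlength : left;

	*final = (count == left);
	r2t->sent += count;
	r2t->datasn++;
	return count;
}

int main(void)
{
	struct r2t r2t = { .data_length = 20000 };
	unsigned max_xmit_dlength = 8192;	/* hypothetical negotiation */
	int final = 0;

	while (!final) {
		unsigned n = next_data_out(&r2t, max_xmit_dlength, &final);

		printf("Data-Out datasn=%u len=%u%s\n",
		       r2t.datasn - 1, n, final ? " (F bit set)" : "");
	}
	return 0;
}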
static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
@ -206,11 +219,24 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
struct iscsi_cmd *hdr = task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iscsi_cmd *hdr;
unsigned hdrlength, cmd_len;
itt_t itt;
int rc;
rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
if (rc)
return rc;
hdr = (struct iscsi_cmd *) task->hdr;
itt = hdr->itt;
memset(hdr, 0, sizeof(*hdr));
if (session->tt->parse_pdu_itt)
hdr->itt = task->hdr_itt = itt;
else
hdr->itt = task->hdr_itt = build_itt(task->itt,
task->conn->session->age);
task->hdr_len = 0;
rc = iscsi_add_hdr(task, sizeof(*hdr));
if (rc)
@ -218,8 +244,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
hdr->itt = build_itt(task->itt, session->age);
hdr->cmdsn = cpu_to_be32(session->cmdsn);
memcpy(task->lun, hdr->lun, sizeof(task->lun));
hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
cmd_len = sc->cmd_len;
@ -242,6 +268,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
}
if (sc->sc_data_direction == DMA_TO_DEVICE) {
unsigned out_len = scsi_out(sc)->length;
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
hdr->data_length = cpu_to_be32(out_len);
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
/*
@ -254,13 +282,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
* without R2T ack right after
* immediate data
*
* r2t_data_count bytes to be sent via R2T ack's
* r2t data_length bytes to be sent via R2T ack's
*
* pad_count bytes to be sent as zero-padding
*/
task->unsol_count = 0;
task->unsol_offset = 0;
task->unsol_datasn = 0;
memset(r2t, 0, sizeof(*r2t));
if (session->imm_data_en) {
if (out_len >= session->first_burst)
@ -274,12 +300,14 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
zero_data(hdr->dlength);
if (!session->initial_r2t_en) {
task->unsol_count = min(session->first_burst, out_len)
- task->imm_count;
task->unsol_offset = task->imm_count;
r2t->data_length = min(session->first_burst, out_len) -
task->imm_count;
r2t->data_offset = task->imm_count;
r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
}
if (!task->unsol_count)
if (!task->unsol_r2t.data_length)
/* No unsolicited Data-Outs */
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
@ -300,8 +328,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
WARN_ON(hdrlength >= 256);
hdr->hlength = hdrlength & 0xFF;
if (conn->session->tt->init_task &&
conn->session->tt->init_task(task))
if (session->tt->init_task && session->tt->init_task(task))
return -EIO;
task->state = ISCSI_TASK_RUNNING;
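Putting the unsolicited-write setup above into numbers: immediate data is capped by both first_burst and max_xmit_dlength, and with InitialR2T disabled the rest of min(first_burst, out_len) goes out as unsolicited Data-Outs via the reserved-TTT unsol_r2t; anything beyond first_burst waits for real R2Ts. A hedged calculator for the three buckets (negotiated values hypothetical):

#include <stdio.h>

static unsigned minu(unsigned a, unsigned b) { return a < b ? a : b; }

/* Split a write of out_len bytes into immediate / unsolicited / solicited
 * parts, approximating iscsi_prep_scsi_cmd_pdu()'s setup logic. */
static void split_write(unsigned out_len, unsigned first_burst,
			unsigned max_xmit_dlength,
			int imm_data_en, int initial_r2t_en)
{
	unsigned imm = 0, unsol = 0;

	if (imm_data_en)
		imm = minu(minu(out_len, first_burst), max_xmit_dlength);
	if (!initial_r2t_en)
		unsol = minu(first_burst, out_len) - imm;
	printf("out_len=%u: imm=%u unsol=%u via-R2T=%u\n",
	       out_len, imm, unsol, out_len - imm - unsol);
}

int main(void)
{
	unsigned first_burst = 65536, max_xmit_dlength = 8192;

	split_write(4096,   first_burst, max_xmit_dlength, 1, 0);
	split_write(131072, first_burst, max_xmit_dlength, 1, 0);
	split_write(131072, first_burst, max_xmit_dlength, 0, 1);
	return 0;
}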
@ -332,6 +359,7 @@ static void iscsi_complete_command(struct iscsi_task *task)
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
session->tt->cleanup_task(task);
list_del_init(&task->running);
task->state = ISCSI_TASK_COMPLETED;
task->sc = NULL;
@ -402,8 +430,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
* the cmd in the sequencing
*/
conn->session->queued_cmdsn--;
else
conn->session->tt->cleanup_task(conn, task);
sc->result = err;
if (!scsi_bidi_cmnd(sc))
@ -423,7 +449,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_session *session = conn->session;
struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
struct iscsi_hdr *hdr = task->hdr;
struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@ -437,7 +463,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
*/
nop->cmdsn = cpu_to_be32(session->cmdsn);
if (hdr->itt != RESERVED_ITT) {
hdr->itt = build_itt(task->itt, session->age);
/*
* TODO: We always use immediate, so we never hit this.
* If we start to send tmfs or nops as non-immediate then
@ -450,12 +475,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
}
}
if (session->tt->init_task)
session->tt->init_task(task);
if (session->tt->init_task && session->tt->init_task(task))
return -EIO;
if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
session->state = ISCSI_STATE_LOGGING_OUT;
task->state = ISCSI_TASK_RUNNING;
list_move_tail(&task->running, &conn->mgmt_run_list);
debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
@ -469,6 +495,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
itt_t itt;
if (session->state == ISCSI_STATE_TERMINATE)
return NULL;
@ -505,23 +532,47 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
task->data_count = 0;
if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
"pdu for mgmt task.\n");
goto requeue_task;
}
itt = task->hdr->itt;
task->hdr_len = sizeof(struct iscsi_hdr);
memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
if (hdr->itt != RESERVED_ITT) {
if (session->tt->parse_pdu_itt)
task->hdr->itt = itt;
else
task->hdr->itt = build_itt(task->itt,
task->conn->session->age);
}
INIT_LIST_HEAD(&task->running);
list_add_tail(&task->running, &conn->mgmtqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
if (iscsi_prep_mgmt_task(conn, task)) {
__iscsi_put_task(task);
return NULL;
}
if (iscsi_prep_mgmt_task(conn, task))
goto free_task;
if (session->tt->xmit_task(task))
task = NULL;
goto free_task;
} else
scsi_queue_work(conn->session->host, &conn->xmitwork);
return task;
free_task:
__iscsi_put_task(task);
return NULL;
requeue_task:
if (task != conn->login_task)
__kfifo_put(session->cmdpool.queue, (void*)&task,
sizeof(void*));
return NULL;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@ -709,7 +760,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
struct iscsi_hdr rejected_pdu;
uint32_t itt;
conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
@ -719,10 +769,9 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
itt = get_itt(rejected_pdu.itt);
iscsi_conn_printk(KERN_ERR, conn,
"itt 0x%x had pdu (op 0x%x) rejected "
"due to DataDigest error.\n", itt,
"pdu (op 0x%x) rejected "
"due to DataDigest error.\n",
rejected_pdu.opcode);
}
}
@ -742,12 +791,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
uint32_t i;
int i;
if (itt == RESERVED_ITT)
return NULL;
i = get_itt(itt);
if (session->tt->parse_pdu_itt)
session->tt->parse_pdu_itt(conn, itt, &i, NULL);
else
i = get_itt(itt);
if (i >= session->cmds_max)
return NULL;
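When the transport does not supply parse_pdu_itt (offload drivers such as cxgb3i do, because their hardware owns the tag format), the ITT is packed as a task-pool index plus a session age in the high bits, so a tag that survives from an earlier session incarnation is rejected instead of being matched to a recycled task. A sketch of the pack/unpack pair (shift and mask values hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low 12 bits task index, next 4 bits session age. */
#define AGE_SHIFT 12
#define AGE_MASK  0xf
#define IDX_MASK  0xfff

static uint32_t build_itt(uint32_t idx, uint32_t age)
{
	return (idx & IDX_MASK) | ((age & AGE_MASK) << AGE_SHIFT);
}

int main(void)
{
	uint32_t itt = build_itt(37, 5);
	uint32_t idx = itt & IDX_MASK;
	uint32_t age = (itt >> AGE_SHIFT) & AGE_MASK;

	printf("itt=0x%x -> index=%u age=%u\n", itt, idx, age);
	/* A PDU carrying age != session->age comes from a stale session
	 * incarnation and is rejected, as in iscsi_verify_itt() above. */
	return 0;
}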
@ -922,20 +974,25 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
uint32_t i;
int age = 0, i = 0;
if (itt == RESERVED_ITT)
return 0;
if (((__force u32)itt & ISCSI_AGE_MASK) !=
(session->age << ISCSI_AGE_SHIFT)) {
if (session->tt->parse_pdu_itt)
session->tt->parse_pdu_itt(conn, itt, &i, &age);
else {
i = get_itt(itt);
age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
}
if (age != session->age) {
iscsi_conn_printk(KERN_ERR, conn,
"received itt %x expected session age (%x)\n",
(__force u32)itt, session->age);
return ISCSI_ERR_BAD_ITT;
}
i = get_itt(itt);
if (i >= session->cmds_max) {
iscsi_conn_printk(KERN_ERR, conn,
"received invalid itt index %u (max cmds "
@ -1136,8 +1193,13 @@ check_mgmt:
fail_command(conn, conn->task, DID_IMM_RETRY << 16);
continue;
}
if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
fail_command(conn, conn->task, DID_ABORT << 16);
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM) {
conn->task = NULL;
goto again;
} else
fail_command(conn, conn->task, DID_ABORT << 16);
continue;
}
rc = iscsi_xmit_task(conn);
@ -1195,6 +1257,26 @@ static void iscsi_xmitworker(struct work_struct *work)
} while (rc >= 0 || rc == -EAGAIN);
}
static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
struct scsi_cmnd *sc)
{
struct iscsi_task *task;
if (!__kfifo_get(conn->session->cmdpool.queue,
(void *) &task, sizeof(void *)))
return NULL;
sc->SCp.phase = conn->session->age;
sc->SCp.ptr = (char *) task;
atomic_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
task->conn = conn;
task->sc = sc;
INIT_LIST_HEAD(&task->running);
return task;
}
enum {
FAILURE_BAD_HOST = 1,
FAILURE_SESSION_FAILED,
@ -1281,33 +1363,27 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto reject;
}
if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
sizeof(void*))) {
task = iscsi_alloc_task(conn, sc);
if (!task) {
reason = FAILURE_OOM;
goto reject;
}
sc->SCp.phase = session->age;
sc->SCp.ptr = (char *)task;
atomic_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
task->conn = conn;
task->sc = sc;
INIT_LIST_HEAD(&task->running);
list_add_tail(&task->running, &conn->xmitqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
if (iscsi_prep_scsi_cmd_pdu(task)) {
sc->result = DID_ABORT << 16;
sc->scsi_done = NULL;
iscsi_complete_command(task);
goto fault;
reason = iscsi_prep_scsi_cmd_pdu(task);
if (reason) {
if (reason == -ENOMEM) {
reason = FAILURE_OOM;
goto prepd_reject;
} else {
sc->result = DID_ABORT << 16;
goto prepd_fault;
}
}
if (session->tt->xmit_task(task)) {
sc->scsi_done = NULL;
iscsi_complete_command(task);
reason = FAILURE_SESSION_NOT_READY;
goto reject;
goto prepd_reject;
}
} else
scsi_queue_work(session->host, &conn->xmitwork);
@ -1317,12 +1393,18 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
spin_lock(host->host_lock);
return 0;
prepd_reject:
sc->scsi_done = NULL;
iscsi_complete_command(task);
reject:
spin_unlock(&session->lock);
debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
spin_lock(host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
prepd_fault:
sc->scsi_done = NULL;
iscsi_complete_command(task);
fault:
spin_unlock(&session->lock);
debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
@ -1634,9 +1716,9 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
hdr->rtt = task->hdr->itt;
hdr->refcmdsn = task->hdr->cmdsn;
memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
hdr->rtt = task->hdr_itt;
hdr->refcmdsn = task->cmdsn;
}
int iscsi_eh_abort(struct scsi_cmnd *sc)
@ -2223,7 +2305,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
}
spin_unlock_bh(&session->lock);
data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
data = (char *) __get_free_pages(GFP_KERNEL,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
if (!data)
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
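The switch from kmalloc to __get_free_pages for the login buffer pairs with the free_pages change in the teardown hunk below; get_order converts a byte count to the power-of-two page order the page allocator works in. A userspace re-implementation of the rounding, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12		/* assume 4 KiB pages for the sketch */

/* Smallest n with (PAGE_SIZE << n) >= size, like the kernel's get_order(). */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("get_order(4096)  = %d\n", get_order(4096));	/* 0 -> 1 page  */
	printf("get_order(8192)  = %d\n", get_order(8192));	/* 1 -> 2 pages */
	printf("get_order(12288) = %d\n", get_order(12288));	/* 2 -> 4 pages */
	return 0;
}

Note the power-of-two rounding: an allocation of 12 KiB costs a full order-2 block (16 KiB), which is why allocate and free must agree on the same order.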
@ -2294,7 +2377,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
kfree(conn->data);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));

drivers/scsi/libiscsi_tcp.c: new file, 1163 lines. Diff not shown because of its large size.


@ -29,8 +29,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@ -354,8 +356,6 @@ struct lpfc_vport {
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
char *vname; /* Application assigned name */
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@ -376,7 +376,7 @@ struct lpfc_vport {
struct fc_vport *fc_vport;
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *vport_debugfs_root;
@ -428,6 +428,7 @@ struct lpfc_hba {
#define LPFC_SLI3_VPORT_TEARDOWN 0x04
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_INB_ENABLED 0x10
#define LPFC_SLI3_BG_ENABLED 0x20
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@ -501,12 +502,14 @@ struct lpfc_hba {
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
lpfc_vpd_t vpd; /* vital product data */
@ -572,6 +575,9 @@ struct lpfc_hba {
uint64_t fc4InputRequests;
uint64_t fc4OutputRequests;
uint64_t fc4ControlRequests;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
struct lpfc_sysfs_mbox sysfs_mbox;
@ -594,6 +600,8 @@ struct lpfc_hba {
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct list_head port_list;
@ -613,12 +621,14 @@ struct lpfc_hba {
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
unsigned long last_ramp_up_time;
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
struct dentry *debug_dumpData; /* BlockGuard BPL*/
struct dentry *debug_dumpDif; /* BlockGuard BPL*/
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;


@ -96,6 +96,61 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
if (phba->cfg_enable_bg)
if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
else
return snprintf(buf, PAGE_SIZE,
"BlockGuard Not Supported\n");
else
return snprintf(buf, PAGE_SIZE,
"BlockGuard Disabled\n");
}
static ssize_t
lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_guard_err_cnt);
}
static ssize_t
lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_apptag_err_cnt);
}
static ssize_t
lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)phba->bg_reftag_err_cnt);
}
/**
* lpfc_info_show: Return some pci info about the host in ascii.
* @dev: class converted to a Scsi_host structure.
@ -1485,6 +1540,10 @@ lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@ -1970,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# LOG_LINK_EVENT 0x10 Link events
# LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events
# LOG_BG 0x200 BlockGuard events
# LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events
# LOG_FCP_ERROR 0x1000 Only log FCP errors
@ -2768,6 +2828,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
*/
LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
/*
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
# 0 = BlockGuard disabled (default)
# 1 = BlockGuard enabled
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
# SCSI mid-layer
# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
# - Allows you to ultimately specify which profiles to use
# - Default will result in registering capabilities for all profiles.
#
*/
unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
module_param(lpfc_prot_mask, uint, 0);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
/*
# lpfc_prot_guard: i
# - Bit mask of protection guard types to register with the SCSI mid-layer
# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC
# - Allows you to ultimately specify which profiles to use
# - Default will result in registering capabilities for all guard types
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
module_param(lpfc_prot_guard, byte, 0);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
/*
* lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
* This value can be set to values between 64 and 256. The default value is
@ -2777,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
"Max Protection Scatter Gather Segment Count");
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
&dev_attr_bg_apptag_err,
&dev_attr_bg_reftag_err,
&dev_attr_info,
&dev_attr_serialnum,
&dev_attr_modeldesc,
@ -2825,6 +2929,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
&dev_attr_lpfc_soft_wwn_enable,
@ -2833,6 +2938,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_prot_sg_seg_cnt,
NULL,
};
@ -3281,26 +3387,29 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
/* Virtual ports do not need ctrl_reg and mbox */
if (error || vport->port_type == LPFC_NPIV_PORT)
goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
goto out;
goto out_remove_stat_attr;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
if (error)
goto out_remove_ctlreg_attr;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
if (error)
goto out_remove_mbox_attr;
return 0;
out_remove_mbox_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
out_remove_stat_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
out:
return error;
}
@ -3315,6 +3424,9 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
sysfs_remove_bin_file(&shost->shost_dev.kobj,
&sysfs_drvr_stat_data_attr);
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
}
@ -3792,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev, \
lpfc_rport_show_function(field, format_string, sz, ) \
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
/**
* lpfc_set_vport_symbolic_name: Set the vport's symbolic name.
* @fc_vport: The fc_vport whose symbolic name has been changed.
*
* Description:
* This function is called by the transport after the @fc_vport's symbolic name
* has been changed. It re-registers the symbolic name with the
* switch to propagate the change into the fabric if the vport is active.
**/
static void
lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
if (vport->port_state == LPFC_VPORT_READY)
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
}
struct fc_function_template lpfc_transport_functions = {
/* fixed attributes the driver supports */
@ -3801,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
@ -3850,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = {
.terminate_rport_io = lpfc_terminate_rport_io,
.dd_fcvport_size = sizeof(struct lpfc_vport *),
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
struct fc_function_template lpfc_vport_transport_functions = {
@ -3860,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
@ -3908,6 +4043,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
.terminate_rport_io = lpfc_terminate_rport_io,
.vport_disable = lpfc_vport_disable,
.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
/**
@ -3930,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg);
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
/* Also reinitialize the host templates with new values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@ -3945,6 +4081,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
if (phba->cfg_enable_bg) {
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
phba->cfg_sg_dma_buf_size +=
phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
}
/* Also reinitialize the host templates with new values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
return;
}


@ -22,6 +22,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@ -284,12 +285,24 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* externs BlockGuard */
extern char *_dump_buf_data;
extern unsigned long _dump_buf_data_order;
extern char *_dump_buf_dif;
extern unsigned long _dump_buf_dif_order;
extern spinlock_t _dump_buf_lock;
extern int _dump_buf_done;
extern spinlock_t pgcnt_lock;
extern unsigned int pgcnt;
extern unsigned int lpfc_prot_mask;
extern unsigned char lpfc_prot_guard;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_adjust_queue_depth(struct lpfc_hba *);
void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
void lpfc_scsi_dev_block(struct lpfc_hba *);


@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING)
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
goto out;
}
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
if (lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
goto out;
}
if (irsp->ulpStatus) {
@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (rc == 0)
goto out;
}
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0257 GID_FT Query error: 0x%x 0x%x\n",
@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
if (n < size)
n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
if (n < size && vport->vname)
n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
if (n < size &&
strlen(vport->fc_vport->symbolic_name))
n += snprintf(symbol + n, size - n, " VName-%s",
vport->fc_vport->symbolic_name);
return n;
}
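For illustration: with vpi == 2 and an fc_vport symbolic name of "backup" (a hypothetical value), the two appends above extend the caller's buffer with " VPort-2 VName-backup", provided n stays below size at each step.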


@ -46,7 +46,7 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
* debugfs interface
*
@ -618,7 +618,7 @@ inline void
lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc *dtp;
int index;
@ -659,7 +659,7 @@ inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc *dtp;
int index;
@ -680,7 +680,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
return;
}
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
* lpfc_debugfs_disc_trc_open - Open the discovery trace log.
* @inode: The inode pointer that contains a vport pointer.
@ -907,6 +907,91 @@ out:
return rc;
}
static int
lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
{
struct lpfc_debug *debug;
int rc = -ENOMEM;
if (!_dump_buf_data)
return -EBUSY;
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundary */
printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
__func__, _dump_buf_data);
debug->buffer = _dump_buf_data;
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
file->private_data = debug;
rc = 0;
out:
return rc;
}
static int
lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
{
struct lpfc_debug *debug;
int rc = -ENOMEM;
if (!_dump_buf_dif)
return -EBUSY;
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundary */
printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
_dump_buf_dif, file->f_dentry->d_name.name);
debug->buffer = _dump_buf_dif;
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
file->private_data = debug;
rc = 0;
out:
return rc;
}
static ssize_t
lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
/*
* The Data/DIF buffers only save one failing IO.
* The write op is used as a reset mechanism after an IO has
* already been saved, so the next one can be saved.
*/
spin_lock(&_dump_buf_lock);
memset((void *)_dump_buf_data, 0,
((1 << PAGE_SHIFT) << _dump_buf_data_order));
memset((void *)_dump_buf_dif, 0,
((1 << PAGE_SHIFT) << _dump_buf_dif_order));
_dump_buf_done = 0;
spin_unlock(&_dump_buf_lock);
return nbytes;
}
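As a rough user-space sketch of that one-shot protocol -- the debugfs mount point and the per-function directory name are assumptions, not taken from this patch -- reading either file dumps the captured buffer, and any subsequent write re-arms the capture:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* Path is illustrative; it depends on the debugfs mount and HBA name */
	int fd = open("/sys/kernel/debug/lpfc/fn0/dumpData", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* saved Data payload */
	write(fd, "1", 1);	/* any write clears both buffers */
	close(fd);
	return 0;
}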
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
* @inode: The inode pointer that contains a vport pointer.
@ -1035,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
return 0;
}
static int
lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
{
struct lpfc_debug *debug = file->private_data;
debug->buffer = NULL;
kfree(debug);
return 0;
}
#undef lpfc_debugfs_op_disc_trc
static struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@ -1080,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_dumpData
static struct file_operations lpfc_debugfs_op_dumpData = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_dumpData_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.write = lpfc_debugfs_dumpDataDif_write,
.release = lpfc_debugfs_dumpDataDif_release,
};
#undef lpfc_debugfs_op_dumpDif
static struct file_operations lpfc_debugfs_op_dumpDif = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_dumpDif_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.write = lpfc_debugfs_dumpDataDif_write,
.release = lpfc_debugfs_dumpDataDif_release,
};
#undef lpfc_debugfs_op_slow_ring_trc
static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
.owner = THIS_MODULE,
@ -1106,7 +1222,7 @@ static atomic_t lpfc_debugfs_hba_count;
inline void
lpfc_debugfs_initialize(struct lpfc_vport *vport)
{
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hba *phba = vport->phba;
char name[64];
uint32_t num, i;
@ -1176,6 +1292,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
phba->debug_dumpData =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpData);
if (!phba->debug_dumpData) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0800 Cannot create debugfs dumpData\n");
goto debug_failed;
}
/* Setup dumpDif */
snprintf(name, sizeof(name), "dumpDif");
phba->debug_dumpDif =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpDif);
if (!phba->debug_dumpDif) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0801 Cannot create debugfs dumpDif\n");
goto debug_failed;
}
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@ -1305,7 +1447,7 @@ debug_failed:
inline void
lpfc_debugfs_terminate(struct lpfc_vport *vport)
{
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hba *phba = vport->phba;
if (vport->disc_trc) {
@ -1340,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
}
if (phba->debug_dumpData) {
debugfs_remove(phba->debug_dumpData); /* dumpData */
phba->debug_dumpData = NULL;
}
if (phba->debug_dumpDif) {
debugfs_remove(phba->debug_dumpDif); /* dumpDif */
phba->debug_dumpDif = NULL;
}
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;


@ -21,7 +21,7 @@
#ifndef _H_LPFC_DEBUG_FS
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;


@ -221,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* For ELS_REQUEST64_CR, use the VPI by default */
icmd->ulpContext = vport->vpi;
icmd->ulpCt_h = 0;
icmd->ulpCt_l = 1;
/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
if (elscmd == ELS_CMD_ECHO)
icmd->ulpCt_l = 0; /* context = invalid RPI */
else
icmd->ulpCt_l = 1; /* context = VPI */
}
bpl = (struct ulp_bde64 *) pbuflist->virt;
@ -271,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
return elsiocb;
els_iocb_free_pbuf_exit:
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
if (expectRsp)
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
els_iocb_free_prsp_exit:
@ -2468,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
if (cmd == ELS_CMD_FLOGI) {
if (PCI_DEVICE_ID_HORNET ==
phba->pcidev->device) {
phba->fc_topology = TOPOLOGY_LOOP;
phba->pport->fc_myDID = 0;
phba->alpa_map[0] = 0;
phba->alpa_map[1] = 0;
}
}
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@ -3823,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
while (payload_len) {
rscn_did.un.word = be32_to_cpu(*lp++);
payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv) {
case 0: /* Single N_Port ID effected */
switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
case RSCN_ADDRESS_FORMAT_PORT:
if (ns_did.un.word == rscn_did.un.word)
goto return_did_out;
break;
case 1: /* Whole N_Port Area effected */
case RSCN_ADDRESS_FORMAT_AREA:
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
goto return_did_out;
break;
case 2: /* Whole N_Port Domain effected */
case RSCN_ADDRESS_FORMAT_DOMAIN:
if (ns_did.un.b.domain == rscn_did.un.b.domain)
goto return_did_out;
break;
default:
/* Unknown Identifier in RSCN node */
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0217 Unknown Identifier in "
"RSCN payload Data: x%x\n",
rscn_did.un.word);
case 3: /* Whole Fabric effected */
case RSCN_ADDRESS_FORMAT_FABRIC:
goto return_did_out;
}
}
@ -3886,6 +3894,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
return 0;
}
/**
* lpfc_send_rscn_event: Send an RSCN event to management application.
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
*
* lpfc_send_rscn_event sends an RSCN netlink event to management
* applications.
*/
static void
lpfc_send_rscn_event(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_dmabuf *pcmd;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
uint32_t *payload_ptr;
uint32_t payload_len;
struct lpfc_rscn_event_header *rscn_event_data;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
payload_ptr = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
payload_len, GFP_KERNEL);
if (!rscn_event_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0147 Failed to allocate memory for RSCN event\n");
return;
}
rscn_event_data->event_type = FC_REG_RSCN_EVENT;
rscn_event_data->payload_length = payload_len;
memcpy(rscn_event_data->rscn_payload, payload_ptr,
payload_len);
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(struct lpfc_els_event_header) + payload_len,
(char *)rscn_event_data,
LPFC_NL_VENDOR_ID);
kfree(rscn_event_data);
}
/**
* lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
* @vport: pointer to a host virtual N_Port data structure.
@ -3933,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"0214 RSCN received Data: x%x x%x x%x x%x\n",
vport->fc_flag, payload_len, *lp,
vport->fc_rscn_id_cnt);
/* Send an RSCN event to the management application */
lpfc_send_rscn_event(vport, cmdiocb);
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
@ -4884,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
/* If the timer is already canceled do nothing */
if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
return;
}
spin_lock_irq(&phba->hbalock);
timeout = (uint32_t)(phba->fc_ratov << 1);
@ -5128,7 +5179,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
fc_get_event_number(),
sizeof(lsrjt_event),
(char *)&lsrjt_event,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
LPFC_NL_VENDOR_ID);
return;
}
if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
@ -5146,7 +5197,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
fc_get_event_number(),
sizeof(fabric_event),
(char *)&fabric_event,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
LPFC_NL_VENDOR_ID);
return;
}
@ -5164,32 +5215,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
static void
lpfc_send_els_event(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
uint32_t cmd)
uint32_t *payload)
{
struct lpfc_els_event_header els_data;
struct lpfc_els_event_header *els_data = NULL;
struct lpfc_logo_event *logo_data = NULL;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
els_data.event_type = FC_REG_ELS_EVENT;
switch (cmd) {
if (*payload == ELS_CMD_LOGO) {
logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
if (!logo_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0148 Failed to allocate memory "
"for LOGO event\n");
return;
}
els_data = &logo_data->header;
} else {
els_data = kmalloc(sizeof(struct lpfc_els_event_header),
GFP_KERNEL);
if (!els_data) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0149 Failed to allocate memory "
"for ELS event\n");
return;
}
}
els_data->event_type = FC_REG_ELS_EVENT;
switch (*payload) {
case ELS_CMD_PLOGI:
els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
break;
case ELS_CMD_PRLO:
els_data.subcategory = LPFC_EVENT_PRLO_RCV;
els_data->subcategory = LPFC_EVENT_PRLO_RCV;
break;
case ELS_CMD_ADISC:
els_data.subcategory = LPFC_EVENT_ADISC_RCV;
els_data->subcategory = LPFC_EVENT_ADISC_RCV;
break;
case ELS_CMD_LOGO:
els_data->subcategory = LPFC_EVENT_LOGO_RCV;
/* Copy the WWPN in the LOGO payload */
memcpy(logo_data->logo_wwpn, &payload[2],
sizeof(struct lpfc_name));
break;
default:
return;
}
memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(els_data),
(char *)&els_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
if (*payload == ELS_CMD_LOGO) {
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(struct lpfc_logo_event),
(char *)logo_data,
LPFC_NL_VENDOR_ID);
kfree(logo_data);
} else {
fc_host_post_vendor_event(shost,
fc_get_event_number(),
sizeof(struct lpfc_els_event_header),
(char *)els_data,
LPFC_NL_VENDOR_ID);
kfree(els_data);
}
return;
}
@ -5296,7 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_send_els_event(vport, ndlp, cmd);
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@ -5334,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvLOGO++;
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
break;
@ -5346,7 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLO++;
lpfc_send_els_event(vport, ndlp, cmd);
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
break;
@ -5364,7 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"RCV ADISC: did:x%x/ste:x%x flg:x%x",
did, vport->port_state, ndlp->nlp_flag);
lpfc_send_els_event(vport, ndlp, cmd);
lpfc_send_els_event(vport, ndlp, payload);
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;


@ -350,7 +350,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
evt_data_size = sizeof(fast_evt_data->un.
read_check_error);
} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
(evt_sub_category == IOSTAT_NPORT_BSY)) {
(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
evt_data = (char *) &fast_evt_data->un.fabric_evt;
evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
} else {
@ -387,7 +387,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
fc_get_event_number(),
evt_data_size,
evt_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
LPFC_NL_VENDOR_ID);
lpfc_free_fast_evt(phba, fast_evt_data);
return;
@ -585,20 +585,25 @@ lpfc_do_work(void *p)
set_user_nice(current, -20);
phba->data_flags = 0;
while (1) {
while (!kthread_should_stop()) {
/* wait and check worker queue activities */
rc = wait_event_interruptible(phba->work_waitq,
(test_and_clear_bit(LPFC_DATA_READY,
&phba->data_flags)
|| kthread_should_stop()));
BUG_ON(rc);
if (kthread_should_stop())
/* A signal wakeup terminates the worker thread */
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"0433 Wakeup on signal: rc=x%x\n", rc);
break;
}
/* Attend pending lpfc data processing */
lpfc_work_done(phba);
}
phba->worker_thread = NULL;
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0432 Worker thread stopped.\n");
return 0;
}
@ -1852,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
}
/**
* lpfc_initialize_node: Initialize all fields of node object.
* @vport: Pointer to Virtual Port object.
* @ndlp: Pointer to FC node object.
* @did: FC_ID of the node.
* This function is always called when a node object needs to
* be initialized. It initializes all the fields of the node
* object.
**/
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->nlp_sid = NLP_NO_SID;
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@ -1892,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* re-initialize ndlp except of ndlp linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->nlp_sid = NLP_NO_SID;
/* ndlp management re-initialize */
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
lpfc_initialize_node(vport, ndlp, did);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
@ -3116,19 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->nlp_sid = NLP_NO_SID;
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",


@ -65,6 +65,9 @@
#define SLI3_IOCB_RSP_SIZE 64
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
/* Common Transport structures and definitions */
union CtRevisionId {
@ -866,6 +869,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */
} un;
} D_ID;
#define RSCN_ADDRESS_FORMAT_PORT 0x0
#define RSCN_ADDRESS_FORMAT_AREA 0x1
#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
#define RSCN_ADDRESS_FORMAT_MASK 0x3
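A minimal sketch of how these bits are consumed -- the helper name is hypothetical and simply mirrors the reworked switch in lpfc_rscn_payload_check():

static inline const char *
rscn_format_name(D_ID rscn_did)
{
	/* Only the low two bits of the format field are significant */
	switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
	case RSCN_ADDRESS_FORMAT_PORT:
		return "single N_Port";
	case RSCN_ADDRESS_FORMAT_AREA:
		return "whole area";
	case RSCN_ADDRESS_FORMAT_DOMAIN:
		return "whole domain";
	default:
		return "whole fabric";
	}
}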
/*
* Structure to define all ELS Payload types
*/
@ -1535,6 +1544,108 @@ typedef struct ULP_BDL { /* SLI-2 */
uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
} ULP_BDL;
/*
* BlockGuard Definitions
*/
enum lpfc_protgrp_type {
LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */
LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */
LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */
LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */
};
/* PDE Descriptors */
#define LPFC_PDE1_DESCRIPTOR 0x81
#define LPFC_PDE2_DESCRIPTOR 0x82
#define LPFC_PDE3_DESCRIPTOR 0x83
/* BlockGuard Profiles */
enum lpfc_bg_prof_codes {
LPFC_PROF_INVALID,
LPFC_PROF_A1 = 128, /* Full Protection */
LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
LPFC_PROF_A3,
LPFC_PROF_A4,
LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
LPFC_PROF_B2,
LPFC_PROF_B3,
LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
LPFC_PROF_C2,
LPFC_PROF_C3,
LPFC_PROF_D1, /* Full Protection */
LPFC_PROF_D2, /* Partial Protection & Check Disabling */
LPFC_PROF_D3,
LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
LPFC_PROF_E2,
LPFC_PROF_E3,
LPFC_PROF_E4,
LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
/* F1 Translation BDE */
LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
LPFC_PROF_ANT2,
LPFC_PROF_AST2
};
/* BlockGuard error-control defines */
#define BG_EC_STOP_ERR 0x00
#define BG_EC_CONT_ERR 0x01
#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
#define PDE_DESC_TYPE_MASK 0xff000000
#define PDE_DESC_TYPE_SHIFT 24
#define PDE_BG_PROFILE_MASK 0x00ff0000
#define PDE_BG_PROFILE_SHIFT 16
#define PDE_BLOCK_LEN_MASK 0x0000fffc
#define PDE_BLOCK_LEN_SHIFT 2
#define PDE_ERR_CTRL_MASK 0x00000003
#define PDE_ERR_CTRL_SHIFT 0
/* PDE word 1 bit masks and shifts */
#define PDE_APPTAG_MASK_MASK 0xffff0000
#define PDE_APPTAG_MASK_SHIFT 16
#define PDE_APPTAG_VAL_MASK 0x0000ffff
#define PDE_APPTAG_VAL_SHIFT 0
struct lpfc_pde {
uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
uint32_t apptag; /* bitfields of app tag mask and app tag value */
uint32_t reftag; /* reference tag occupying all 32 bits */
};
/* inline function to set fields in parms of PDE */
static inline void
lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
{
uint32_t *wp = &p->parms;
/* spec indicates that adapter appends two 0's to length field */
len = len >> 2;
*wp &= 0;
*wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
*wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
*wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
*wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
*wp = le32_to_cpu(*wp);
}
/* inline function to set apptag and reftag fields of PDE */
static inline void
lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
u32 reftag)
{
uint32_t *wp = &p->apptag;
*wp &= 0;
*wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
*wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
*wp = le32_to_cpu(*wp);
wp = &p->reftag;
*wp = le32_to_cpu(reftag);
}
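A hedged usage sketch of the two setters above -- the function and the chosen values (512-byte blocks, profile A1, stop-on-error, all-ones app tag mask) are illustrative, not taken from the patch:

static void example_fill_pde(struct lpfc_pde *pde, u32 reftag)
{
	/* Word 0: descriptor type, BlockGuard profile, block length, EC */
	lpfc_pde_set_bg_parms(pde, LPFC_PDE1_DESCRIPTOR, LPFC_PROF_A1,
			      512, BG_EC_STOP_ERR);
	/* Words 1-2: app tag mask/value and the 32-bit reference tag */
	lpfc_pde_set_dif_parms(pde, 0xffff, 0, reftag);
}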
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
typedef struct {
@ -2359,6 +2470,30 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
/* Option rom version structure */
struct prog_id {
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t type;
uint8_t id;
uint32_t ver:4; /* Major Version */
uint32_t rev:4; /* Revision */
uint32_t lev:2; /* Level */
uint32_t dist:2; /* Dist Type */
uint32_t num:4; /* number after dist type */
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t num:4; /* number after dist type */
uint32_t dist:2; /* Dist Type */
uint32_t lev:2; /* Level */
uint32_t rev:4; /* Revision */
uint32_t ver:4; /* Major Version */
uint8_t id;
uint8_t type;
#endif
};
/* Structure for MB Command UPDATE_CFG (0x1B) */
struct update_cfg_var {
@ -2552,11 +2687,19 @@ typedef struct {
uint32_t pcbLow; /* bit 31:0 of memory based port config block */
uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
uint32_t hbainit[6];
uint32_t hbainit[5];
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
#else /* __LITTLE_ENDIAN */
uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd : 24; /* Reserved */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t rsvd1 : 23; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t chbs : 1; /* Configure Host Backing store */
@ -2573,10 +2716,12 @@ typedef struct {
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t rsvd : 24; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd1 : 23; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd2 : 24; /* Reserved */
uint32_t rsvd2 : 23; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
@ -2594,7 +2739,8 @@ typedef struct {
uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t rsvd2 : 24; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd2 : 23; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@ -3214,6 +3360,94 @@ struct que_xri64cx_ext_fields {
struct lpfc_hbq_entry buff[5];
};
struct sli3_bg_fields {
uint32_t filler[6]; /* word 8-13 in IOCB */
uint32_t bghm; /* word 14 - BlockGuard High Water Mark */
/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
#define BGS_BIDIR_BG_PROF_MASK 0xff000000
#define BGS_BIDIR_BG_PROF_SHIFT 24
#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000
#define BGS_BIDIR_ERR_COND_SHIFT 16
#define BGS_BG_PROFILE_MASK 0x0000ff00
#define BGS_BG_PROFILE_SHIFT 8
#define BGS_INVALID_PROF_MASK 0x00000020
#define BGS_INVALID_PROF_SHIFT 5
#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010
#define BGS_UNINIT_DIF_BLOCK_SHIFT 4
#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008
#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3
#define BGS_REFTAG_ERR_MASK 0x00000004
#define BGS_REFTAG_ERR_SHIFT 2
#define BGS_APPTAG_ERR_MASK 0x00000002
#define BGS_APPTAG_ERR_SHIFT 1
#define BGS_GUARD_ERR_MASK 0x00000001
#define BGS_GUARD_ERR_SHIFT 0
uint32_t bgstat; /* word 15 - BlockGuard Status */
};
static inline uint32_t
lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
BGS_BIDIR_BG_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
BGS_BIDIR_ERR_COND_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_bg_prof(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
BGS_BG_PROFILE_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_invalid_prof(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
BGS_INVALID_PROF_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
BGS_UNINIT_DIF_BLOCK_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
BGS_HI_WATER_MARK_PRESENT_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_reftag_err(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
BGS_REFTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_apptag_err(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
BGS_APPTAG_ERR_SHIFT;
}
static inline uint32_t
lpfc_bgs_get_guard_err(uint32_t bgstat)
{
return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
BGS_GUARD_ERR_SHIFT;
}
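As a small sketch of how a completion path might combine these accessors (the helper itself is hypothetical):

static inline int example_bgstat_has_tag_err(uint32_t bgstat)
{
	/* Any of the guard, reference or application tag checks failed */
	return lpfc_bgs_get_guard_err(bgstat) ||
	       lpfc_bgs_get_reftag_err(bgstat) ||
	       lpfc_bgs_get_apptag_err(bgstat);
}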
#define LPFC_EXT_DATA_BDE_COUNT 3
struct fcp_irw_ext {
uint32_t io_tag64_low;
@ -3322,6 +3556,9 @@ typedef struct _IOCB { /* IOCB structure */
struct que_xri64cx_ext_fields que_xri64cx_ext_words;
struct fcp_irw_ext fcp_ext;
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
/* words 8-15 for BlockGuard */
struct sli3_bg_fields sli3_bg;
} unsli3;
#define ulpCt_h ulpXS


@ -45,6 +45,12 @@
#include "lpfc_vport.h"
#include "lpfc_version.h"
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
@ -235,6 +241,51 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
/**
* lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
* command used for getting wake up parameters.
* @phba: pointer to lpfc hba data structure.
* @pmboxq: pointer to the driver internal queue element for mailbox command.
*
* This is the completion handler for the dump mailbox command for getting
* wake up parameters. When this command completes, the response contains
* the Option ROM version of the HBA. This function translates the version
* number into a human readable string and stores it in OptionROMVersion.
**/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct prog_id *prg;
uint32_t prog_id_word;
char dist = ' ';
/* character array used for decoding dist type. */
char dist_char[] = "nabx";
if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
prg = (struct prog_id *) &prog_id_word;
/* word 7 contains the option rom version */
prog_id_word = pmboxq->mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
if (prg->dist < 4)
dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
sprintf(phba->OptionROMVersion, "%d.%d%d",
prg->ver, prg->rev, prg->lev);
else
sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
prg->ver, prg->rev, prg->lev,
dist, prg->num);
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
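As a concrete reading of the format strings above: with ver = 5, rev = 0, lev = 2, dist type 1 ('a' in dist_char) and num = 3, the else branch produces "5.02a3"; only the dist == 3, num == 0 combination takes the shorter "%d.%d%d" form.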
/**
* lpfc_config_port_post: Perform lpfc initialization after config port.
* @phba: pointer to lpfc hba data structure.
@ -482,6 +533,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
/* Get Option rom version */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
lpfc_dump_wakeup_param(phba, pmb);
pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
pmb->vport = phba->pport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
"to get Option ROM version status x%x\n.", rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
return 0;
}
@ -686,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
return;
spin_lock_irq(&phba->pport->work_port_lock);
/* If the timer is already canceled do nothing */
if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
spin_unlock_irq(&phba->pport->work_port_lock);
return;
}
if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
jiffies)) {
@ -833,8 +893,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(board_event),
(char *) &board_event,
SCSI_NL_VID_TYPE_PCI
| PCI_VENDOR_ID_EMULEX);
LPFC_NL_VENDOR_ID);
if (phba->work_hs & HS_FFER6) {
/* Re-establishing Link */
@ -1984,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
/*
* Set initial can_queue value since 0 is no longer supported and
* scsi_add_host will fail. This will be adjusted later based on the
@ -2042,8 +2102,6 @@ destroy_port(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
kfree(vport->vname);
lpfc_debugfs_terminate(vport);
fc_remove_host(shost);
scsi_remove_host(shost);
@ -2226,8 +2284,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
ARRAY_SIZE(phba->msix_entries));
if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 Enable MSI-X failed (%d), continuing "
"with MSI\n", rc);
"0420 PCI enable MSI-X failed (%d)\n", rc);
goto msi_fail_out;
} else
for (i = 0; i < LPFC_MSIX_VECTORS; i++)
@ -2244,9 +2301,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0421 MSI-X slow-path request_irq failed "
"(%d), continuing with MSI\n", rc);
"(%d)\n", rc);
goto msi_fail_out;
}
@ -2255,9 +2312,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0429 MSI-X fast-path request_irq failed "
"(%d), continuing with MSI\n", rc);
"(%d)\n", rc);
goto irq_fail_out;
}
@ -2278,7 +2335,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
goto mbx_fail_out;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0351 Config MSI mailbox command failed, "
"mbxCmd x%x, mbxStatus x%x\n",
pmb->mb.mbxCommand, pmb->mb.mbxStatus);
@ -2326,6 +2383,195 @@ lpfc_disable_msix(struct lpfc_hba *phba)
pci_disable_msix(phba->pcidev);
}
/**
* lpfc_enable_msi: Enable MSI interrupt mode.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI interrupt mode. The kernel
* function pci_enable_msi() is called to enable the MSI vector. The
* device driver is responsible for calling request_irq() to register the
* MSI vector with an interrupt handler, which is done in this function.
*
* Return codes
* 0 - successful
* other values - error
*/
static int
lpfc_enable_msi(struct lpfc_hba *phba)
{
int rc;
rc = pci_enable_msi(phba->pcidev);
if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0462 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc);
return rc;
}
rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (rc) {
pci_disable_msi(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0478 MSI request_irq failed (%d)\n", rc);
}
return rc;
}
/**
* lpfc_disable_msi: Disable MSI interrupt mode.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to disable the MSI interrupt mode. The driver
* calls free_irq() on the MSI vector it has done request_irq() on before
* calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
* the device is left with MSI enabled, leaking its vector.
*/
static void
lpfc_disable_msi(struct lpfc_hba *phba)
{
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
return;
}
/**
* lpfc_log_intr_mode: Log the active interrupt mode
* @phba: pointer to lpfc hba data structure.
* @intr_mode: active interrupt mode adopted.
*
* This routine is invoked to log the currently used active interrupt mode
* of the device.
*/
static void
lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
switch (intr_mode) {
case 0:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0470 Enable INTx interrupt mode.\n");
break;
case 1:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0481 Enabled MSI interrupt mode.\n");
break;
case 2:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0480 Enabled MSI-X interrupt mode.\n");
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0482 Illegal interrupt mode.\n");
break;
}
return;
}
static void
lpfc_stop_port(struct lpfc_hba *phba)
{
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
/* Reset some HBA SLI setup states */
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
return;
}
/**
* lpfc_enable_intr: Enable device interrupt.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable device interrupt and associate driver's
* interrupt handler(s) to interrupt vector(s). Depends on the interrupt
* mode configured to the driver, the driver will try to fallback from the
* configured interrupt mode to an interrupt mode which is supported by the
* platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
*
* Return codes
* 0 - successful
* other values - error
**/
static uint32_t
lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
uint32_t intr_mode = LPFC_INTR_ERROR;
int retval;
if (cfg_mode == 2) {
/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
retval = lpfc_sli_config_port(phba, 3);
if (!retval) {
/* Now, try to enable MSI-X interrupt mode */
retval = lpfc_enable_msix(phba);
if (!retval) {
/* Indicate initialization to MSI-X mode */
phba->intr_type = MSIX;
intr_mode = 2;
}
}
}
/* Fallback to MSI if MSI-X initialization failed */
if (cfg_mode >= 1 && phba->intr_type == NONE) {
retval = lpfc_enable_msi(phba);
if (!retval) {
/* Indicate initialization to MSI mode */
phba->intr_type = MSI;
intr_mode = 1;
}
}
/* Fallback to INTx if both MSI-X/MSI initialization failed */
if (phba->intr_type == NONE) {
retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
}
}
return intr_mode;
}
/**
* lpfc_disable_intr: Disable device interrupt.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to disable device interrupt and disassociate the
* driver's interrupt handler(s) from interrupt vector(s). Depending on the
* interrupt mode, the driver will release the interrupt vector(s) for the
* message signaled interrupt.
**/
static void
lpfc_disable_intr(struct lpfc_hba *phba)
{
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX)
lpfc_disable_msix(phba);
else if (phba->intr_type == MSI)
lpfc_disable_msi(phba);
else if (phba->intr_type == INTx)
free_irq(phba->pcidev->irq, phba);
/* Reset interrupt management states */
phba->intr_type = NONE;
phba->sli.slistat.sli_intr = 0;
return;
}
/**
* lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
* @pdev: pointer to PCI device
@ -2356,6 +2602,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
int error = -ENODEV, retval;
int i, hbq_count;
uint16_t iotag;
uint32_t cfg_mode, intr_mode;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
struct lpfc_adapter_event_header adapter_event;
@ -2409,6 +2656,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->eratt_poll.data = (unsigned long) phba;
pci_set_master(pdev);
pci_save_state(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
@ -2557,7 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_debugfs_initialize(vport);
pci_set_drvdata(pdev, shost);
phba->intr_type = NONE;
phba->MBslimaddr = phba->slim_memmap_p;
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
@ -2565,63 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
/* Configure and enable interrupt */
if (phba->cfg_use_msi == 2) {
/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
error = lpfc_sli_config_port(phba, 3);
if (error)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0427 Firmware not capable of SLI 3 mode.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0426 Firmware capable of SLI 3 mode.\n");
/* Now, try to enable MSI-X interrupt mode */
error = lpfc_enable_msix(phba);
if (!error) {
phba->intr_type = MSIX;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0430 enable MSI-X mode.\n");
}
}
}
/* Fallback to MSI if MSI-X initialization failed */
if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
retval = pci_enable_msi(phba->pcidev);
if (!retval) {
phba->intr_type = MSI;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0473 enable MSI mode.\n");
} else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 enable IRQ mode.\n");
}
/* MSI-X is the only case that doesn't need to call request_irq */
if (phba->intr_type != MSIX) {
retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (retval) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
"interrupt handler failed\n");
error = retval;
goto out_disable_msi;
} else if (phba->intr_type != MSI)
phba->intr_type = INTx;
}
/* Configure sysfs attributes */
if (lpfc_alloc_sysfs_attr(vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1476 Failed to allocate sysfs attr\n");
error = -ENOMEM;
goto out_free_irq;
goto out_destroy_port;
}
if (lpfc_sli_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1477 Failed to set up hba\n");
error = -ENODEV;
goto out_remove_device;
cfg_mode = phba->cfg_use_msi;
while (true) {
/* Configure and enable interrupt */
intr_mode = lpfc_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0426 Failed to enable interrupt.\n");
goto out_free_sysfs_attr;
}
/* HBA SLI setup */
if (lpfc_sli_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1477 Failed to set up hba\n");
error = -ENODEV;
goto out_remove_device;
}
/* Wait 50ms for the interrupts of previous mailbox commands */
msleep(50);
/* Check active interrupts received */
if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
/* Log the current active interrupt mode */
phba->intr_mode = intr_mode;
lpfc_log_intr_mode(phba, intr_mode);
break;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0451 Configure interrupt mode (%d) "
"failed active interrupt test.\n",
intr_mode);
if (intr_mode == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0479 Failed to enable "
"interrupt.\n");
error = -ENODEV;
goto out_remove_device;
}
/* Stop HBA SLI setups */
lpfc_stop_port(phba);
/* Disable the current interrupt mode */
lpfc_disable_intr(phba);
/* Try next level of interrupt mode */
cfg_mode = --intr_mode;
}
}
/*
@ -2629,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
* the value of can_queue.
*/
shost->can_queue = phba->cfg_hba_queue_depth - 10;
if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
if (lpfc_prot_mask && lpfc_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
scsi_host_set_prot(shost, lpfc_prot_mask);
scsi_host_set_guard(shost, lpfc_prot_guard);
}
}
if (!_dump_buf_data) {
int pagecnt = 10;
while (pagecnt) {
spin_lock_init(&_dump_buf_lock);
_dump_buf_data =
(char *) __get_free_pages(GFP_KERNEL, pagecnt);
if (_dump_buf_data) {
printk(KERN_ERR "BLKGRD allocated %d pages for "
"_dump_buf_data at 0x%p\n",
(1 << pagecnt), _dump_buf_data);
_dump_buf_data_order = pagecnt;
memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
<< pagecnt));
break;
} else {
--pagecnt;
}
}
if (!_dump_buf_data_order)
printk(KERN_ERR "BLKGRD ERROR unable to allocate "
"memory for hexdump\n");
} else {
printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
"\n", _dump_buf_data);
}
if (!_dump_buf_dif) {
int pagecnt = 10;
while (pagecnt) {
_dump_buf_dif =
(char *) __get_free_pages(GFP_KERNEL, pagecnt);
if (_dump_buf_dif) {
printk(KERN_ERR "BLKGRD allocated %d pages for "
"_dump_buf_dif at 0x%p\n",
(1 << pagecnt), _dump_buf_dif);
_dump_buf_dif_order = pagecnt;
memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
<< pagecnt));
break;
} else {
--pagecnt;
}
}
if (!_dump_buf_dif_order)
printk(KERN_ERR "BLKGRD ERROR unable to allocate "
"memory for hexdump\n");
} else {
printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
_dump_buf_dif);
}
lpfc_host_attrib_init(shost);
@ -2646,29 +2957,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(adapter_event),
(char *) &adapter_event,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
scsi_scan_host(shost);
LPFC_NL_VENDOR_ID);
return 0;
out_remove_device:
lpfc_free_sysfs_attr(vport);
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
out_free_irq:
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
if (phba->intr_type == MSIX)
lpfc_disable_msix(phba);
else
free_irq(phba->pcidev->irq, phba);
out_disable_msi:
if (phba->intr_type == MSI)
pci_disable_msi(phba->pcidev);
lpfc_disable_intr(phba);
lpfc_sli_hba_down(phba);
lpfc_sli_brdrestart(phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(vport);
out_destroy_port:
destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
@ -2709,7 +3013,7 @@ out:
* @pdev: pointer to PCI device
*
* This routine is to be registered to the kernel's PCI subsystem. When an
* Emulex HBA is removed from PCI bus. It perform all the necessary cleanup
* Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
* for the HBA device to be removed from the PCI subsystem properly.
**/
static void __devexit
@ -2717,18 +3021,27 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_vport **vports;
struct lpfc_hba *phba = vport->phba;
int i;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
kthread_stop(phba->worker_thread);
/* Release all the vports against this physical port */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
fc_vport_terminate(vports[i]->fc_vport);
lpfc_destroy_vport_work_array(phba, vports);
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
lpfc_cleanup(vport);
@ -2748,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
if (phba->intr_type == MSIX)
lpfc_disable_msix(phba);
else {
free_irq(phba->pcidev->irq, phba);
if (phba->intr_type == MSI)
pci_disable_msi(phba->pcidev);
}
/* Disable interrupt */
lpfc_disable_intr(phba);
pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
@ -2785,6 +3093,115 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
/**
* lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
* @pdev: pointer to PCI device
* @msg: power management message
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it quiesces the
* device by stopping the driver's worker thread for the device, turning off
* device's interrupts and DMA, and bringing the device offline. Note that as the
* driver implements the minimum PM requirements to a power-aware driver's PM
* support for suspend/resume -- all the possible PM messages (SUSPEND,
* HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
* and the driver will fully reinitialize its device during resume() method
* call, the driver will set device to PCI_D3hot state in PCI config space
* instead of setting it according to the @msg provided by the PM.
*
* Return code
* 0 - driver suspended the device
* Error otherwise
**/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0473 PCI device Power Management suspend.\n");
/* Bring down the device */
lpfc_offline_prep(phba);
lpfc_offline(phba);
kthread_stop(phba->worker_thread);
/* Disable interrupt from device */
lpfc_disable_intr(phba);
/* Save device state to PCI config space */
pci_save_state(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
/**
* lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
* @pdev: pointer to PCI device
*
* This routine is to be registered to the kernel's PCI subsystem to support
* system Power Management (PM). When PM invokes this method, it restores
* the device's PCI config space state and fully reinitializes the device
* and brings it online. Note that as the driver implements the minimum PM
* requirements to a power-aware driver's PM for suspend/resume -- all
* the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
* method call will be treated as SUSPEND and the driver will fully
* reinitialize its device during resume() method call, the device will be
* set to PCI_D0 directly in PCI config space before restoring the state.
*
* Return code
* 0 - driver resumed the device
* Error otherwise
**/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 PCI device Power Management resume.\n");
/* Restore device state from PCI config space */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pdev->is_busmaster)
pci_set_master(pdev);
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
if (IS_ERR(phba->worker_thread)) {
error = PTR_ERR(phba->worker_thread);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0434 PM resume failed to start worker "
"thread: error=x%x.\n", error);
return error;
}
/* Configure and enable interrupt */
intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0430 PM resume Failed to enable interrupt\n");
return -EIO;
} else
phba->intr_mode = intr_mode;
/* Restart HBA and bring it online */
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
return 0;
}
/**
* lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
* @pdev: pointer to PCI device.
@ -2828,13 +3245,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
if (phba->intr_type == MSIX)
lpfc_disable_msix(phba);
else {
free_irq(phba->pcidev->irq, phba);
if (phba->intr_type == MSI)
pci_disable_msi(phba->pcidev);
}
/* Disable interrupt */
lpfc_disable_intr(phba);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@ -2862,7 +3274,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
int error, retval;
uint32_t intr_mode;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
@ -2871,61 +3283,31 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
if (pdev->is_busmaster)
pci_set_master(pdev);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
/* Enable configured interrupt method */
phba->intr_type = NONE;
if (phba->cfg_use_msi == 2) {
/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
error = lpfc_sli_config_port(phba, 3);
if (error)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0478 Firmware not capable of SLI 3 mode.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0479 Firmware capable of SLI 3 mode.\n");
/* Now, try to enable MSI-X interrupt mode */
error = lpfc_enable_msix(phba);
if (!error) {
phba->intr_type = MSIX;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0480 enable MSI-X mode.\n");
}
}
}
/* Fallback to MSI if MSI-X initialization failed */
if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
retval = pci_enable_msi(phba->pcidev);
if (!retval) {
phba->intr_type = MSI;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0481 enable MSI mode.\n");
} else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0470 enable IRQ mode.\n");
}
/* MSI-X is the only case the doesn't need to call request_irq */
if (phba->intr_type != MSIX) {
retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (retval) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0471 Enable interrupt handler "
"failed\n");
} else if (phba->intr_type != MSI)
phba->intr_type = INTx;
}
/* Configure and enable interrupt */
intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0427 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
} else
phba->intr_mode = intr_mode;
/* Take device offline; this will perform cleanup */
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
return PCI_ERS_RESULT_RECOVERED;
}
@ -3037,6 +3419,8 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
};
@ -3100,6 +3484,19 @@ lpfc_exit(void)
fc_release_transport(lpfc_transport_template);
if (lpfc_enable_npiv)
fc_release_transport(lpfc_vport_transport_template);
if (_dump_buf_data) {
printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
"at 0x%p\n",
(1L << _dump_buf_data_order), _dump_buf_data);
free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
}
if (_dump_buf_dif) {
printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
"at 0x%p\n",
(1L << _dump_buf_dif_order), _dump_buf_dif);
free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
}
}
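The two BLKGRD buffers released here are page-order allocations, so each free_pages() call returns 2^order contiguous pages, matching the "%lu pages" count printed above. A minimal sketch of the allocation-time counterpart, with the order value assumed purely for illustration (the real sizing logic lives elsewhere in the driver):

	unsigned int order = 4;	/* assumed: 2^4 = 16 contiguous pages */

	_dump_buf_data = (char *)__get_free_pages(GFP_KERNEL, order);
	if (_dump_buf_data)
		_dump_buf_data_order = order;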
module_init(lpfc_init);

View file

@ -27,6 +27,7 @@
#define LOG_FCP 0x40 /* FCP traffic history */
#define LOG_NODE 0x80 /* Node table events */
#define LOG_TEMP 0x100 /* Temperature sensor events */
#define LOG_BG 0x200 /* BlockGuard events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */

View file

@ -76,6 +76,38 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
return;
}
/**
* lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* This function creates a dump memory mailbox command to dump the wake-up
* parameters.
*/
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
void *ctx;
mb = &pmb->mb;
/* Save context so that we can restore after memset */
ctx = pmb->context2;
/* Set up to dump the wake-up parameters region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->mbxOwner = OWN_HOST;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = 0;
mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
pmb->context2 = ctx;
return;
}
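For context, a caller lets the helper above build the command and then issues it through the normal mailbox path. A minimal sketch, assuming the driver's usual mailbox mempool and the polled lpfc_sli_issue_mbox() interface (not part of this commit):

	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (pmb) {
		lpfc_dump_wakeup_param(phba, pmb);	/* build MBX_DUMP_MEMORY */
		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
			;	/* wake-up params unavailable; treat as non-fatal */
		mempool_free(pmb, phba->mbox_mem_pool);
	}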
/**
* lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
* @phba: pointer to lpfc hba data structure.
@ -1061,9 +1093,14 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
/* The Host Group Pointer is always in SLIM */
mb->un.varCfgPort.hps = 1;
/* If the HBA supports SLI-3, ask for it */
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
@ -1163,16 +1200,11 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
sizeof(*phba->host_gp));
}
/* Setup Port Group ring pointer */
if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) {
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_inb_pgp.port);
phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
} else if (phba->sli_rev == 3) {
/* Setup Port Group offset */
if (phba->sli_rev == 3)
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_pgp.port);
phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
} else
else
pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
pdma_addr = phba->slim2p.phys + pgp_offset;
phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
@ -1285,10 +1317,12 @@ lpfc_mbox_get(struct lpfc_hba * phba)
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
unsigned long iflag;
/* This function expects to be called from interrupt context */
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}

View file

@ -22,18 +22,20 @@
#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
#define FC_REG_CT_EVENT 0x0004 /* CT request events */
#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */
#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */
#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */
#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */
#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */
#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */
#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
FC_REG_RSCN_EVENT | \
FC_REG_CT_EVENT | \
FC_REG_DUMP_EVENT | \
FC_REG_TEMPERATURE_EVENT | \
FC_REG_VPORTRSCN_EVENT | \
FC_REG_ELS_EVENT | \
FC_REG_FABRIC_EVENT | \
FC_REG_SCSI_EVENT | \
@ -52,6 +54,13 @@
* The payload sent via the fc transport is one-way driver->application.
*/
/* RSCN event header */
struct lpfc_rscn_event_header {
uint32_t event_type;
uint32_t payload_length; /* RSCN data length in bytes */
uint32_t rscn_payload[];
};
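These headers describe vendor-unique FC events that the driver pushes up through the FC transport's netlink channel. A minimal sketch of the posting side, assuming shost, rscn_data, and payload_len come from the surrounding driver code and the buffer is sized for the flexible rscn_payload[] array:

	struct lpfc_rscn_event_header *ev;

	ev = kzalloc(sizeof(*ev) + payload_len, GFP_KERNEL);
	if (ev) {
		ev->event_type = FC_REG_RSCN_EVENT;
		ev->payload_length = payload_len;
		memcpy(ev->rscn_payload, rscn_data, payload_len);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(*ev) + payload_len, (char *)ev,
				LPFC_NL_VENDOR_ID);
		kfree(ev);
	}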
/* els event header */
struct lpfc_els_event_header {
uint32_t event_type;
@ -65,6 +74,7 @@ struct lpfc_els_event_header {
#define LPFC_EVENT_PRLO_RCV 0x02
#define LPFC_EVENT_ADISC_RCV 0x04
#define LPFC_EVENT_LSRJT_RCV 0x08
#define LPFC_EVENT_LOGO_RCV 0x10
/* special els lsrjt event */
struct lpfc_lsrjt_event {
@ -74,6 +84,11 @@ struct lpfc_lsrjt_event {
uint32_t explanation;
};
/* special els logo event */
struct lpfc_logo_event {
struct lpfc_els_event_header header;
uint8_t logo_wwpn[8];
};
/* fabric event header */
struct lpfc_fabric_event_header {
@ -125,6 +140,7 @@ struct lpfc_scsi_varqueuedepth_event {
/* special case scsi check condition event */
struct lpfc_scsi_check_condition_event {
struct lpfc_scsi_event_header scsi_event;
uint8_t opcode;
uint8_t sense_key;
uint8_t asc;
uint8_t ascq;

View file

@ -1929,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
return ndlp->nlp_state;
}

The diff for this file is not shown because of its large size.

View file

@ -124,6 +124,8 @@ struct lpfc_scsi_buf {
uint32_t seg_cnt; /* Number of scatter-gather segments returned by
* dma_map_sg. The driver needs this for calls
* to dma_unmap_sg. */
uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
/*

View file

@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
@ -1258,68 +1259,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
return 0;
}
/**
* lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
* @phba: Pointer to HBA context object.
* @tag: Tag for the HBQ buffer.
*
* This function is called from the unsolicited event handler code path to get the
* HBQ buffer associated with an unsolicited iocb. This function is called with
* no lock held. It returns the buffer associated with the given tag and posts
* another buffer to the firmware. Note that the new buffer must be allocated
* before taking the hbalock and that the hba lock must be held until it is
* finished with the hbq entry swap.
**/
static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
uint32_t hbqno;
void *virt; /* virtual address ptr */
dma_addr_t phys; /* mapped address */
unsigned long flags;
hbqno = tag >> 16;
new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
if (new_hbq_entry)
(phba->hbqs[hbqno].hbq_free_buffer)(phba,
new_hbq_entry);
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (hbq_entry == NULL) {
if (new_hbq_entry)
(phba->hbqs[hbqno].hbq_free_buffer)(phba,
new_hbq_entry);
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
list_del(&hbq_entry->dbuf.list);
if (new_hbq_entry == NULL) {
list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
return &hbq_entry->dbuf;
}
new_hbq_entry->tag = -1;
phys = new_hbq_entry->dbuf.phys;
virt = new_hbq_entry->dbuf.virt;
new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
hbq_entry->dbuf.phys = phys;
hbq_entry->dbuf.virt = virt;
lpfc_sli_free_hbq(phba, hbq_entry);
list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
spin_unlock_irqrestore(&phba->hbalock, flags);
return &new_hbq_entry->dbuf;
}
/**
* lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
* @phba: Pointer to HBA context object.
@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
**/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
uint32_t tag)
struct lpfc_sli_ring *pring,
uint32_t tag)
{
struct hbq_dmabuf *hbq_entry;
if (tag & QUE_BUFTAG_BIT)
return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
else
return lpfc_sli_replace_hbqbuff(phba, tag);
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (!hbq_entry)
return NULL;
return &hbq_entry->dbuf;
}
@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
match = 0;
irsp = &(saveq->iocb);
if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
return 1;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_adjust_queue_depth(phba);
lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_adjust_queue_depth(phba);
lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
uint16_t skip_post;
volatile uint32_t word0;
void __iomem *to_slim;
@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport->port_state) {
skip_post = 1;
if (phba->pport->port_state)
word0 = 1; /* This is really setting up word1 */
} else {
skip_post = 0;
else
word0 = 0; /* This is really setting up word1 */
}
to_slim = phba->MBslimaddr + sizeof (uint32_t);
writel(*(uint32_t *) mb, to_slim);
readl(to_slim); /* flush */
@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
if (skip_post)
mdelay(100);
else
mdelay(2000);
/* Give the INITFF and Post time to settle. */
mdelay(100);
lpfc_hba_down_post(phba);
@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
spin_unlock_irq(&phba->hbalock);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
rc = lpfc_sli_chipset_init(phba);
if (rc)
break;
@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
LPFC_SLI3_INB_ENABLED);
LPFC_SLI3_INB_ENABLED |
LPFC_SLI3_BG_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
if (pmb->mb.un.varCfgPort.ginb) {
phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
phba->inb_last_counter =
phba->mbox->us.s3_inb_pgp.counter;
} else {
phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_pgp.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
}
if (phba->cfg_enable_bg) {
if (pmb->mb.un.varCfgPort.gbg)
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0443 Adapter did not grant "
"BlockGuard\n");
}
} else {
phba->hbq_get = NULL;
phba->port_gp = phba->mbox->us.s2.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
return;
}
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data), (char *) &temp_event_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
LPFC_NL_VENDOR_ID);
}
@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
uint32_t ha_copy;
/* If PCI channel is offline, don't process it */
if (unlikely(pci_channel_offline(phba->pcidev)))
return 0;
/* If somebody is waiting to handle an eratt, don't process it
* here. The brdkill function will do this.
*/
@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
unsigned long iflag;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;
@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
else
work_ha_copy &= ~HA_LATT;
@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
(HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
control = readl(phba->HCregaddr);
lpfc_debugfs_slow_ring_trc(phba,
@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
(uint32_t)((unsigned long)
&phba->work_waitq));
}
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
}
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
if (work_ha_copy & HA_ERATT)
lpfc_sli_read_hs(phba);
if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
if (pmbox->mbxOwner != OWN_HOST) {
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
work_ha_copy &= ~HA_MBATT;
} else {
phba->sli.mbox_active = NULL;
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
del_timer(&phba->sli.mbox_tmo);
if (pmb->mbox_cmpl) {
@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
goto send_current_mbox;
}
}
spin_lock(&phba->pport->work_port_lock);
spin_lock_irqsave(
&phba->pport->work_port_lock,
iflag);
phba->pport->work_port_events &=
~WORKER_MBOX_TMO;
spin_unlock(&phba->pport->work_port_lock);
spin_unlock_irqrestore(
&phba->pport->work_port_lock,
iflag);
lpfc_mbox_cmpl_put(phba, pmb);
}
} else
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
@ -5461,9 +5413,9 @@ send_current_mbox:
"MBX_SUCCESS");
}
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
phba->work_ha |= work_ha_copy;
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_worker_wake_up(phba);
}
return IRQ_HANDLED;
@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
uint32_t ha_copy;
unsigned long status;
unsigned long iflag;
/* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
/* Need to read HA REG for FCP ring and other ring events */
ha_copy = readl(phba->HAregaddr);
/* Clear only the attention sources related to the fast path */
spin_lock(&phba->hbalock);
spin_lock_irqsave(&phba->hbalock, iflag);
writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;

View file

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.2.8"
#define LPFC_DRIVER_VERSION "8.3.0"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"

View file

@ -288,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int vpi;
int rc = VPORT_ERROR;
int status;
int size;
if ((phba->sli_rev < 3) ||
!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
@ -351,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
if (size) {
vport->vname = kzalloc(size+1, GFP_KERNEL);
if (!vport->vname) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1814 Create VPORT failed. "
"vname allocation failed.\n");
rc = VPORT_ERROR;
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
goto error_out;
}
memcpy(vport->vname, fc_vport->symbolic_name, size+1);
}
if (fc_vport->node_name != 0)
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
if (fc_vport->port_name != 0)
@ -394,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
/* Create binary sysfs attribute for vport */
lpfc_alloc_sysfs_attr(vport);
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
@ -405,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (disable) {
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
rc = VPORT_OK;
goto out;
}
@ -587,8 +575,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
lpfc_debugfs_terminate(vport);
/* Remove FC host and then SCSI host with the vport */
fc_remove_host(lpfc_shost_from_vport(vport));
scsi_remove_host(lpfc_shost_from_vport(vport));

View file

@ -53,7 +53,8 @@ struct mac_esp_priv {
void __iomem *pdma_io;
int error;
};
static struct platform_device *internal_esp, *external_esp;
static struct platform_device *internal_pdev, *external_pdev;
static struct esp *esp_chips[2];
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
@ -170,7 +171,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
#define MAC_ESP_PDMA_LOOP(operands) \
asm volatile ( \
" tstw %2 \n" \
" tstw %1 \n" \
" jbeq 20f \n" \
"1: movew " operands " \n" \
"2: movew " operands " \n" \
@ -188,14 +189,14 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
"14: movew " operands " \n" \
"15: movew " operands " \n" \
"16: movew " operands " \n" \
" subqw #1,%2 \n" \
" subqw #1,%1 \n" \
" jbne 1b \n" \
"20: tstw %3 \n" \
"20: tstw %2 \n" \
" jbeq 30f \n" \
"21: movew " operands " \n" \
" subqw #1,%3 \n" \
" subqw #1,%2 \n" \
" jbne 21b \n" \
"30: tstw %4 \n" \
"30: tstw %3 \n" \
" jbeq 40f \n" \
"31: moveb " operands " \n" \
"32: nop \n" \
@ -223,8 +224,8 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
" .long 31b,40b \n" \
" .long 32b,40b \n" \
" .previous \n" \
: "+a" (addr) \
: "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
: "+a" (addr), "+r" (count32), "+r" (count2) \
: "g" (count1), "a" (mep->pdma_io))
static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
@ -247,19 +248,20 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
do {
unsigned int count32 = esp_count >> 5;
unsigned int count2 = (esp_count & 0x1F) >> 1;
unsigned int count1 = esp_count & 1;
unsigned int start_addr = addr;
if (mac_esp_wait_for_dreq(esp))
break;
if (write) {
MAC_ESP_PDMA_LOOP("%1@,%0@+");
MAC_ESP_PDMA_LOOP("%4@,%0@+");
esp_count -= addr - start_addr;
} else {
unsigned int n;
MAC_ESP_PDMA_LOOP("%0@+,%1@");
MAC_ESP_PDMA_LOOP("%0@+,%4@");
if (mac_esp_wait_for_empty_fifo(esp))
break;
@ -442,6 +444,32 @@ static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}
static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
{
int got_intr;
/*
* This is an edge-triggered IRQ, so we have to be careful to
* avoid missing a transition when it is shared by two ESP devices.
*/
do {
got_intr = 0;
if (esp_chips[0] &&
(mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
(void)scsi_esp_intr(irq, esp_chips[0]);
got_intr = 1;
}
if (esp_chips[1] &&
(mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
(void)scsi_esp_intr(irq, esp_chips[1]);
got_intr = 1;
}
} while (got_intr);
return IRQ_HANDLED;
}
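The loop structure matters here: because the line is edge-triggered, returning while either chip still has ESP_STAT_INTR set would leave the request asserted with no further edge to retrigger the handler, so the dispatcher re-polls both chips until neither reports a pending interrupt.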
static struct esp_driver_ops mac_esp_ops = {
.esp_write8 = mac_esp_write8,
.esp_read8 = mac_esp_read8,
@ -556,10 +584,16 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
}
host->irq = IRQ_MAC_SCSI;
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
esp);
if (err < 0)
goto fail_free_priv;
esp_chips[dev->id] = esp;
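/* Make the new chip pointer globally visible before checking whether
 * the other chip has already registered the shared IRQ handler.
 */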
mb();
if (esp_chips[!dev->id] == NULL) {
err = request_irq(host->irq, mac_scsi_esp_intr, 0,
"Mac ESP", NULL);
if (err < 0) {
esp_chips[dev->id] = NULL;
goto fail_free_priv;
}
}
err = scsi_esp_register(esp, &dev->dev);
if (err)
@ -568,7 +602,8 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
return 0;
fail_free_irq:
free_irq(host->irq, esp);
if (esp_chips[!dev->id] == NULL)
free_irq(host->irq, esp);
fail_free_priv:
kfree(mep);
fail_free_command_block:
@ -587,7 +622,9 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
scsi_esp_unregister(esp);
free_irq(irq, esp);
esp_chips[dev->id] = NULL;
if (!(esp_chips[0] || esp_chips[1]))
free_irq(irq, NULL);
kfree(mep);
@ -614,19 +651,18 @@ static int __init mac_esp_init(void)
if (err)
return err;
internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
if (internal_esp && platform_device_add(internal_esp)) {
platform_device_put(internal_esp);
internal_esp = NULL;
internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
if (internal_pdev && platform_device_add(internal_pdev)) {
platform_device_put(internal_pdev);
internal_pdev = NULL;
}
external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
if (external_pdev && platform_device_add(external_pdev)) {
platform_device_put(external_pdev);
external_pdev = NULL;
}
external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
if (external_esp && platform_device_add(external_esp)) {
platform_device_put(external_esp);
external_esp = NULL;
}
if (internal_esp || external_esp) {
if (internal_pdev || external_pdev) {
return 0;
} else {
platform_driver_unregister(&esp_mac_driver);
@ -638,13 +674,13 @@ static void __exit mac_esp_exit(void)
{
platform_driver_unregister(&esp_mac_driver);
if (internal_esp) {
platform_device_unregister(internal_esp);
internal_esp = NULL;
if (internal_pdev) {
platform_device_unregister(internal_pdev);
internal_pdev = NULL;
}
if (external_esp) {
platform_device_unregister(external_esp);
external_esp = NULL;
if (external_pdev) {
platform_device_unregister(external_pdev);
external_pdev = NULL;
}
}

View file

@ -3401,8 +3401,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
data->NumAddress = pci_resource_len (pdev, 0);
data->MmioAddress = ioremap_nocache(pci_resource_start(pdev, 1),
pci_resource_len (pdev, 1));
data->MmioAddress = pci_ioremap_bar(pdev, 1);
data->MmioLength = pci_resource_len (pdev, 1);
pci_set_master(pdev);

View file

@ -4294,8 +4294,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = -ENODEV;
#if MEMORY_MAPPED_IO
ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1),
pci_resource_len(ha->pdev, 1));
ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
if (!ha->mmpbase) {
printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
goto error_free_response_ring;

View file

@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (ha->fw_dump_reading == 0)
return 0;
@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
int reading;
if (off != 0)
@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
break;
qla_printk(KERN_INFO, ha,
"Firmware dump cleared on (%ld).\n", ha->host_no);
"Firmware dump cleared on (%ld).\n", vha->host_no);
ha->fw_dump_reading = 0;
ha->fw_dumped = 0;
@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
qla_printk(KERN_INFO, ha,
"Raw firmware dump ready for read on (%ld).\n",
ha->host_no);
vha->host_no);
}
break;
case 2:
qla2x00_alloc_fw_dump(ha);
qla2x00_alloc_fw_dump(vha);
break;
case 3:
qla2x00_system_error(ha);
qla2x00_system_error(vha);
break;
}
return (count);
@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN))
return 0;
@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint16_t cnt;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
}
/* Write NVRAM. */
ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base,
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
count);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
return (count);
}
@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (ha->optrom_state != QLA_SREADING)
return 0;
@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (ha->optrom_state != QLA_SWRITING)
return -EINVAL;
@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint32_t start = 0;
uint32_t size = ha->optrom_size;
int val, valid;
@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size));
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
ha->isp_ops->read_optrom(ha, ha->optrom_buffer,
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
break;
case 2:
@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
"Writing flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
ha->isp_ops->write_optrom(ha, ha->optrom_buffer,
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
break;
default:
@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN))
return 0;
@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
return 0;
/* Write NVRAM. */
ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count);
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
return count;
}
@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint16_t iter, addr, offset;
int rval;
@ -429,7 +440,7 @@ do_read:
offset = 0;
}
rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset,
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
SFP_BLOCK_SIZE);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
@ -469,30 +480,31 @@ static struct sysfs_entry {
};
void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
struct Scsi_Host *host = ha->host;
struct Scsi_Host *host = vha->host;
struct sysfs_entry *iter;
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
qla_printk(KERN_INFO, ha,
qla_printk(KERN_INFO, vha->hw,
"Unable to create sysfs %s binary attribute "
"(%d).\n", iter->name, ret);
}
}
void
qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
struct Scsi_Host *host = ha->host;
struct Scsi_Host *host = vha->host;
struct sysfs_entry *iter;
struct qla_hw_data *ha = vha->hw;
for (iter = bin_file_entries; iter->name; iter++) {
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
}
if (ha->beacon_blink_led == 1)
ha->isp_ops->beacon_off(ha);
ha->isp_ops->beacon_off(vha);
}
/* Scsi_Host attributes. */
@ -519,22 +531,24 @@ static ssize_t
qla2x00_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
char fw_str[30];
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
char fw_str[128];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops->fw_version_str(ha, fw_str));
ha->isp_ops->fw_version_str(vha, fw_str));
}
static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
uint32_t sn;
if (IS_FWI2_CAPABLE(ha)) {
qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", buf);
}
@ -547,15 +561,16 @@ static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device);
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}
static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
@ -565,43 +580,44 @@ static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->model_desc ? ha->model_desc: "");
vha->hw->model_desc ? vha->hw->model_desc : "");
}
static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops->pci_info_str(ha, pci_info));
vha->hw->isp_ops->pci_info_str(vha, pci_info));
}
static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
int len = 0;
if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
atomic_read(&ha->loop_state) == LOOP_DEAD)
if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
atomic_read(&vha->loop_state) == LOOP_DEAD)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&ha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@ -632,10 +648,10 @@ static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int len = 0;
switch (ha->zio_mode) {
switch (vha->hw->zio_mode) {
case QLA_ZIO_MODE_6:
len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
break;
@ -650,7 +666,8 @@ static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
int val = 0;
uint16_t zio_mode;
@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
/* Update per-hba values and queue a reset. */
if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = zio_mode;
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return strlen(buf);
}
@ -677,16 +694,16 @@ static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100);
return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}
static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int val = 0;
uint16_t zio_timer;
@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
return -ERANGE;
zio_timer = (uint16_t)(val / 100);
ha->zio_timer = zio_timer;
vha->hw->zio_timer = zio_timer;
return strlen(buf);
}
@ -705,10 +722,10 @@ static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int len = 0;
if (ha->beacon_blink_led)
if (vha->hw->beacon_blink_led)
len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
else
len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@ -719,14 +736,15 @@ static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
int val = 0;
int rval;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (val)
rval = ha->isp_ops->beacon_on(ha);
rval = ha->isp_ops->beacon_on(vha);
else
rval = ha->isp_ops->beacon_off(ha);
rval = ha->isp_ops->beacon_off(vha);
if (rval != QLA_SUCCESS)
count = 0;
@ -750,8 +768,8 @@ static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
@ -760,8 +778,8 @@ static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
@ -770,8 +788,8 @@ static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
@ -780,8 +798,8 @@ static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
@ -791,8 +809,8 @@ static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
ha->qla_stats.total_isp_aborts);
}
@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
fc_host_port_id(shost) = ha->d_id.b.domain << 16 |
ha->d_id.b.area << 8 | ha->d_id.b.al_pa;
fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
struct qla_hw_data *ha = ((struct scsi_qla_host *)
(shost_priv(shost)))->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
switch (ha->link_data_rate) {
@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
uint32_t port_type = FC_PORTTYPE_UNKNOWN;
if (ha->parent) {
if (vha->vp_idx) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
return;
}
switch (ha->current_topology) {
switch (vha->hw->current_topology) {
case ISP_CFG_NL:
port_type = FC_PORTTYPE_LPORT;
break;
@ -908,11 +927,11 @@ static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
scsi_qla_host_t *ha = shost_priv(host);
scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
u64 node_name = 0;
list_for_each_entry(fcport, &ha->fcports, list) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
node_name = wwn_to_u64(fcport->node_name);
@ -927,11 +946,11 @@ static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
scsi_qla_host_t *ha = shost_priv(host);
scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
u64 port_name = 0;
list_for_each_entry(fcport, &ha->fcports, list) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
port_name = wwn_to_u64(fcport->port_name);
@ -946,11 +965,11 @@ static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
scsi_qla_host_t *ha = shost_priv(host);
scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
uint32_t port_id = ~0U;
list_for_each_entry(fcport, &ha->fcports, list) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
port_id = fcport->d_id.b.domain << 16 |
@ -999,9 +1018,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
}
@ -1011,16 +1030,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
qla2x00_loop_reset(ha);
qla2x00_loop_reset(vha);
return 0;
}
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
scsi_qla_host_t *vha = shost_priv(shost);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
@ -1032,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
__func__, ha->host_no));
__func__, base_vha->host_no));
goto done;
}
memset(stats, 0, DMA_POOL_SIZE);
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
rval = qla24xx_get_isp_stats(ha, stats, stats_dma);
} else if (atomic_read(&ha->loop_state) == LOOP_READY &&
!test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
!test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
!test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
rval = qla2x00_get_link_status(ha, ha->loop_id, stats,
stats_dma);
rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
stats, stats_dma);
}
if (rval != QLA_SUCCESS)
@ -1077,29 +1098,29 @@ done:
static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost));
qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}
static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}
static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *vha = shost_priv(shost);
u64 node_name;
if (ha->device_flags & SWITCH_FOUND)
node_name = wwn_to_u64(ha->fabric_node_name);
if (vha->device_flags & SWITCH_FOUND)
node_name = wwn_to_u64(vha->fabric_node_name);
else
node_name = wwn_to_u64(ha->node_name);
node_name = wwn_to_u64(vha->node_name);
fc_host_fabric_name(shost) = node_name;
}
@ -1107,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
if (!ha->flags.online)
if (!base_vha->flags.online)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT)
else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
else
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
@ -1121,8 +1143,11 @@ static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
int ret = 0;
scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha;
int cnt = 0;
uint8_t qos = QLA_DEFAULT_QUE_QOS;
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
scsi_qla_host_t *vha = NULL;
struct qla_hw_data *ha = base_vha->hw;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@ -1144,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
atomic_set(&vha->vp_state, VP_FAILED);
/* ready to create vport */
qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
vha->vp_idx);
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->vp_err_state = VP_ERR_PORTDWN;
vha->vp_prev_err_state = VP_ERR_UNKWN;
/* Check if physical ha port is Up */
if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
atomic_read(&ha->loop_state) == LOOP_DEAD) {
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
/* Don't retry or attempt login of this virtual port */
DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
vha->host_no));
base_vha->host_no));
atomic_set(&vha->loop_state, LOOP_DEAD);
if (!disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@ -1171,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
fc_host_supported_classes(vha->host) =
fc_host_supported_classes(ha->host);
fc_host_supported_classes(base_vha->host);
fc_host_supported_speeds(vha->host) =
fc_host_supported_speeds(ha->host);
fc_host_supported_speeds(base_vha->host);
qla24xx_vport_disable(fc_vport, disable);
/* Create a queue pair for the vport */
if (ha->mqenable) {
if (ha->npiv_info) {
for (; cnt < ha->nvram_npiv_size; cnt++) {
if (ha->npiv_info[cnt].port_name ==
vha->port_name &&
ha->npiv_info[cnt].node_name ==
vha->node_name) {
qos = ha->npiv_info[cnt].q_qos;
break;
}
}
}
qla25xx_create_queues(vha, qos);
}
return 0;
vport_create_failed_2:
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
kfree(vha->port_name);
kfree(vha->node_name);
scsi_host_put(vha->host);
return FC_VPORT_FAILED;
}
@ -1191,17 +1231,34 @@ static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
scsi_qla_host_t *vha = fc_vport->dd_data;
scsi_qla_host_t *pha = to_qla_parent(vha);
fc_port_t *fcport, *tfcport;
struct qla_hw_data *ha = vha->hw;
uint16_t id = vha->vp_idx;
while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
if (ha->mqenable) {
if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"Queue delete failed.\n");
vha->req_ques[0] = ha->req_q_map[0]->id;
}
kfree(vha->node_name);
kfree(vha->port_name);
qla24xx_disable_vp(vha);
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
list_del(&fcport->list);
kfree(fcport);
fcport = NULL;
}
qla24xx_deallocate_vp_id(vha);
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
@ -1210,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->host_no, vha->vp_idx, vha));
}
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
scsi_host_put(vha->host);
qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
return 0;
}
@ -1318,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
};
void
qla2x00_init_host_attr(scsi_qla_host_t *ha)
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;;
fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@ -1338,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(ha->host) = speed;
fc_host_supported_speeds(vha->host) = speed;
}
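The conversion pattern running through this file: each callback first resolves the per-port scsi_qla_host_t (vha) from the Scsi_Host, then reaches adapter-wide state through vha->hw. A minimal sketch of the idiom with a hypothetical attribute, reusing the zio_timer field shown above:

static ssize_t
example_attr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); /* per-port view */
	struct qla_hw_data *ha = vha->hw;	/* shared physical-adapter state */

	return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100);
}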

View file

@ -9,7 +9,7 @@
#include <linux/delay.h>
static inline void
qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
fw_dump->fw_major_version = htonl(ha->fw_major_version);
fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
}
static inline void *
qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
/* Request queue. */
memcpy(ptr, ha->request_ring, ha->request_q_length *
memcpy(ptr, req->ring, req->length *
sizeof(request_t));
/* Response queue. */
ptr += ha->request_q_length * sizeof(request_t);
memcpy(ptr, ha->response_ring, ha->response_q_length *
ptr += req->length * sizeof(request_t);
memcpy(ptr, rsp->ring, rsp->length *
sizeof(response_t));
return ptr + (ha->response_q_length * sizeof(response_t));
return ptr + (rsp->length * sizeof(response_t));
}
static int
qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
int rval;
@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
}
static int
qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
uint32_t cram_size, void **nxt)
{
int rval;
@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
}
static int
qla24xx_soft_reset(scsi_qla_host_t *ha)
qla24xx_soft_reset(struct qla_hw_data *ha)
{
int rval = QLA_SUCCESS;
uint32_t cnt;
@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
}
static int
qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
uint32_t ram_words, void **nxt)
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
uint16_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
* @hardware_locked: Called with the hardware_lock
*/
void
qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2300_fw_dump *fw;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
flags = 0;
@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
ha->host_no, ha->fw_dump);
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
* @hardware_locked: Called with the hardware_lock
*/
void
qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt, timer;
uint16_t risc_address;
uint16_t mb0, mb2;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2100_fw_dump *fw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = 0;
mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
ha->host_no, ha->fw_dump);
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
}
void
qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
ha->host_no, ha->fw_dump);
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
}
void
qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct device_reg_25xxmq __iomem *reg25;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
uint32_t ext_mem_cnt;
void *nxt;
struct qla2xxx_fce_chain *fcec;
struct qla2xxx_mq_chain *mq = NULL;
uint32_t qreg_size;
uint8_t req_cnt, rsp_cnt, que_cnt;
uint32_t que_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
/* Multi queue registers */
if (ha->mqenable) {
qreg_size = sizeof(struct qla2xxx_mq_chain);
mq = kzalloc(qreg_size, GFP_KERNEL);
if (!mq)
goto qla25xx_fw_dump_failed_0;
req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
mq->count = htonl(que_cnt);
mq->chain_size = htonl(qreg_size);
mq->type = __constant_htonl(DUMP_CHAIN_MQ);
for (cnt = 0; cnt < que_cnt; cnt++) {
reg25 = (struct device_reg_25xxmq *) ((void *)
ha->mqiobase + cnt * QLA_QUE_PAGE);
que_idx = cnt * 4;
mq->qregs[que_idx] = htonl(reg25->req_q_in);
mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
}
}
WRT_REG_DWORD(&reg->iobase_window, 0x00);
RD_REG_DWORD(&reg->iobase_window);
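
In the MQ block above, each queue contributes four big-endian words to mq->qregs[] (req_q_in, req_q_out, rsp_q_in, rsp_q_out), and que_cnt is derived from the first clear bit in the qid maps, which assumes queue IDs are allocated densely from zero. A hypothetical user-space decoder for one such chain, just to pin down the layout:

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	/* qregs holds que_cnt * 4 dwords, each stored with htonl() above. */
	static void decode_mq_chain(const uint32_t *qregs, uint32_t que_cnt)
	{
		for (uint32_t i = 0; i < que_cnt; i++) {
			const uint32_t *q = &qregs[i * 4];

			printf("queue %u: req in/out %u/%u, rsp in/out %u/%u\n",
			       i, ntohl(q[0]), ntohl(q[1]),
			       ntohl(q[2]), ntohl(q[3]));
		}
	}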
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
fcec = nxt + ntohl(ha->fw_dump->eft_size);
if (ha->mqenable) {
nxt = nxt + ntohl(ha->fw_dump->eft_size);
memcpy(nxt, mq, qreg_size);
kfree(mq);
fcec = nxt + qreg_size;
} else {
fcec = nxt + ntohl(ha->fw_dump->eft_size);
}
fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
fce_calc_size(ha->fce_bufs));
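
The branch above splices the optional MQ chain between the EFT data and the FCE chain, and only the final chain carries DUMP_CHAIN_LAST, which is how a dump consumer knows where to stop walking. Roughly, the tail of the image now looks like this (a layout note inferred from the fields set above, not code from the driver):

	/* fw_dump tail once version has DUMP_CHAIN_VARIANT set:
	 *
	 *   ... register and memory dump ...
	 *   EFT data               eft_size bytes
	 *   qla2xxx_mq_chain       only if ha->mqenable
	 *                          (type DUMP_CHAIN_MQ, chain_size bytes)
	 *   qla2xxx_fce_chain      type DUMP_CHAIN_FCE | DUMP_CHAIN_LAST
	 *   FCE buffer data        fce_calc_size(ha->fce_bufs) bytes
	 */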
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
ha->host_no, ha->fw_dump);
base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
void
qla2x00_dump_regs(scsi_qla_host_t *ha)
qla2x00_dump_regs(scsi_qla_host_t *vha)
{
int i;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
printk("Mailbox registers:\n");
for (i = 0; i < 6; i++)
printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i,
printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
RD_REG_WORD(mbx_reg++));
}
@@ -1302,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
if (cnt % 16)
printk("\n");
}