Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (170 commits) [SCSI] scsi_dh_rdac: Add MD36xxf into device list [SCSI] scsi_debug: add consecutive medium errors [SCSI] libsas: fix ata list corruption issue [SCSI] hpsa: export resettable host attribute [SCSI] hpsa: move device attributes to avoid forward declarations [SCSI] scsi_debug: Logical Block Provisioning (SBC3r26) [SCSI] sd: Logical Block Provisioning update [SCSI] Include protection operation in SCSI command trace [SCSI] hpsa: fix incorrect PCI IDs and add two new ones (2nd try) [SCSI] target: Fix volume size misreporting for volumes > 2TB [SCSI] bnx2fc: Broadcom FCoE offload driver [SCSI] fcoe: fix broken fcoe interface reset [SCSI] fcoe: precedence bug in fcoe_filter_frames() [SCSI] libfcoe: Remove stale fcoe-netdev entries [SCSI] libfcoe: Move FCOE_MTU definition from fcoe.h to libfcoe.h [SCSI] libfc: introduce __fc_fill_fc_hdr that accepts fc_hdr as an argument [SCSI] fcoe, libfc: initialize EM anchors list and then update npiv EMs [SCSI] Revert "[SCSI] libfc: fix exchange being deleted when the abort itself is timed out" [SCSI] libfc: Fixing a memory leak when destroying an interface [SCSI] megaraid_sas: Version and Changelog update ... Fix up trivial conflicts due to whitespace differences in drivers/scsi/libsas/{sas_ata.c,sas_scsi_host.c}
This commit is contained in:
Коммит
c55d267de2
|
@ -1,3 +1,26 @@
|
|||
Release Date : Thu. Feb 24, 2011 17:00:00 PST 2010 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Adam Radford
|
||||
Current Version : 00.00.05.34-rc1
|
||||
Old Version : 00.00.05.29-rc1
|
||||
1. Fix some failure gotos from megasas_probe_one(), etc.
|
||||
2. Add missing check_and_restore_queue_depth() call in
|
||||
complete_cmd_fusion().
|
||||
3. Enable MSI-X before calling megasas_init_fw().
|
||||
4. Call tasklet_schedule() even if outbound_intr_status == 0 for MFI based
|
||||
boards in MSI-X mode.
|
||||
5. Fix megasas_probe_one() to clear PCI_MSIX_FLAGS_ENABLE in msi control
|
||||
register in kdump kernel.
|
||||
6. Fix megasas_get_cmd() to only print "Command pool empty" if
|
||||
megasas_dbg_lvl is set.
|
||||
7. Fix megasas_build_dcdb_fusion() to not filter by TYPE_DISK.
|
||||
8. Fix megasas_build_dcdb_fusion() to use io_request->LUN[1] field.
|
||||
9. Add MR_EVT_CFG_CLEARED to megasas_aen_polling().
|
||||
10. Fix tasklet_init() in megasas_init_fw() to use instancet->tasklet.
|
||||
11. Fix fault state handling in megasas_transition_to_ready().
|
||||
12. Fix max_sectors setting for IEEE SGL's.
|
||||
13. Fix iMR OCR support to work correctly.
|
||||
-------------------------------------------------------------------------------
|
||||
Release Date : Tues. Dec 14, 2010 17:00:00 PST 2010 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Adam Radford
|
||||
|
|
|
@ -28,6 +28,12 @@ boot parameter "hpsa_allow_any=1" is specified, however these are not tested
|
|||
nor supported by HP with this driver. For older Smart Arrays, the cciss
|
||||
driver should still be used.
|
||||
|
||||
The "hpsa_simple_mode=1" boot parameter may be used to prevent the driver from
|
||||
putting the controller into "performant" mode. The difference is that with simple
|
||||
mode, each command completion requires an interrupt, while with "performant mode"
|
||||
(the default, and ordinarily better performing) it is possible to have multiple
|
||||
command completions indicated by a single interrupt.
|
||||
|
||||
HPSA specific entries in /sys
|
||||
-----------------------------
|
||||
|
||||
|
@ -39,6 +45,8 @@ HPSA specific entries in /sys
|
|||
|
||||
/sys/class/scsi_host/host*/rescan
|
||||
/sys/class/scsi_host/host*/firmware_revision
|
||||
/sys/class/scsi_host/host*/resettable
|
||||
/sys/class/scsi_host/host*/transport_mode
|
||||
|
||||
the host "rescan" attribute is a write only attribute. Writing to this
|
||||
attribute will cause the driver to scan for new, changed, or removed devices
|
||||
|
@ -55,6 +63,21 @@ HPSA specific entries in /sys
|
|||
root@host:/sys/class/scsi_host/host4# cat firmware_revision
|
||||
7.14
|
||||
|
||||
The transport_mode indicates whether the controller is in "performant"
|
||||
or "simple" mode. This is controlled by the "hpsa_simple_mode" module
|
||||
parameter.
|
||||
|
||||
The "resettable" read-only attribute indicates whether a particular
|
||||
controller is able to honor the "reset_devices" kernel parameter. If the
|
||||
device is resettable, this file will contain a "1", otherwise, a "0". This
|
||||
parameter is used by kdump, for example, to reset the controller at driver
|
||||
load time to eliminate any outstanding commands on the controller and get the
|
||||
controller into a known state so that the kdump initiated i/o will work right
|
||||
and not be disrupted in any way by stale commands or other stale state
|
||||
remaining on the controller from the previous kernel. This attribute enables
|
||||
kexec tools to warn the user if they attempt to designate a device which is
|
||||
unable to honor the reset_devices kernel parameter as a dump device.
|
||||
|
||||
HPSA specific disk attributes:
|
||||
------------------------------
|
||||
|
||||
|
|
|
@ -1343,7 +1343,7 @@ Members of interest:
|
|||
underruns (overruns should be rare). If possible an LLD
|
||||
should set 'resid' prior to invoking 'done'. The most
|
||||
interesting case is data transfers from a SCSI target
|
||||
device device (i.e. READs) that underrun.
|
||||
device (e.g. READs) that underrun.
|
||||
underflow - LLD should place (DID_ERROR << 16) in 'result' if
|
||||
actual number of bytes transferred is less than this
|
||||
figure. Not many LLDs implement this check and some that
|
||||
|
@ -1351,6 +1351,18 @@ Members of interest:
|
|||
report a DID_ERROR. Better for an LLD to implement
|
||||
'resid'.
|
||||
|
||||
It is recommended that a LLD set 'resid' on data transfers from a SCSI
|
||||
target device (e.g. READs). It is especially important that 'resid' is set
|
||||
when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
|
||||
(and possibly RECOVERED ERROR). In these cases if a LLD is in doubt how much
|
||||
data has been received then the safest approach is to indicate no bytes have
|
||||
been received. For example: to indicate that no valid data has been received
|
||||
a LLD might use these helpers:
|
||||
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
|
||||
where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate only three 512
|
||||
bytes blocks has been received 'resid' could be set like this:
|
||||
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
|
||||
|
||||
The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
|
||||
|
||||
|
||||
|
|
|
@ -5359,8 +5359,7 @@ S: Supported
|
|||
F: drivers/s390/crypto/
|
||||
|
||||
S390 ZFCP DRIVER
|
||||
M: Christof Schmitt <christof.schmitt@de.ibm.com>
|
||||
M: Swen Schillig <swen@vnet.ibm.com>
|
||||
M: Steffen Maier <maier@linux.vnet.ibm.com>
|
||||
M: linux390@de.ibm.com
|
||||
L: linux-s390@vger.kernel.org
|
||||
W: http://www.ibm.com/developerworks/linux/linux390/
|
||||
|
|
|
@ -2045,9 +2045,26 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
|
|||
|
||||
if (error && req->cmd_type == REQ_TYPE_FS &&
|
||||
!(req->cmd_flags & REQ_QUIET)) {
|
||||
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
|
||||
req->rq_disk ? req->rq_disk->disk_name : "?",
|
||||
(unsigned long long)blk_rq_pos(req));
|
||||
char *error_type;
|
||||
|
||||
switch (error) {
|
||||
case -ENOLINK:
|
||||
error_type = "recoverable transport";
|
||||
break;
|
||||
case -EREMOTEIO:
|
||||
error_type = "critical target";
|
||||
break;
|
||||
case -EBADE:
|
||||
error_type = "critical nexus";
|
||||
break;
|
||||
case -EIO:
|
||||
default:
|
||||
error_type = "I/O";
|
||||
break;
|
||||
}
|
||||
printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
|
||||
error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
|
||||
(unsigned long long)blk_rq_pos(req));
|
||||
}
|
||||
|
||||
blk_account_io_completion(req, nr_bytes);
|
||||
|
|
|
@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
|
|||
stats->custom[3].value = conn->fmr_unalign_cnt;
|
||||
}
|
||||
|
||||
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
|
||||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
struct iser_conn *ib_conn = ep->dd_data;
|
||||
int len;
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
|
||||
if (!ib_conn || !ib_conn->cma_id)
|
||||
return -ENOTCONN;
|
||||
|
||||
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
|
||||
&ib_conn->cma_id->route.addr.dst_addr,
|
||||
param, buf);
|
||||
break;
|
||||
default:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static struct iscsi_endpoint *
|
||||
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
|
||||
int non_blocking)
|
||||
|
@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
|
|||
ISCSI_MAX_BURST |
|
||||
ISCSI_PDU_INORDER_EN |
|
||||
ISCSI_DATASEQ_INORDER_EN |
|
||||
ISCSI_CONN_PORT |
|
||||
ISCSI_CONN_ADDRESS |
|
||||
ISCSI_EXP_STATSN |
|
||||
ISCSI_PERSISTENT_PORT |
|
||||
ISCSI_PERSISTENT_ADDRESS |
|
||||
|
@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
|
|||
.destroy_conn = iscsi_iser_conn_destroy,
|
||||
.set_param = iscsi_iser_set_param,
|
||||
.get_conn_param = iscsi_conn_get_param,
|
||||
.get_ep_param = iscsi_iser_get_ep_param,
|
||||
.get_session_param = iscsi_session_get_param,
|
||||
.start_conn = iscsi_iser_conn_start,
|
||||
.stop_conn = iscsi_iser_conn_stop,
|
||||
|
|
|
@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
|
|||
if (!error && !clone->errors)
|
||||
return 0; /* I/O complete */
|
||||
|
||||
if (error == -EOPNOTSUPP)
|
||||
return error;
|
||||
|
||||
if (clone->cmd_flags & REQ_DISCARD)
|
||||
/*
|
||||
* Pass all discard request failures up.
|
||||
* FIXME: only fail_path if the discard failed due to a
|
||||
* transport problem. This requires precise understanding
|
||||
* of the underlying failure (e.g. the SCSI sense).
|
||||
*/
|
||||
if (error == -EOPNOTSUPP || error == -EREMOTEIO)
|
||||
return error;
|
||||
|
||||
if (mpio->pgpath)
|
||||
fail_path(mpio->pgpath);
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
|
||||
r = -EIO;
|
||||
if (!m->nr_valid_paths) {
|
||||
if (!m->queue_if_no_path) {
|
||||
if (!__must_push_back(m))
|
||||
r = -EIO;
|
||||
} else {
|
||||
if (error == -EBADE)
|
||||
r = error;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
return r;
|
||||
|
|
|
@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
|
|||
#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
|
||||
#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
|
||||
#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
|
||||
#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
|
||||
|
||||
/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
|
||||
|
||||
|
|
|
@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
|
|||
#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
|
||||
#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
|
||||
#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
|
||||
#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
|
||||
|
||||
/* SAS Discovery Event data */
|
||||
|
||||
|
|
|
@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
|
|||
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
|
||||
snprintf(evStr, EVENT_DESCR_STR_SZ,
|
||||
"SAS PHY Link Status: Phy=%d:"
|
||||
" Rate 3.0 Gpbs",PhyNumber);
|
||||
" Rate 3.0 Gbps", PhyNumber);
|
||||
break;
|
||||
case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
|
||||
snprintf(evStr, EVENT_DESCR_STR_SZ,
|
||||
"SAS PHY Link Status: Phy=%d:"
|
||||
" Rate 6.0 Gbps", PhyNumber);
|
||||
break;
|
||||
default:
|
||||
snprintf(evStr, EVENT_DESCR_STR_SZ,
|
||||
|
|
|
@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
|
|||
else
|
||||
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
|
||||
|
||||
if (karg->hdr.port > 1)
|
||||
if (karg->hdr.port > 1) {
|
||||
kfree(karg);
|
||||
return -EINVAL;
|
||||
}
|
||||
port = karg->hdr.port;
|
||||
|
||||
karg->port = port;
|
||||
|
|
|
@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
|
|||
.change_queue_depth = mptscsih_change_queue_depth,
|
||||
.eh_abort_handler = mptscsih_abort,
|
||||
.eh_device_reset_handler = mptscsih_dev_reset,
|
||||
.eh_bus_reset_handler = mptscsih_bus_reset,
|
||||
.eh_host_reset_handler = mptscsih_host_reset,
|
||||
.bios_param = mptscsih_bios_param,
|
||||
.can_queue = MPT_SAS_CAN_QUEUE,
|
||||
|
@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
|
|||
case MPI_SAS_IOUNIT0_RATE_3_0:
|
||||
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
|
||||
break;
|
||||
case MPI_SAS_IOUNIT0_RATE_6_0:
|
||||
phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
|
||||
break;
|
||||
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
|
||||
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
|
||||
default:
|
||||
|
@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
|
|||
}
|
||||
|
||||
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
|
||||
link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
|
||||
link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
|
||||
link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
|
||||
|
||||
if (!port_info) {
|
||||
if (ioc->old_sas_discovery_protocal) {
|
||||
|
|
|
@ -145,13 +145,6 @@ static struct {
|
|||
{ "Broadcom NetXtreme II BCM57712E XGb" }
|
||||
};
|
||||
|
||||
#ifndef PCI_DEVICE_ID_NX2_57712
|
||||
#define PCI_DEVICE_ID_NX2_57712 0x1662
|
||||
#endif
|
||||
#ifndef PCI_DEVICE_ID_NX2_57712E
|
||||
#define PCI_DEVICE_ID_NX2_57712E 0x1663
|
||||
#endif
|
||||
|
||||
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
|
||||
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
|
||||
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
|
||||
|
|
|
@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
|
|||
{
|
||||
int retval = -ENOMEM;
|
||||
|
||||
zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
|
||||
sizeof(struct zfcp_fc_gpn_ft_req));
|
||||
if (!zfcp_data.gpn_ft_cache)
|
||||
goto out;
|
||||
|
||||
zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
|
||||
sizeof(struct fsf_qtcb));
|
||||
if (!zfcp_data.qtcb_cache)
|
||||
zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
|
||||
sizeof(struct fsf_qtcb));
|
||||
if (!zfcp_fsf_qtcb_cache)
|
||||
goto out_qtcb_cache;
|
||||
|
||||
zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
|
||||
sizeof(struct fsf_status_read_buffer));
|
||||
if (!zfcp_data.sr_buffer_cache)
|
||||
goto out_sr_cache;
|
||||
zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
|
||||
sizeof(struct zfcp_fc_req));
|
||||
if (!zfcp_fc_req_cache)
|
||||
goto out_fc_cache;
|
||||
|
||||
zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
|
||||
sizeof(struct zfcp_fc_gid_pn));
|
||||
if (!zfcp_data.gid_pn_cache)
|
||||
goto out_gid_cache;
|
||||
|
||||
zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
|
||||
sizeof(struct zfcp_fc_els_adisc));
|
||||
if (!zfcp_data.adisc_cache)
|
||||
goto out_adisc_cache;
|
||||
|
||||
zfcp_data.scsi_transport_template =
|
||||
zfcp_scsi_transport_template =
|
||||
fc_attach_transport(&zfcp_transport_functions);
|
||||
if (!zfcp_data.scsi_transport_template)
|
||||
if (!zfcp_scsi_transport_template)
|
||||
goto out_transport;
|
||||
scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
|
||||
scsi_transport_reserve_device(zfcp_scsi_transport_template,
|
||||
sizeof(struct zfcp_scsi_dev));
|
||||
|
||||
|
||||
|
@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
|
|||
out_ccw_register:
|
||||
misc_deregister(&zfcp_cfdc_misc);
|
||||
out_misc:
|
||||
fc_release_transport(zfcp_data.scsi_transport_template);
|
||||
fc_release_transport(zfcp_scsi_transport_template);
|
||||
out_transport:
|
||||
kmem_cache_destroy(zfcp_data.adisc_cache);
|
||||
out_adisc_cache:
|
||||
kmem_cache_destroy(zfcp_data.gid_pn_cache);
|
||||
out_gid_cache:
|
||||
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
|
||||
out_sr_cache:
|
||||
kmem_cache_destroy(zfcp_data.qtcb_cache);
|
||||
kmem_cache_destroy(zfcp_fc_req_cache);
|
||||
out_fc_cache:
|
||||
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
|
||||
out_qtcb_cache:
|
||||
kmem_cache_destroy(zfcp_data.gpn_ft_cache);
|
||||
out:
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
|
|||
{
|
||||
ccw_driver_unregister(&zfcp_ccw_driver);
|
||||
misc_deregister(&zfcp_cfdc_misc);
|
||||
fc_release_transport(zfcp_data.scsi_transport_template);
|
||||
kmem_cache_destroy(zfcp_data.adisc_cache);
|
||||
kmem_cache_destroy(zfcp_data.gid_pn_cache);
|
||||
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
|
||||
kmem_cache_destroy(zfcp_data.qtcb_cache);
|
||||
kmem_cache_destroy(zfcp_data.gpn_ft_cache);
|
||||
fc_release_transport(zfcp_scsi_transport_template);
|
||||
kmem_cache_destroy(zfcp_fc_req_cache);
|
||||
kmem_cache_destroy(zfcp_fsf_qtcb_cache);
|
||||
}
|
||||
|
||||
module_exit(zfcp_module_exit);
|
||||
|
@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
|
|||
return -ENOMEM;
|
||||
|
||||
adapter->pool.qtcb_pool =
|
||||
mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
|
||||
mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
|
||||
if (!adapter->pool.qtcb_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
adapter->pool.status_read_data =
|
||||
mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
|
||||
zfcp_data.sr_buffer_cache);
|
||||
if (!adapter->pool.status_read_data)
|
||||
BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
|
||||
adapter->pool.sr_data =
|
||||
mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
|
||||
if (!adapter->pool.sr_data)
|
||||
return -ENOMEM;
|
||||
|
||||
adapter->pool.gid_pn =
|
||||
mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
|
||||
mempool_create_slab_pool(1, zfcp_fc_req_cache);
|
||||
if (!adapter->pool.gid_pn)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
|
|||
mempool_destroy(adapter->pool.qtcb_pool);
|
||||
if (adapter->pool.status_read_req)
|
||||
mempool_destroy(adapter->pool.status_read_req);
|
||||
if (adapter->pool.status_read_data)
|
||||
mempool_destroy(adapter->pool.status_read_data);
|
||||
if (adapter->pool.sr_data)
|
||||
mempool_destroy(adapter->pool.sr_data);
|
||||
if (adapter->pool.gid_pn)
|
||||
mempool_destroy(adapter->pool.gid_pn);
|
||||
}
|
||||
|
@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
|
|||
|
||||
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
|
||||
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
|
||||
INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
|
||||
|
||||
if (zfcp_qdio_setup(adapter))
|
||||
goto failed;
|
||||
|
@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
|
|||
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
|
||||
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
|
||||
|
||||
if (!zfcp_adapter_scsi_register(adapter))
|
||||
if (!zfcp_scsi_adapter_register(adapter))
|
||||
return adapter;
|
||||
|
||||
failed:
|
||||
|
@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
|
|||
|
||||
cancel_work_sync(&adapter->scan_work);
|
||||
cancel_work_sync(&adapter->stat_work);
|
||||
cancel_work_sync(&adapter->ns_up_work);
|
||||
zfcp_destroy_adapter_work_queue(adapter);
|
||||
|
||||
zfcp_fc_wka_ports_force_offline(adapter->gs);
|
||||
zfcp_adapter_scsi_unregister(adapter);
|
||||
zfcp_scsi_adapter_unregister(adapter);
|
||||
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
|
||||
|
||||
zfcp_erp_thread_kill(adapter);
|
||||
|
|
|
@ -89,7 +89,6 @@ struct zfcp_reqlist;
|
|||
#define ZFCP_STATUS_LUN_READONLY 0x00000008
|
||||
|
||||
/* FSF request status (this does not have a common part) */
|
||||
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
|
||||
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
|
||||
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
|
||||
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
|
||||
|
@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
|
|||
mempool_t *scsi_req;
|
||||
mempool_t *scsi_abort;
|
||||
mempool_t *status_read_req;
|
||||
mempool_t *status_read_data;
|
||||
mempool_t *sr_data;
|
||||
mempool_t *gid_pn;
|
||||
mempool_t *qtcb_pool;
|
||||
};
|
||||
|
@ -190,6 +189,7 @@ struct zfcp_adapter {
|
|||
struct fsf_qtcb_bottom_port *stats_reset_data;
|
||||
unsigned long stats_reset;
|
||||
struct work_struct scan_work;
|
||||
struct work_struct ns_up_work;
|
||||
struct service_level service_level;
|
||||
struct workqueue_struct *work_queue;
|
||||
struct device_dma_parameters dma_parms;
|
||||
|
@ -314,15 +314,4 @@ struct zfcp_fsf_req {
|
|||
void (*handler)(struct zfcp_fsf_req *);
|
||||
};
|
||||
|
||||
/* driver data */
|
||||
struct zfcp_data {
|
||||
struct scsi_host_template scsi_host_template;
|
||||
struct scsi_transport_template *scsi_transport_template;
|
||||
struct kmem_cache *gpn_ft_cache;
|
||||
struct kmem_cache *qtcb_cache;
|
||||
struct kmem_cache *sr_buffer_cache;
|
||||
struct kmem_cache *gid_pn_cache;
|
||||
struct kmem_cache *adisc_cache;
|
||||
};
|
||||
|
||||
#endif /* ZFCP_DEF_H */
|
||||
|
|
|
@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
|
|||
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
|
||||
return ZFCP_ERP_FAILED;
|
||||
|
||||
if (mempool_resize(act->adapter->pool.status_read_data,
|
||||
if (mempool_resize(act->adapter->pool.sr_data,
|
||||
act->adapter->stat_read_buf_num, GFP_KERNEL))
|
||||
return ZFCP_ERP_FAILED;
|
||||
|
||||
|
@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
|
|||
if (result == ZFCP_ERP_SUCCEEDED) {
|
||||
register_service_level(&adapter->service_level);
|
||||
queue_work(adapter->work_queue, &adapter->scan_work);
|
||||
queue_work(adapter->work_queue, &adapter->ns_up_work);
|
||||
} else
|
||||
unregister_service_level(&adapter->service_level);
|
||||
|
||||
kref_put(&adapter->ref, zfcp_adapter_release);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
|
|||
extern void zfcp_erp_timeout_handler(unsigned long);
|
||||
|
||||
/* zfcp_fc.c */
|
||||
extern struct kmem_cache *zfcp_fc_req_cache;
|
||||
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
|
||||
enum fc_host_event_code event_code, u32);
|
||||
extern void zfcp_fc_post_event(struct work_struct *);
|
||||
|
@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
|
|||
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
|
||||
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
|
||||
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
|
||||
extern void zfcp_fc_sym_name_update(struct work_struct *);
|
||||
|
||||
/* zfcp_fsf.c */
|
||||
extern struct kmem_cache *zfcp_fsf_qtcb_cache;
|
||||
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
|
||||
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
|
||||
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
|
||||
|
@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
|
|||
struct qdio_buffer *);
|
||||
|
||||
/* zfcp_scsi.c */
|
||||
extern struct zfcp_data zfcp_data;
|
||||
extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
|
||||
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
|
||||
extern struct scsi_transport_template *zfcp_scsi_transport_template;
|
||||
extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
|
||||
extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
|
||||
extern struct fc_function_template zfcp_transport_functions;
|
||||
extern void zfcp_scsi_rport_work(struct work_struct *);
|
||||
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
|
||||
|
|
|
@ -11,11 +11,14 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <scsi/fc/fc_els.h>
|
||||
#include <scsi/libfc.h>
|
||||
#include "zfcp_ext.h"
|
||||
#include "zfcp_fc.h"
|
||||
|
||||
struct kmem_cache *zfcp_fc_req_cache;
|
||||
|
||||
static u32 zfcp_fc_rscn_range_mask[] = {
|
||||
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
|
||||
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
|
||||
|
@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
|
|||
zfcp_fc_incoming_rscn(fsf_req);
|
||||
}
|
||||
|
||||
static void zfcp_fc_ns_gid_pn_eval(void *data)
|
||||
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
|
||||
{
|
||||
struct zfcp_fc_gid_pn *gid_pn = data;
|
||||
struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
|
||||
struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
|
||||
struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
|
||||
struct zfcp_port *port = gid_pn->port;
|
||||
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
|
||||
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
|
||||
|
||||
if (ct->status)
|
||||
if (ct_els->status)
|
||||
return;
|
||||
if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
|
||||
if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
|
||||
return;
|
||||
|
||||
/* paranoia */
|
||||
if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
|
||||
return;
|
||||
/* looks like a valid d_id */
|
||||
port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
|
||||
ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
|
||||
}
|
||||
|
||||
static void zfcp_fc_complete(void *data)
|
||||
|
@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
|
|||
complete(data);
|
||||
}
|
||||
|
||||
static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
|
||||
{
|
||||
ct_hdr->ct_rev = FC_CT_REV;
|
||||
ct_hdr->ct_fs_type = FC_FST_DIR;
|
||||
ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
|
||||
ct_hdr->ct_cmd = cmd;
|
||||
ct_hdr->ct_mr_size = mr_size / 4;
|
||||
}
|
||||
|
||||
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
|
||||
struct zfcp_fc_gid_pn *gid_pn)
|
||||
struct zfcp_fc_req *fc_req)
|
||||
{
|
||||
struct zfcp_adapter *adapter = port->adapter;
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
|
||||
struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
|
||||
int ret;
|
||||
|
||||
/* setup parameters for send generic command */
|
||||
gid_pn->port = port;
|
||||
gid_pn->ct.handler = zfcp_fc_complete;
|
||||
gid_pn->ct.handler_data = &completion;
|
||||
gid_pn->ct.req = &gid_pn->sg_req;
|
||||
gid_pn->ct.resp = &gid_pn->sg_resp;
|
||||
sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
|
||||
sizeof(struct zfcp_fc_gid_pn_req));
|
||||
sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
|
||||
sizeof(struct zfcp_fc_gid_pn_resp));
|
||||
fc_req->ct_els.port = port;
|
||||
fc_req->ct_els.handler = zfcp_fc_complete;
|
||||
fc_req->ct_els.handler_data = &completion;
|
||||
fc_req->ct_els.req = &fc_req->sg_req;
|
||||
fc_req->ct_els.resp = &fc_req->sg_rsp;
|
||||
sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
|
||||
sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
|
||||
|
||||
/* setup nameserver request */
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
|
||||
gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
|
||||
gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
|
||||
zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
|
||||
FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
|
||||
gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
|
||||
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
|
||||
adapter->pool.gid_pn_req,
|
||||
ZFCP_FC_CTELS_TMO);
|
||||
if (!ret) {
|
||||
wait_for_completion(&completion);
|
||||
zfcp_fc_ns_gid_pn_eval(gid_pn);
|
||||
zfcp_fc_ns_gid_pn_eval(fc_req);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
|
||||
* zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
|
||||
* @port: port where GID_PN request is needed
|
||||
* return: -ENOMEM on error, 0 otherwise
|
||||
*/
|
||||
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
|
||||
{
|
||||
int ret;
|
||||
struct zfcp_fc_gid_pn *gid_pn;
|
||||
struct zfcp_fc_req *fc_req;
|
||||
struct zfcp_adapter *adapter = port->adapter;
|
||||
|
||||
gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
|
||||
if (!gid_pn)
|
||||
fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
|
||||
if (!fc_req)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(gid_pn, 0, sizeof(*gid_pn));
|
||||
memset(fc_req, 0, sizeof(*fc_req));
|
||||
|
||||
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
|
||||
ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
|
||||
|
||||
zfcp_fc_wka_port_put(&adapter->gs->ds);
|
||||
out:
|
||||
mempool_free(gid_pn, adapter->pool.gid_pn);
|
||||
mempool_free(fc_req, adapter->pool.gid_pn);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
|
|||
|
||||
static void zfcp_fc_adisc_handler(void *data)
|
||||
{
|
||||
struct zfcp_fc_els_adisc *adisc = data;
|
||||
struct zfcp_port *port = adisc->els.port;
|
||||
struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
|
||||
struct zfcp_fc_req *fc_req = data;
|
||||
struct zfcp_port *port = fc_req->ct_els.port;
|
||||
struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
|
||||
|
||||
if (adisc->els.status) {
|
||||
if (fc_req->ct_els.status) {
|
||||
/* request rejected or timed out */
|
||||
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
|
||||
"fcadh_1");
|
||||
|
@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
|
|||
out:
|
||||
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
|
||||
put_device(&port->dev);
|
||||
kmem_cache_free(zfcp_data.adisc_cache, adisc);
|
||||
kmem_cache_free(zfcp_fc_req_cache, fc_req);
|
||||
}
|
||||
|
||||
static int zfcp_fc_adisc(struct zfcp_port *port)
|
||||
{
|
||||
struct zfcp_fc_els_adisc *adisc;
|
||||
struct zfcp_fc_req *fc_req;
|
||||
struct zfcp_adapter *adapter = port->adapter;
|
||||
struct Scsi_Host *shost = adapter->scsi_host;
|
||||
int ret;
|
||||
|
||||
adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
|
||||
if (!adisc)
|
||||
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
|
||||
if (!fc_req)
|
||||
return -ENOMEM;
|
||||
|
||||
adisc->els.port = port;
|
||||
adisc->els.req = &adisc->req;
|
||||
adisc->els.resp = &adisc->resp;
|
||||
sg_init_one(adisc->els.req, &adisc->adisc_req,
|
||||
fc_req->ct_els.port = port;
|
||||
fc_req->ct_els.req = &fc_req->sg_req;
|
||||
fc_req->ct_els.resp = &fc_req->sg_rsp;
|
||||
sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
|
||||
sizeof(struct fc_els_adisc));
|
||||
sg_init_one(adisc->els.resp, &adisc->adisc_resp,
|
||||
sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
|
||||
sizeof(struct fc_els_adisc));
|
||||
|
||||
adisc->els.handler = zfcp_fc_adisc_handler;
|
||||
adisc->els.handler_data = adisc;
|
||||
fc_req->ct_els.handler = zfcp_fc_adisc_handler;
|
||||
fc_req->ct_els.handler_data = fc_req;
|
||||
|
||||
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
|
||||
without FC-AL-2 capability, so we don't set it */
|
||||
adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
|
||||
adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
|
||||
adisc->adisc_req.adisc_cmd = ELS_ADISC;
|
||||
hton24(adisc->adisc_req.adisc_port_id,
|
||||
fc_host_port_id(adapter->scsi_host));
|
||||
fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
|
||||
fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
|
||||
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
|
||||
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
|
||||
|
||||
ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
|
||||
ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
|
||||
ZFCP_FC_CTELS_TMO);
|
||||
if (ret)
|
||||
kmem_cache_free(zfcp_data.adisc_cache, adisc);
|
||||
kmem_cache_free(zfcp_fc_req_cache, fc_req);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
|
|||
put_device(&port->dev);
|
||||
}
|
||||
|
||||
static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
|
||||
static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
|
||||
{
|
||||
struct scatterlist *sg = &gpn_ft->sg_req;
|
||||
struct zfcp_fc_req *fc_req;
|
||||
|
||||
kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
|
||||
zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
|
||||
|
||||
kfree(gpn_ft);
|
||||
}
|
||||
|
||||
static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
|
||||
{
|
||||
struct zfcp_fc_gpn_ft *gpn_ft;
|
||||
struct zfcp_fc_gpn_ft_req *req;
|
||||
|
||||
gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
|
||||
if (!gpn_ft)
|
||||
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
|
||||
if (!fc_req)
|
||||
return NULL;
|
||||
|
||||
req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
|
||||
if (!req) {
|
||||
kfree(gpn_ft);
|
||||
gpn_ft = NULL;
|
||||
goto out;
|
||||
if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
|
||||
kmem_cache_free(zfcp_fc_req_cache, fc_req);
|
||||
return NULL;
|
||||
}
|
||||
sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
|
||||
|
||||
if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
|
||||
zfcp_free_sg_env(gpn_ft, buf_num);
|
||||
gpn_ft = NULL;
|
||||
}
|
||||
out:
|
||||
return gpn_ft;
|
||||
sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
|
||||
sizeof(struct zfcp_fc_gpn_ft_req));
|
||||
|
||||
return fc_req;
|
||||
}
|
||||
|
||||
|
||||
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
|
||||
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
|
||||
struct zfcp_adapter *adapter, int max_bytes)
|
||||
{
|
||||
struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
|
||||
struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
|
||||
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
|
||||
struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
int ret;
|
||||
|
||||
/* prepare CT IU for GPN_FT */
|
||||
req->ct_hdr.ct_rev = FC_CT_REV;
|
||||
req->ct_hdr.ct_fs_type = FC_FST_DIR;
|
||||
req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
|
||||
req->ct_hdr.ct_options = 0;
|
||||
req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
|
||||
req->ct_hdr.ct_mr_size = max_bytes / 4;
|
||||
req->gpn_ft.fn_domain_id_scope = 0;
|
||||
req->gpn_ft.fn_area_id_scope = 0;
|
||||
zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
|
||||
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
|
||||
|
||||
/* prepare zfcp_send_ct */
|
||||
ct->handler = zfcp_fc_complete;
|
||||
ct->handler_data = &completion;
|
||||
ct->req = &gpn_ft->sg_req;
|
||||
ct->resp = gpn_ft->sg_resp;
|
||||
ct_els->handler = zfcp_fc_complete;
|
||||
ct_els->handler_data = &completion;
|
||||
ct_els->req = &fc_req->sg_req;
|
||||
ct_els->resp = &fc_req->sg_rsp;
|
||||
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
|
||||
ZFCP_FC_CTELS_TMO);
|
||||
if (!ret)
|
||||
wait_for_completion(&completion);
|
||||
|
@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
|
|||
list_move_tail(&port->list, lh);
|
||||
}
|
||||
|
||||
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
|
||||
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
|
||||
struct zfcp_adapter *adapter, int max_entries)
|
||||
{
|
||||
struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
|
||||
struct scatterlist *sg = gpn_ft->sg_resp;
|
||||
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
|
||||
struct scatterlist *sg = &fc_req->sg_rsp;
|
||||
struct fc_ct_hdr *hdr = sg_virt(sg);
|
||||
struct fc_gpn_ft_resp *acc = sg_virt(sg);
|
||||
struct zfcp_port *port, *tmp;
|
||||
|
@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
|
|||
u32 d_id;
|
||||
int ret = 0, x, last = 0;
|
||||
|
||||
if (ct->status)
|
||||
if (ct_els->status)
|
||||
return -EIO;
|
||||
|
||||
if (hdr->ct_cmd != FC_FS_ACC) {
|
||||
|
@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
|
|||
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
|
||||
scan_work);
|
||||
int ret, i;
|
||||
struct zfcp_fc_gpn_ft *gpn_ft;
|
||||
struct zfcp_fc_req *fc_req;
|
||||
int chain, max_entries, buf_num, max_bytes;
|
||||
|
||||
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
|
||||
|
@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
|
|||
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
|
||||
return;
|
||||
|
||||
gpn_ft = zfcp_alloc_sg_env(buf_num);
|
||||
if (!gpn_ft)
|
||||
fc_req = zfcp_alloc_sg_env(buf_num);
|
||||
if (!fc_req)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
|
||||
ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
|
||||
if (!ret) {
|
||||
ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
|
||||
ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
|
||||
if (ret == -EAGAIN)
|
||||
ssleep(1);
|
||||
else
|
||||
break;
|
||||
}
|
||||
}
|
||||
zfcp_free_sg_env(gpn_ft, buf_num);
|
||||
zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
|
||||
kmem_cache_free(zfcp_fc_req_cache, fc_req);
|
||||
out:
|
||||
zfcp_fc_wka_port_put(&adapter->gs->ds);
|
||||
}
|
||||
|
||||
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
|
||||
struct zfcp_fc_req *fc_req)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
char devno[] = "DEVNO:";
|
||||
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
|
||||
struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
|
||||
struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
|
||||
int ret;
|
||||
|
||||
zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
|
||||
FC_SYMBOLIC_NAME_SIZE);
|
||||
hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
|
||||
|
||||
sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
|
||||
sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
|
||||
|
||||
ct_els->handler = zfcp_fc_complete;
|
||||
ct_els->handler_data = &completion;
|
||||
ct_els->req = &fc_req->sg_req;
|
||||
ct_els->resp = &fc_req->sg_rsp;
|
||||
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
|
||||
ZFCP_FC_CTELS_TMO);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
wait_for_completion(&completion);
|
||||
if (ct_els->status)
|
||||
return ct_els->status;
|
||||
|
||||
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
|
||||
!(strstr(gspn_rsp->gspn.fp_name, devno)))
|
||||
snprintf(fc_host_symbolic_name(adapter->scsi_host),
|
||||
FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
|
||||
gspn_rsp->gspn.fp_name, devno,
|
||||
dev_name(&adapter->ccw_device->dev),
|
||||
init_utsname()->nodename);
|
||||
else
|
||||
strlcpy(fc_host_symbolic_name(adapter->scsi_host),
|
||||
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
|
||||
struct zfcp_fc_req *fc_req)
|
||||
{
|
||||
DECLARE_COMPLETION_ONSTACK(completion);
|
||||
struct Scsi_Host *shost = adapter->scsi_host;
|
||||
struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
|
||||
struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
|
||||
struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
|
||||
int ret, len;
|
||||
|
||||
zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
|
||||
FC_SYMBOLIC_NAME_SIZE);
|
||||
hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
|
||||
len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
|
||||
FC_SYMBOLIC_NAME_SIZE);
|
||||
rspn_req->rspn.fr_name_len = len;
|
||||
|
||||
sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
|
||||
sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
|
||||
|
||||
ct_els->handler = zfcp_fc_complete;
|
||||
ct_els->handler_data = &completion;
|
||||
ct_els->req = &fc_req->sg_req;
|
||||
ct_els->resp = &fc_req->sg_rsp;
|
||||
|
||||
ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
|
||||
ZFCP_FC_CTELS_TMO);
|
||||
if (!ret)
|
||||
wait_for_completion(&completion);
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
|
||||
* @work: ns_up_work of the adapter where to update the symbolic port name
|
||||
*
|
||||
* Retrieve the current symbolic port name that may have been set by
|
||||
* the hardware using the GSPN request and update the fc_host
|
||||
* symbolic_name sysfs attribute. When running in NPIV mode (and hence
|
||||
* the port name is unique for this system), update the symbolic port
|
||||
* name to add Linux specific information and update the FC nameserver
|
||||
* using the RSPN request.
|
||||
*/
|
||||
void zfcp_fc_sym_name_update(struct work_struct *work)
|
||||
{
|
||||
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
|
||||
ns_up_work);
|
||||
int ret;
|
||||
struct zfcp_fc_req *fc_req;
|
||||
|
||||
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
|
||||
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
|
||||
return;
|
||||
|
||||
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
|
||||
if (!fc_req)
|
||||
return;
|
||||
|
||||
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
ret = zfcp_fc_gspn(adapter, fc_req);
|
||||
if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
|
||||
goto out_ds_put;
|
||||
|
||||
memset(fc_req, 0, sizeof(*fc_req));
|
||||
zfcp_fc_rspn(adapter, fc_req);
|
||||
|
||||
out_ds_put:
|
||||
zfcp_fc_wka_port_put(&adapter->gs->ds);
|
||||
out_free:
|
||||
kmem_cache_free(zfcp_fc_req_cache, fc_req);
|
||||
}
|
||||
|
||||
static void zfcp_fc_ct_els_job_handler(void *data)
|
||||
{
|
||||
struct fc_bsg_job *job = data;
|
||||
|
|
|
@ -64,32 +64,15 @@ struct zfcp_fc_gid_pn_req {
|
|||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
|
||||
* struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
|
||||
* @ct_hdr: FC GS common transport header
|
||||
* @gid_pn: GID_PN response
|
||||
*/
|
||||
struct zfcp_fc_gid_pn_resp {
|
||||
struct zfcp_fc_gid_pn_rsp {
|
||||
struct fc_ct_hdr ct_hdr;
|
||||
struct fc_gid_pn_resp gid_pn;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
|
||||
* @ct: data passed to zfcp_fsf for issuing fsf request
|
||||
* @sg_req: scatterlist entry for request data
|
||||
* @sg_resp: scatterlist entry for response data
|
||||
* @gid_pn_req: GID_PN request data
|
||||
* @gid_pn_resp: GID_PN response data
|
||||
*/
|
||||
struct zfcp_fc_gid_pn {
|
||||
struct zfcp_fsf_ct_els ct;
|
||||
struct scatterlist sg_req;
|
||||
struct scatterlist sg_resp;
|
||||
struct zfcp_fc_gid_pn_req gid_pn_req;
|
||||
struct zfcp_fc_gid_pn_resp gid_pn_resp;
|
||||
struct zfcp_port *port;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
|
||||
* @ct_hdr: FC GS common transport header
|
||||
|
@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
|
|||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
|
||||
* struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
|
||||
* @ct_hdr: FC GS common transport header
|
||||
* @gpn_ft: Array of gpn_ft response data to fill one memory page
|
||||
* @gspn: GSPN_ID request
|
||||
*/
|
||||
struct zfcp_fc_gpn_ft_resp {
|
||||
struct zfcp_fc_gspn_req {
|
||||
struct fc_ct_hdr ct_hdr;
|
||||
struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
|
||||
struct fc_gid_pn_resp gspn;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
|
||||
* @ct: data passed to zfcp_fsf for issuing fsf request
|
||||
* @sg_req: scatter list entry for gpn_ft request
|
||||
* @sg_resp: scatter list entries for gpn_ft responses (per memory page)
|
||||
* struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
|
||||
* @ct_hdr: FC GS common transport header
|
||||
* @gspn: GSPN_ID response
|
||||
* @name: The name string of the GSPN_ID response
|
||||
*/
|
||||
struct zfcp_fc_gpn_ft {
|
||||
struct zfcp_fsf_ct_els ct;
|
||||
struct scatterlist sg_req;
|
||||
struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
|
||||
};
|
||||
struct zfcp_fc_gspn_rsp {
|
||||
struct fc_ct_hdr ct_hdr;
|
||||
struct fc_gspn_resp gspn;
|
||||
char name[FC_SYMBOLIC_NAME_SIZE];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
|
||||
* @els: data required for issuing els fsf command
|
||||
* @req: scatterlist entry for ELS ADISC request
|
||||
* @resp: scatterlist entry for ELS ADISC response
|
||||
* @adisc_req: ELS ADISC request data
|
||||
* @adisc_resp: ELS ADISC response data
|
||||
* struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
|
||||
* @ct_hdr: FC GS common transport header
|
||||
* @rspn: RSPN_ID request
|
||||
* @name: The name string of the RSPN_ID request
|
||||
*/
|
||||
struct zfcp_fc_els_adisc {
|
||||
struct zfcp_fsf_ct_els els;
|
||||
struct scatterlist req;
|
||||
struct scatterlist resp;
|
||||
struct fc_els_adisc adisc_req;
|
||||
struct fc_els_adisc adisc_resp;
|
||||
struct zfcp_fc_rspn_req {
|
||||
struct fc_ct_hdr ct_hdr;
|
||||
struct fc_ns_rspn rspn;
|
||||
char name[FC_SYMBOLIC_NAME_SIZE];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
|
||||
* @ct_els: data required for issuing fsf command
|
||||
* @sg_req: scatterlist entry for request data
|
||||
* @sg_rsp: scatterlist entry for response data
|
||||
* @u: request specific data
|
||||
*/
|
||||
struct zfcp_fc_req {
|
||||
struct zfcp_fsf_ct_els ct_els;
|
||||
struct scatterlist sg_req;
|
||||
struct scatterlist sg_rsp;
|
||||
union {
|
||||
struct {
|
||||
struct fc_els_adisc req;
|
||||
struct fc_els_adisc rsp;
|
||||
} adisc;
|
||||
struct {
|
||||
struct zfcp_fc_gid_pn_req req;
|
||||
struct zfcp_fc_gid_pn_rsp rsp;
|
||||
} gid_pn;
|
||||
struct {
|
||||
struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
|
||||
struct zfcp_fc_gpn_ft_req req;
|
||||
} gpn_ft;
|
||||
struct {
|
||||
struct zfcp_fc_gspn_req req;
|
||||
struct zfcp_fc_gspn_rsp rsp;
|
||||
} gspn;
|
||||
struct {
|
||||
struct zfcp_fc_rspn_req req;
|
||||
struct fc_ct_hdr rsp;
|
||||
} rspn;
|
||||
} u;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
|
|||
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
|
||||
* @fcp: fcp_cmnd to setup
|
||||
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
|
||||
* @tm: task management flags to setup task management command
|
||||
*/
|
||||
static inline
|
||||
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
|
||||
void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
|
||||
u8 tm_flags)
|
||||
{
|
||||
char tag[2];
|
||||
|
||||
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
|
||||
|
||||
if (unlikely(tm_flags)) {
|
||||
fcp->fc_tm_flags = tm_flags;
|
||||
return;
|
||||
}
|
||||
|
||||
if (scsi_populate_tag_msg(scsi, tag)) {
|
||||
switch (tag[0]) {
|
||||
case MSG_ORDERED_TAG:
|
||||
|
@ -225,19 +246,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
|
|||
fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_fc_fcp_tm - setup FCP command as task management command
|
||||
* @fcp: fcp_cmnd to setup
|
||||
* @dev: scsi_device where to send the task management command
|
||||
* @tm: task management flags to setup tm command
|
||||
*/
|
||||
static inline
|
||||
void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
|
||||
{
|
||||
int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
|
||||
fcp->fc_tm_flags |= tm_flags;
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
|
||||
* @fcp_rsp: FCP RSP IU to evaluate
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
#include "zfcp_qdio.h"
|
||||
#include "zfcp_reqlist.h"
|
||||
|
||||
struct kmem_cache *zfcp_fsf_qtcb_cache;
|
||||
|
||||
static void zfcp_fsf_request_timeout_handler(unsigned long data)
|
||||
{
|
||||
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
|
||||
|
@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
|
|||
}
|
||||
|
||||
if (likely(req->qtcb))
|
||||
kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
|
||||
kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
|
||||
kfree(req);
|
||||
}
|
||||
|
||||
|
@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
|
|||
|
||||
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
|
||||
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
|
||||
mempool_free(sr_buf, adapter->pool.status_read_data);
|
||||
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
|
||||
zfcp_fsf_req_free(req);
|
||||
return;
|
||||
}
|
||||
|
@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
|
|||
break;
|
||||
}
|
||||
|
||||
mempool_free(sr_buf, adapter->pool.status_read_data);
|
||||
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
|
||||
zfcp_fsf_req_free(req);
|
||||
|
||||
atomic_inc(&adapter->stat_miss);
|
||||
|
@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
|
|||
if (likely(pool))
|
||||
qtcb = mempool_alloc(pool, GFP_ATOMIC);
|
||||
else
|
||||
qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
|
||||
qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
|
||||
|
||||
if (unlikely(!qtcb))
|
||||
return NULL;
|
||||
|
@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
|
|||
struct zfcp_adapter *adapter = qdio->adapter;
|
||||
struct zfcp_fsf_req *req;
|
||||
struct fsf_status_read_buffer *sr_buf;
|
||||
struct page *page;
|
||||
int retval = -EIO;
|
||||
|
||||
spin_lock_irq(&qdio->req_q_lock);
|
||||
|
@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
|
|||
goto out;
|
||||
}
|
||||
|
||||
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
|
||||
if (!sr_buf) {
|
||||
page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
|
||||
if (!page) {
|
||||
retval = -ENOMEM;
|
||||
goto failed_buf;
|
||||
}
|
||||
sr_buf = page_address(page);
|
||||
memset(sr_buf, 0, sizeof(*sr_buf));
|
||||
req->data = sr_buf;
|
||||
|
||||
|
@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
|
|||
|
||||
failed_req_send:
|
||||
req->data = NULL;
|
||||
mempool_free(sr_buf, adapter->pool.status_read_data);
|
||||
mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
|
||||
failed_buf:
|
||||
zfcp_dbf_hba_fsf_uss("fssr__1", req);
|
||||
zfcp_fsf_req_free(req);
|
||||
|
@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
|
|||
SBAL_FLAGS0_TYPE_READ,
|
||||
qdio->adapter->pool.erp_req);
|
||||
|
||||
if (unlikely(IS_ERR(req))) {
|
||||
if (IS_ERR(req)) {
|
||||
retval = PTR_ERR(req);
|
||||
goto out;
|
||||
}
|
||||
|
@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
|
|||
SBAL_FLAGS0_TYPE_READ,
|
||||
qdio->adapter->pool.erp_req);
|
||||
|
||||
if (unlikely(IS_ERR(req))) {
|
||||
if (IS_ERR(req)) {
|
||||
retval = PTR_ERR(req);
|
||||
goto out;
|
||||
}
|
||||
|
@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
|
|||
zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
|
||||
|
||||
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
|
||||
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
|
||||
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
|
||||
|
||||
if (scsi_prot_sg_count(scsi_cmnd)) {
|
||||
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
|
||||
|
@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
|
|||
goto out;
|
||||
}
|
||||
|
||||
req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
|
||||
req->data = scmnd;
|
||||
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
|
||||
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
|
||||
|
@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
|
|||
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
|
||||
|
||||
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
|
||||
zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
|
||||
zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
|
||||
|
||||
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
|
||||
if (!zfcp_fsf_req_send(req))
|
||||
|
|
|
@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
|
|||
return SUCCESS;
|
||||
}
|
||||
|
||||
int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
|
||||
struct scsi_transport_template *zfcp_scsi_transport_template;
|
||||
|
||||
static struct scsi_host_template zfcp_scsi_host_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = "zfcp",
|
||||
.queuecommand = zfcp_scsi_queuecommand,
|
||||
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
|
||||
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
|
||||
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
|
||||
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
|
||||
.slave_alloc = zfcp_scsi_slave_alloc,
|
||||
.slave_configure = zfcp_scsi_slave_configure,
|
||||
.slave_destroy = zfcp_scsi_slave_destroy,
|
||||
.change_queue_depth = zfcp_scsi_change_queue_depth,
|
||||
.proc_name = "zfcp",
|
||||
.can_queue = 4096,
|
||||
.this_id = -1,
|
||||
.sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
|
||||
.max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
|
||||
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
|
||||
.cmd_per_lun = 1,
|
||||
.use_clustering = 1,
|
||||
.shost_attrs = zfcp_sysfs_shost_attrs,
|
||||
.sdev_attrs = zfcp_sysfs_sdev_attrs,
|
||||
};
|
||||
|
||||
/**
|
||||
* zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
|
||||
* @adapter: The zfcp adapter to register with the SCSI midlayer
|
||||
*/
|
||||
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
|
||||
{
|
||||
struct ccw_dev_id dev_id;
|
||||
|
||||
|
@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
|
|||
|
||||
ccw_device_get_id(adapter->ccw_device, &dev_id);
|
||||
/* register adapter as SCSI host with mid layer of SCSI stack */
|
||||
adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
|
||||
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
|
||||
sizeof (struct zfcp_adapter *));
|
||||
if (!adapter->scsi_host) {
|
||||
dev_err(&adapter->ccw_device->dev,
|
||||
|
@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
|
|||
adapter->scsi_host->max_channel = 0;
|
||||
adapter->scsi_host->unique_id = dev_id.devno;
|
||||
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
|
||||
adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
|
||||
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
|
||||
|
||||
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
|
||||
|
||||
|
@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
|
||||
/**
|
||||
* zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
|
||||
* @adapter: The zfcp adapter to unregister.
|
||||
*/
|
||||
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
|
||||
{
|
||||
struct Scsi_Host *shost;
|
||||
struct zfcp_port *port;
|
||||
|
@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
|
|||
scsi_remove_host(shost);
|
||||
scsi_host_put(shost);
|
||||
adapter->scsi_host = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static struct fc_host_statistics*
|
||||
|
@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
|
|||
/* no functions registered for following dynamic attributes but
|
||||
directly set by LLDD */
|
||||
.show_host_port_type = 1,
|
||||
.show_host_symbolic_name = 1,
|
||||
.show_host_speed = 1,
|
||||
.show_host_port_id = 1,
|
||||
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
|
||||
};
|
||||
|
||||
struct zfcp_data zfcp_data = {
|
||||
.scsi_host_template = {
|
||||
.name = "zfcp",
|
||||
.module = THIS_MODULE,
|
||||
.proc_name = "zfcp",
|
||||
.change_queue_depth = zfcp_scsi_change_queue_depth,
|
||||
.slave_alloc = zfcp_scsi_slave_alloc,
|
||||
.slave_configure = zfcp_scsi_slave_configure,
|
||||
.slave_destroy = zfcp_scsi_slave_destroy,
|
||||
.queuecommand = zfcp_scsi_queuecommand,
|
||||
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
|
||||
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
|
||||
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
|
||||
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
|
||||
.can_queue = 4096,
|
||||
.this_id = -1,
|
||||
.sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
|
||||
.cmd_per_lun = 1,
|
||||
.use_clustering = 1,
|
||||
.sdev_attrs = zfcp_sysfs_sdev_attrs,
|
||||
.max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
|
||||
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
|
||||
.shost_attrs = zfcp_sysfs_shost_attrs,
|
||||
},
|
||||
};
|
||||
|
|
|
@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
|
|||
|
||||
source "drivers/scsi/cxgbi/Kconfig"
|
||||
source "drivers/scsi/bnx2i/Kconfig"
|
||||
source "drivers/scsi/bnx2fc/Kconfig"
|
||||
source "drivers/scsi/be2iscsi/Kconfig"
|
||||
|
||||
config SGIWD93_SCSI
|
||||
|
|
|
@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/
|
|||
obj-$(CONFIG_LIBFCOE) += fcoe/
|
||||
obj-$(CONFIG_FCOE) += fcoe/
|
||||
obj-$(CONFIG_FCOE_FNIC) += fnic/
|
||||
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
|
||||
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
|
||||
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
|
||||
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
|
||||
|
|
|
@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
|
|||
{
|
||||
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
|
||||
|
||||
cancel_delayed_work(&hostdata->coroutine);
|
||||
flush_scheduled_work();
|
||||
cancel_delayed_work_sync(&hostdata->coroutine);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
|
|||
int poll_count = 0;
|
||||
arcmsr_free_sysfs_attr(acb);
|
||||
scsi_remove_host(host);
|
||||
flush_scheduled_work();
|
||||
flush_work_sync(&acb->arcmsr_do_message_isr_bh);
|
||||
del_timer_sync(&acb->eternal_timer);
|
||||
arcmsr_disable_outbound_ints(acb);
|
||||
arcmsr_stop_adapter_bgrb(acb);
|
||||
|
@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
|
|||
(struct AdapterControlBlock *)host->hostdata;
|
||||
del_timer_sync(&acb->eternal_timer);
|
||||
arcmsr_disable_outbound_ints(acb);
|
||||
flush_scheduled_work();
|
||||
flush_work_sync(&acb->arcmsr_do_message_isr_bh);
|
||||
arcmsr_stop_adapter_bgrb(acb);
|
||||
arcmsr_flush_adapter_cache(acb);
|
||||
}
|
||||
|
|
|
@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
|
|||
}
|
||||
|
||||
/**
|
||||
* beiscsi_conn_get_param - get the iscsi parameter
|
||||
* @cls_conn: pointer to iscsi cls conn
|
||||
* beiscsi_ep_get_param - get the iscsi parameter
|
||||
* @ep: pointer to iscsi ep
|
||||
* @param: parameter type identifier
|
||||
* @buf: buffer pointer
|
||||
*
|
||||
* returns iscsi parameter
|
||||
*/
|
||||
int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
||||
int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
|
||||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
struct beiscsi_endpoint *beiscsi_ep;
|
||||
struct iscsi_conn *conn = cls_conn->dd_data;
|
||||
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
|
||||
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
|
||||
int len = 0;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
|
||||
beiscsi_ep = beiscsi_conn->ep;
|
||||
if (!beiscsi_ep) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"In beiscsi_conn_get_param , no beiscsi_ep\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
|
@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
|||
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
|
||||
break;
|
||||
default:
|
||||
return iscsi_conn_get_param(cls_conn, param, buf);
|
||||
return -ENOSYS;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
|
|
@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
|
|||
struct iscsi_cls_conn *cls_conn,
|
||||
uint64_t transport_fd, int is_leading);
|
||||
|
||||
int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
||||
enum iscsi_param param, char *buf);
|
||||
int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
|
||||
char *buf);
|
||||
|
||||
int beiscsi_get_host_param(struct Scsi_Host *shost,
|
||||
enum iscsi_host_param param, char *buf);
|
||||
|
|
|
@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
|
|||
.bind_conn = beiscsi_conn_bind,
|
||||
.destroy_conn = iscsi_conn_teardown,
|
||||
.set_param = beiscsi_set_param,
|
||||
.get_conn_param = beiscsi_conn_get_param,
|
||||
.get_conn_param = iscsi_conn_get_param,
|
||||
.get_session_param = iscsi_session_get_param,
|
||||
.get_host_param = beiscsi_get_host_param,
|
||||
.start_conn = beiscsi_conn_start,
|
||||
|
@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
|
|||
.alloc_pdu = beiscsi_alloc_pdu,
|
||||
.parse_pdu_itt = beiscsi_parse_pdu,
|
||||
.get_stats = beiscsi_conn_get_stats,
|
||||
.get_ep_param = beiscsi_ep_get_param,
|
||||
.ep_connect = beiscsi_ep_connect,
|
||||
.ep_poll = beiscsi_ep_poll,
|
||||
.ep_disconnect = beiscsi_ep_disconnect,
|
||||
|
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,11 @@
|
|||
config SCSI_BNX2X_FCOE
|
||||
tristate "Broadcom NetXtreme II FCoE support"
|
||||
depends on PCI
|
||||
select NETDEVICES
|
||||
select NETDEV_1000
|
||||
select LIBFC
|
||||
select LIBFCOE
|
||||
select CNIC
|
||||
---help---
|
||||
This driver supports FCoE offload for the Broadcom NetXtreme II
|
||||
devices.
|
|
@ -0,0 +1,3 @@
|
|||
obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
|
||||
|
||||
bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
|
|
@ -0,0 +1,511 @@
|
|||
#ifndef _BNX2FC_H_
|
||||
#define _BNX2FC_H_
|
||||
/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_eh.h>
|
||||
#include <scsi/scsi_tcq.h>
|
||||
#include <scsi/libfc.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
#include <scsi/fc_encode.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_fc.h>
|
||||
#include <scsi/fc/fc_fip.h>
|
||||
#include <scsi/fc/fc_fc2.h>
|
||||
#include <scsi/fc_frame.h>
|
||||
#include <scsi/fc/fc_fcoe.h>
|
||||
#include <scsi/fc/fc_fcp.h>
|
||||
|
||||
#include "57xx_hsi_bnx2fc.h"
|
||||
#include "bnx2fc_debug.h"
|
||||
#include "../../net/cnic_if.h"
|
||||
#include "bnx2fc_constants.h"
|
||||
|
||||
#define BNX2FC_NAME "bnx2fc"
|
||||
#define BNX2FC_VERSION "1.0.0"
|
||||
|
||||
#define PFX "bnx2fc: "
|
||||
|
||||
#define BNX2X_DOORBELL_PCI_BAR 2
|
||||
|
||||
#define BNX2FC_MAX_BD_LEN 0xffff
|
||||
#define BNX2FC_BD_SPLIT_SZ 0x8000
|
||||
#define BNX2FC_MAX_BDS_PER_CMD 256
|
||||
|
||||
#define BNX2FC_SQ_WQES_MAX 256
|
||||
|
||||
#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
|
||||
#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
|
||||
#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
|
||||
|
||||
#define BNX2FC_RQ_WQES_MAX 16
|
||||
#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
|
||||
|
||||
#define BNX2FC_NUM_MAX_SESS 128
|
||||
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
|
||||
|
||||
#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
|
||||
#define BNX2FC_MIN_PAYLOAD 256
|
||||
#define BNX2FC_MAX_PAYLOAD 2048
|
||||
|
||||
#define BNX2FC_RQ_BUF_SZ 256
|
||||
#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
|
||||
|
||||
#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
|
||||
#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
|
||||
#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
|
||||
#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
|
||||
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
|
||||
#define BNX2FC_5771X_DB_PAGE_SIZE 128
|
||||
|
||||
#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
|
||||
#define BNX2FC_TASK_SIZE 128
|
||||
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
|
||||
#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
|
||||
|
||||
#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
|
||||
#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
|
||||
|
||||
#define BNX2FC_MAX_SEQS 255
|
||||
|
||||
#define BNX2FC_READ (1 << 1)
|
||||
#define BNX2FC_WRITE (1 << 0)
|
||||
|
||||
#define BNX2FC_MIN_XID 0
|
||||
#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
|
||||
#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
|
||||
#define FCOE_MAX_XID \
|
||||
(BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
|
||||
#define BNX2FC_MAX_LUN 0xFFFF
|
||||
#define BNX2FC_MAX_FCP_TGT 256
|
||||
#define BNX2FC_MAX_CMD_LEN 16
|
||||
|
||||
#define BNX2FC_TM_TIMEOUT 60 /* secs */
|
||||
#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
|
||||
|
||||
#define BNX2FC_WAIT_CNT 120
|
||||
#define BNX2FC_FW_TIMEOUT (3 * HZ)
|
||||
|
||||
#define PORT_MAX 2
|
||||
|
||||
#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
|
||||
|
||||
/* FC FCP Status */
|
||||
#define FC_GOOD 0
|
||||
|
||||
#define BNX2FC_RNID_HBA 0x7
|
||||
|
||||
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
|
||||
extern struct fcoe_percpu_s bnx2fc_global;
|
||||
|
||||
extern struct workqueue_struct *bnx2fc_wq;
|
||||
|
||||
struct bnx2fc_percpu_s {
|
||||
struct task_struct *iothread;
|
||||
struct list_head work_list;
|
||||
spinlock_t fp_work_lock;
|
||||
};
|
||||
|
||||
|
||||
struct bnx2fc_hba {
|
||||
struct list_head link;
|
||||
struct cnic_dev *cnic;
|
||||
struct pci_dev *pcidev;
|
||||
struct net_device *netdev;
|
||||
struct net_device *phys_dev;
|
||||
unsigned long reg_with_cnic;
|
||||
#define BNX2FC_CNIC_REGISTERED 1
|
||||
struct packet_type fcoe_packet_type;
|
||||
struct packet_type fip_packet_type;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr;
|
||||
struct workqueue_struct *timer_work_queue;
|
||||
struct kref kref;
|
||||
spinlock_t hba_lock;
|
||||
struct mutex hba_mutex;
|
||||
unsigned long adapter_state;
|
||||
#define ADAPTER_STATE_UP 0
|
||||
#define ADAPTER_STATE_GOING_DOWN 1
|
||||
#define ADAPTER_STATE_LINK_DOWN 2
|
||||
#define ADAPTER_STATE_READY 3
|
||||
u32 flags;
|
||||
unsigned long init_done;
|
||||
#define BNX2FC_FW_INIT_DONE 0
|
||||
#define BNX2FC_CTLR_INIT_DONE 1
|
||||
#define BNX2FC_CREATE_DONE 2
|
||||
struct fcoe_ctlr ctlr;
|
||||
u8 vlan_enabled;
|
||||
int vlan_id;
|
||||
u32 next_conn_id;
|
||||
struct fcoe_task_ctx_entry **task_ctx;
|
||||
dma_addr_t *task_ctx_dma;
|
||||
struct regpair *task_ctx_bd_tbl;
|
||||
dma_addr_t task_ctx_bd_dma;
|
||||
|
||||
int hash_tbl_segment_count;
|
||||
void **hash_tbl_segments;
|
||||
void *hash_tbl_pbl;
|
||||
dma_addr_t hash_tbl_pbl_dma;
|
||||
struct fcoe_t2_hash_table_entry *t2_hash_tbl;
|
||||
dma_addr_t t2_hash_tbl_dma;
|
||||
char *t2_hash_tbl_ptr;
|
||||
dma_addr_t t2_hash_tbl_ptr_dma;
|
||||
|
||||
char *dummy_buffer;
|
||||
dma_addr_t dummy_buf_dma;
|
||||
|
||||
struct fcoe_statistics_params *stats_buffer;
|
||||
dma_addr_t stats_buf_dma;
|
||||
|
||||
/*
|
||||
* PCI related info.
|
||||
*/
|
||||
u16 pci_did;
|
||||
u16 pci_vid;
|
||||
u16 pci_sdid;
|
||||
u16 pci_svid;
|
||||
u16 pci_func;
|
||||
u16 pci_devno;
|
||||
|
||||
struct task_struct *l2_thread;
|
||||
|
||||
/* linkdown handling */
|
||||
wait_queue_head_t shutdown_wait;
|
||||
int wait_for_link_down;
|
||||
|
||||
/*destroy handling */
|
||||
struct timer_list destroy_timer;
|
||||
wait_queue_head_t destroy_wait;
|
||||
|
||||
/* Active list of offloaded sessions */
|
||||
struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
|
||||
int num_ofld_sess;
|
||||
|
||||
/* statistics */
|
||||
struct completion stat_req_done;
|
||||
};
|
||||
|
||||
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
|
||||
|
||||
struct bnx2fc_cmd_mgr {
|
||||
struct bnx2fc_hba *hba;
|
||||
u16 next_idx;
|
||||
struct list_head *free_list;
|
||||
spinlock_t *free_list_lock;
|
||||
struct io_bdt **io_bdt_pool;
|
||||
struct bnx2fc_cmd **cmds;
|
||||
};
|
||||
|
||||
struct bnx2fc_rport {
|
||||
struct fcoe_port *port;
|
||||
struct fc_rport *rport;
|
||||
struct fc_rport_priv *rdata;
|
||||
void __iomem *ctx_base;
|
||||
#define DPM_TRIGER_TYPE 0x40
|
||||
u32 fcoe_conn_id;
|
||||
u32 context_id;
|
||||
u32 sid;
|
||||
|
||||
unsigned long flags;
|
||||
#define BNX2FC_FLAG_SESSION_READY 0x1
|
||||
#define BNX2FC_FLAG_OFFLOADED 0x2
|
||||
#define BNX2FC_FLAG_DISABLED 0x3
|
||||
#define BNX2FC_FLAG_DESTROYED 0x4
|
||||
#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
|
||||
#define BNX2FC_FLAG_DESTROY_CMPL 0x6
|
||||
#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
|
||||
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
|
||||
#define BNX2FC_FLAG_EXPL_LOGO 0x9
|
||||
|
||||
u32 max_sqes;
|
||||
u32 max_rqes;
|
||||
u32 max_cqes;
|
||||
|
||||
struct fcoe_sqe *sq;
|
||||
dma_addr_t sq_dma;
|
||||
u16 sq_prod_idx;
|
||||
u8 sq_curr_toggle_bit;
|
||||
u32 sq_mem_size;
|
||||
|
||||
struct fcoe_cqe *cq;
|
||||
dma_addr_t cq_dma;
|
||||
u32 cq_cons_idx;
|
||||
u8 cq_curr_toggle_bit;
|
||||
u32 cq_mem_size;
|
||||
|
||||
void *rq;
|
||||
dma_addr_t rq_dma;
|
||||
u32 rq_prod_idx;
|
||||
u32 rq_cons_idx;
|
||||
u32 rq_mem_size;
|
||||
|
||||
void *rq_pbl;
|
||||
dma_addr_t rq_pbl_dma;
|
||||
u32 rq_pbl_size;
|
||||
|
||||
struct fcoe_xfrqe *xferq;
|
||||
dma_addr_t xferq_dma;
|
||||
u32 xferq_mem_size;
|
||||
|
||||
struct fcoe_confqe *confq;
|
||||
dma_addr_t confq_dma;
|
||||
u32 confq_mem_size;
|
||||
|
||||
void *confq_pbl;
|
||||
dma_addr_t confq_pbl_dma;
|
||||
u32 confq_pbl_size;
|
||||
|
||||
struct fcoe_conn_db *conn_db;
|
||||
dma_addr_t conn_db_dma;
|
||||
u32 conn_db_mem_size;
|
||||
|
||||
struct fcoe_sqe *lcq;
|
||||
dma_addr_t lcq_dma;
|
||||
u32 lcq_mem_size;
|
||||
|
||||
void *ofld_req[4];
|
||||
dma_addr_t ofld_req_dma[4];
|
||||
void *enbl_req;
|
||||
dma_addr_t enbl_req_dma;
|
||||
|
||||
spinlock_t tgt_lock;
|
||||
spinlock_t cq_lock;
|
||||
atomic_t num_active_ios;
|
||||
u32 flush_in_prog;
|
||||
unsigned long work_time_slice;
|
||||
unsigned long timestamp;
|
||||
struct list_head free_task_list;
|
||||
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
|
||||
atomic_t pi;
|
||||
atomic_t ci;
|
||||
struct list_head active_cmd_queue;
|
||||
struct list_head els_queue;
|
||||
struct list_head io_retire_queue;
|
||||
struct list_head active_tm_queue;
|
||||
|
||||
struct timer_list ofld_timer;
|
||||
wait_queue_head_t ofld_wait;
|
||||
|
||||
struct timer_list upld_timer;
|
||||
wait_queue_head_t upld_wait;
|
||||
};
|
||||
|
||||
struct bnx2fc_mp_req {
|
||||
u8 tm_flags;
|
||||
|
||||
u32 req_len;
|
||||
void *req_buf;
|
||||
dma_addr_t req_buf_dma;
|
||||
struct fcoe_bd_ctx *mp_req_bd;
|
||||
dma_addr_t mp_req_bd_dma;
|
||||
struct fc_frame_header req_fc_hdr;
|
||||
|
||||
u32 resp_len;
|
||||
void *resp_buf;
|
||||
dma_addr_t resp_buf_dma;
|
||||
struct fcoe_bd_ctx *mp_resp_bd;
|
||||
dma_addr_t mp_resp_bd_dma;
|
||||
struct fc_frame_header resp_fc_hdr;
|
||||
};
|
||||
|
||||
struct bnx2fc_els_cb_arg {
|
||||
struct bnx2fc_cmd *aborted_io_req;
|
||||
struct bnx2fc_cmd *io_req;
|
||||
u16 l2_oxid;
|
||||
};
|
||||
|
||||
/* bnx2fc command structure */
|
||||
struct bnx2fc_cmd {
|
||||
struct list_head link;
|
||||
u8 on_active_queue;
|
||||
u8 on_tmf_queue;
|
||||
u8 cmd_type;
|
||||
#define BNX2FC_SCSI_CMD 1
|
||||
#define BNX2FC_TASK_MGMT_CMD 2
|
||||
#define BNX2FC_ABTS 3
|
||||
#define BNX2FC_ELS 4
|
||||
#define BNX2FC_CLEANUP 5
|
||||
u8 io_req_flags;
|
||||
struct kref refcount;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct scsi_cmnd *sc_cmd;
|
||||
struct bnx2fc_cmd_mgr *cmd_mgr;
|
||||
struct bnx2fc_mp_req mp_req;
|
||||
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
|
||||
struct bnx2fc_els_cb_arg *cb_arg;
|
||||
struct delayed_work timeout_work; /* timer for ULP timeouts */
|
||||
struct completion tm_done;
|
||||
int wait_for_comp;
|
||||
u16 xid;
|
||||
struct fcoe_task_ctx_entry *task;
|
||||
struct io_bdt *bd_tbl;
|
||||
struct fcp_rsp *rsp;
|
||||
size_t data_xfer_len;
|
||||
unsigned long req_flags;
|
||||
#define BNX2FC_FLAG_ISSUE_RRQ 0x1
|
||||
#define BNX2FC_FLAG_ISSUE_ABTS 0x2
|
||||
#define BNX2FC_FLAG_ABTS_DONE 0x3
|
||||
#define BNX2FC_FLAG_TM_COMPL 0x4
|
||||
#define BNX2FC_FLAG_TM_TIMEOUT 0x5
|
||||
#define BNX2FC_FLAG_IO_CLEANUP 0x6
|
||||
#define BNX2FC_FLAG_RETIRE_OXID 0x7
|
||||
#define BNX2FC_FLAG_EH_ABORT 0x8
|
||||
#define BNX2FC_FLAG_IO_COMPL 0x9
|
||||
#define BNX2FC_FLAG_ELS_DONE 0xa
|
||||
#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
|
||||
u32 fcp_resid;
|
||||
u32 fcp_rsp_len;
|
||||
u32 fcp_sns_len;
|
||||
u8 cdb_status; /* SCSI IO status */
|
||||
u8 fcp_status; /* FCP IO status */
|
||||
u8 fcp_rsp_code;
|
||||
u8 scsi_comp_flags;
|
||||
};
|
||||
|
||||
struct io_bdt {
|
||||
struct bnx2fc_cmd *io_req;
|
||||
struct fcoe_bd_ctx *bd_tbl;
|
||||
dma_addr_t bd_tbl_dma;
|
||||
u16 bd_valid;
|
||||
};
|
||||
|
||||
struct bnx2fc_work {
|
||||
struct list_head list;
|
||||
struct bnx2fc_rport *tgt;
|
||||
u16 wqe;
|
||||
};
|
||||
struct bnx2fc_unsol_els {
|
||||
struct fc_lport *lport;
|
||||
struct fc_frame *fp;
|
||||
struct work_struct unsol_els_work;
|
||||
};
|
||||
|
||||
|
||||
|
||||
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
|
||||
void bnx2fc_cmd_release(struct kref *ref);
|
||||
int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
|
||||
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
|
||||
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
|
||||
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt);
|
||||
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt);
|
||||
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt);
|
||||
int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
|
||||
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
|
||||
u32 num_cqe);
|
||||
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
|
||||
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
|
||||
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
|
||||
void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
|
||||
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
|
||||
u16 min_xid, u16 max_xid);
|
||||
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
|
||||
void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
|
||||
char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
|
||||
void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
|
||||
int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
|
||||
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
|
||||
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
|
||||
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
|
||||
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
|
||||
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
|
||||
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
|
||||
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
|
||||
unsigned int timer_msec);
|
||||
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
|
||||
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u16 orig_xid);
|
||||
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task);
|
||||
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task);
|
||||
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
|
||||
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
|
||||
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
|
||||
int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
|
||||
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
|
||||
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
|
||||
void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
||||
struct fc_rport_priv *rport,
|
||||
enum fc_rport_event event);
|
||||
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq);
|
||||
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq);
|
||||
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq);
|
||||
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq);
|
||||
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
|
||||
struct fcoe_task_ctx_entry *task,
|
||||
u8 num_rq);
|
||||
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
|
||||
struct fcp_cmnd *fcp_cmnd);
|
||||
|
||||
|
||||
|
||||
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
|
||||
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
|
||||
struct fc_frame *fp, unsigned int op,
|
||||
void (*resp)(struct fc_seq *,
|
||||
struct fc_frame *,
|
||||
void *),
|
||||
void *arg, u32 timeout);
|
||||
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
|
||||
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
|
||||
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
|
||||
u32 port_id);
|
||||
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
|
||||
unsigned char *buf,
|
||||
u32 frame_len, u16 l2_oxid);
|
||||
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
|
||||
|
||||
#endif
|
|
@ -0,0 +1,206 @@
|
|||
#ifndef __BNX2FC_CONSTANTS_H_
|
||||
#define __BNX2FC_CONSTANTS_H_
|
||||
|
||||
/**
|
||||
* This file defines HSI constants for the FCoE flows
|
||||
*/
|
||||
|
||||
/* KWQ/KCQ FCoE layer code */
|
||||
#define FCOE_KWQE_LAYER_CODE (7)
|
||||
|
||||
/* KWQ (kernel work queue) request op codes */
|
||||
#define FCOE_KWQE_OPCODE_INIT1 (0)
|
||||
#define FCOE_KWQE_OPCODE_INIT2 (1)
|
||||
#define FCOE_KWQE_OPCODE_INIT3 (2)
|
||||
#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
|
||||
#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
|
||||
#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
|
||||
#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
|
||||
#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
|
||||
#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
|
||||
#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
|
||||
#define FCOE_KWQE_OPCODE_DESTROY (10)
|
||||
#define FCOE_KWQE_OPCODE_STAT (11)
|
||||
|
||||
/* KCQ (kernel completion queue) response op codes */
|
||||
#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
|
||||
#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
|
||||
#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
|
||||
#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
|
||||
#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
|
||||
#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
|
||||
#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
|
||||
#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
|
||||
#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
|
||||
|
||||
/* KCQ (kernel completion queue) completion status */
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
|
||||
#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
|
||||
|
||||
/* Unsolicited CQE type */
|
||||
#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
|
||||
#define FCOE_ERROR_DETECTION_CQE_TYPE 1
|
||||
#define FCOE_WARNING_DETECTION_CQE_TYPE 2
|
||||
|
||||
/* Task context constants */
|
||||
/* After driver has initialize the task in case timer services required */
|
||||
#define FCOE_TASK_TX_STATE_INIT 0
|
||||
/* In case timer services are required then shall be updated by Xstorm after
|
||||
* start processing the task. In case no timer facilities are required then the
|
||||
* driver would initialize the state to this value */
|
||||
#define FCOE_TASK_TX_STATE_NORMAL 1
|
||||
/* Task is under abort procedure. Updated in order to stop processing of
|
||||
* pending WQEs on this task */
|
||||
#define FCOE_TASK_TX_STATE_ABORT 2
|
||||
/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
|
||||
#define FCOE_TASK_TX_STATE_ERROR 3
|
||||
/* For REC_TOV timer expiration indication received from Xstorm */
|
||||
#define FCOE_TASK_TX_STATE_WARNING 4
|
||||
/* For completed unsolicited task */
|
||||
#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
|
||||
/* For exchange cleanup request task */
|
||||
#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
|
||||
/* For sequence cleanup request task */
|
||||
#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
|
||||
/* Mark task as aborted and indicate that ABTS was not transmitted */
|
||||
#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
|
||||
/* Mark task as aborted and indicate that ABTS was transmitted */
|
||||
#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
|
||||
/* For completion the ABTS task. */
|
||||
#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
|
||||
/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
|
||||
*/
|
||||
#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
|
||||
/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
|
||||
#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
|
||||
|
||||
#define FCOE_TASK_RX_STATE_NORMAL 0
|
||||
#define FCOE_TASK_RX_STATE_COMPLETED 1
|
||||
/* Obsolete: Intermediate completion (middle path with local completion) */
|
||||
#define FCOE_TASK_RX_STATE_INTER_COMP 2
|
||||
/* For REC_TOV timer expiration indication received from Xstorm */
|
||||
#define FCOE_TASK_RX_STATE_WARNING 3
|
||||
/* For E_D_T_TOV timer expiration in Ustorm */
|
||||
#define FCOE_TASK_RX_STATE_ERROR 4
|
||||
/* ABTS ACC arrived wait for local completion to finally complete the task. */
|
||||
#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
|
||||
/* local completion arrived wait for ABTS ACC to finally complete the task. */
|
||||
#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
|
||||
/* Special completion indication in case of task was aborted. */
|
||||
#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
|
||||
/* Special completion indication in case of task was cleaned. */
|
||||
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
|
||||
/* Special completion indication (in task requested the exchange cleanup) in
|
||||
* case cleaned task is in non-valid. */
|
||||
#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
|
||||
/* Special completion indication (in task requested the sequence cleanup) in
|
||||
* case cleaned task was already returned to normal. */
|
||||
#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
|
||||
/* Exchange cleanup arrived wait until xfer will be handled to finally
|
||||
* complete the task. */
|
||||
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
|
||||
/* Xfer handled, wait for exchange cleanup to finally complete the task. */
|
||||
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
|
||||
|
||||
#define FCOE_TASK_TYPE_WRITE 0
|
||||
#define FCOE_TASK_TYPE_READ 1
|
||||
#define FCOE_TASK_TYPE_MIDPATH 2
|
||||
#define FCOE_TASK_TYPE_UNSOLICITED 3
|
||||
#define FCOE_TASK_TYPE_ABTS 4
|
||||
#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
|
||||
#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
|
||||
|
||||
#define FCOE_TASK_DEV_TYPE_DISK 0
|
||||
#define FCOE_TASK_DEV_TYPE_TAPE 1
|
||||
|
||||
#define FCOE_TASK_CLASS_TYPE_3 0
|
||||
#define FCOE_TASK_CLASS_TYPE_2 1
|
||||
|
||||
/* Everest FCoE connection type */
|
||||
#define B577XX_FCOE_CONNECTION_TYPE 4
|
||||
|
||||
/* Error codes for Error Reporting in fast path flows */
|
||||
/* XFER error codes */
|
||||
#define FCOE_ERROR_CODE_XFER_OOO_RO 0
|
||||
#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
|
||||
#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
|
||||
#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
|
||||
#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
|
||||
#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
|
||||
#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
|
||||
#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
|
||||
#define FCOE_ERROR_CODE_XFER_FCTL 8
|
||||
|
||||
/* FCP RSP error codes */
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
|
||||
#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
|
||||
|
||||
/* FCP DATA error codes */
|
||||
#define FCOE_ERROR_CODE_DATA_OOO_RO 20
|
||||
#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
|
||||
#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
|
||||
#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
|
||||
#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
|
||||
#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
|
||||
#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
|
||||
#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
|
||||
#define FCOE_ERROR_CODE_DATA_FCTL 28
|
||||
|
||||
/* Middle path error codes */
|
||||
#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
|
||||
#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
|
||||
#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
|
||||
#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
|
||||
#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
|
||||
#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
|
||||
#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
|
||||
#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
|
||||
|
||||
/* ABTS error codes */
|
||||
#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
|
||||
#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
|
||||
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
|
||||
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
|
||||
#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
|
||||
|
||||
/* Common error codes */
|
||||
#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
|
||||
#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
|
||||
#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
|
||||
#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
|
||||
#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
|
||||
#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
|
||||
#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
|
||||
#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
|
||||
#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
|
||||
|
||||
/* Unsolicited Rx error codes */
|
||||
#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
|
||||
#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
|
||||
#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
|
||||
#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
|
||||
#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
|
||||
|
||||
#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
|
||||
#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
|
||||
#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
|
||||
|
||||
/* Timer error codes */
|
||||
#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
|
||||
#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
|
||||
|
||||
|
||||
#endif /* BNX2FC_CONSTANTS_H_ */
|
|
@ -0,0 +1,70 @@
|
|||
#ifndef __BNX2FC_DEBUG__
|
||||
#define __BNX2FC_DEBUG__
|
||||
|
||||
/* Log level bit mask */
|
||||
#define LOG_IO 0x01 /* scsi cmd error, cleanup */
|
||||
#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */
|
||||
#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */
|
||||
#define LOG_ELS 0x08 /* ELS logs */
|
||||
#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/
|
||||
#define LOG_ALL 0xff /* LOG all messages */
|
||||
|
||||
extern unsigned int bnx2fc_debug_level;
|
||||
|
||||
#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
|
||||
do { \
|
||||
if (unlikely(bnx2fc_debug_level & LEVEL)) \
|
||||
do { \
|
||||
CMD; \
|
||||
} while (0); \
|
||||
} while (0)
|
||||
|
||||
#define BNX2FC_ELS_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_ELS, \
|
||||
printk(KERN_ALERT PFX fmt, ##arg))
|
||||
|
||||
#define BNX2FC_MISC_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_MISC, \
|
||||
printk(KERN_ALERT PFX fmt, ##arg))
|
||||
|
||||
#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
|
||||
do { \
|
||||
if (!io_req || !io_req->port || !io_req->port->lport || \
|
||||
!io_req->port->lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
shost_printk(KERN_ALERT, \
|
||||
(io_req)->port->lport->host, \
|
||||
PFX "xid:0x%x " fmt, \
|
||||
(io_req)->xid, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
|
||||
do { \
|
||||
if (!tgt || !tgt->port || !tgt->port->lport || \
|
||||
!tgt->port->lport->host || !tgt->rport) \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
shost_printk(KERN_ALERT, \
|
||||
(tgt)->port->lport->host, \
|
||||
PFX "port:%x " fmt, \
|
||||
(tgt)->rport->port_id, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
|
||||
do { \
|
||||
if (!lport || !lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
shost_printk(KERN_ALERT, lport->host, \
|
||||
PFX fmt, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
#endif
|
|
@ -0,0 +1,515 @@
|
|||
/*
|
||||
* bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
|
||||
* This file contains helper routines that handle ELS requests
|
||||
* and responses.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
|
||||
*/
|
||||
|
||||
#include "bnx2fc.h"
|
||||
|
||||
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
|
||||
void *arg);
|
||||
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
|
||||
void *arg);
|
||||
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
|
||||
void *data, u32 data_len,
|
||||
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
|
||||
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
|
||||
|
||||
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
|
||||
{
|
||||
struct bnx2fc_cmd *orig_io_req;
|
||||
struct bnx2fc_cmd *rrq_req;
|
||||
int rc = 0;
|
||||
|
||||
BUG_ON(!cb_arg);
|
||||
rrq_req = cb_arg->io_req;
|
||||
orig_io_req = cb_arg->aborted_io_req;
|
||||
BUG_ON(!orig_io_req);
|
||||
BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
|
||||
orig_io_req->xid, rrq_req->xid);
|
||||
|
||||
kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
|
||||
|
||||
if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
|
||||
/*
|
||||
* els req is timed out. cleanup the IO with FW and
|
||||
* drop the completion. Remove from active_cmd_queue.
|
||||
*/
|
||||
BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
|
||||
rrq_req->xid);
|
||||
|
||||
if (rrq_req->on_active_queue) {
|
||||
list_del_init(&rrq_req->link);
|
||||
rrq_req->on_active_queue = 0;
|
||||
rc = bnx2fc_initiate_cleanup(rrq_req);
|
||||
BUG_ON(rc);
|
||||
}
|
||||
}
|
||||
kfree(cb_arg);
|
||||
}
|
||||
/*
 * bnx2fc_send_rrq - send an RRQ ELS to reclaim the exchange of an aborted
 * command.
 *
 * @aborted_io_req: the command whose exchange (OX_ID/RX_ID) is being
 *                  reinstated; the caller holds a reference on it that is
 *                  dropped by bnx2fc_rrq_compl() on success, or here on
 *                  failure.
 *
 * Retries on -ENOMEM for up to 10 seconds.  On failure the cb_arg is freed
 * and the hold on @aborted_io_req is dropped under tgt_lock.
 *
 * NOTE(review): on the retry-exhausted path rc is set to FAILED (a SCSI-ML
 * constant) while other failures return -errno — callers only appear to
 * test for non-zero, but the mixed convention is worth confirming.
 *
 * Returns 0 on success, non-zero on failure.
 */
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	/* Identify the exchange being reinstated by its original IDs */
	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		/* Out of command slots/resources: back off and retry for 10s */
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	/* On success (rc == 0) cb_arg and the io_req hold are owned by the
	 * completion handler; only clean up here on failure. */
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
|
||||
|
||||
/*
 * bnx2fc_l2_els_compl - completion callback for L2 (libfc-originated) ELS
 * requests (ADISC/LOGO/RLS) sent through bnx2fc_initiate_els().
 *
 * @cb_arg: callback context carrying the driver command (io_req) and the
 *          original libfc exchange id (l2_oxid).
 *
 * If the request timed out, issues a firmware cleanup and drops the
 * completion (libfc handles the ELS timeout).  Otherwise reassembles the
 * response (FC header + payload) into a temporary page-sized buffer and
 * hands it to bnx2fc_process_l2_frame_compl() for delivery to libfc.
 * Always frees @cb_arg.
 */
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/* Atomic context: this runs from the completion path */
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	/* Guard the memcpy below: header + payload must fit in one page */
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	/* Rebuild a contiguous frame: FC header followed by the payload */
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}
|
||||
|
||||
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
|
||||
{
|
||||
struct fc_els_adisc *adisc;
|
||||
struct fc_frame_header *fh;
|
||||
struct bnx2fc_els_cb_arg *cb_arg;
|
||||
struct fc_lport *lport = tgt->rdata->local_port;
|
||||
u32 r_a_tov = lport->r_a_tov;
|
||||
int rc;
|
||||
|
||||
fh = fc_frame_header_get(fp);
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
|
||||
|
||||
BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
|
||||
adisc = fc_frame_payload_get(fp, sizeof(*adisc));
|
||||
/* adisc is initialized by libfc */
|
||||
rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
|
||||
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
|
||||
if (rc)
|
||||
kfree(cb_arg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
|
||||
{
|
||||
struct fc_els_logo *logo;
|
||||
struct fc_frame_header *fh;
|
||||
struct bnx2fc_els_cb_arg *cb_arg;
|
||||
struct fc_lport *lport = tgt->rdata->local_port;
|
||||
u32 r_a_tov = lport->r_a_tov;
|
||||
int rc;
|
||||
|
||||
fh = fc_frame_header_get(fp);
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
|
||||
|
||||
BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
|
||||
logo = fc_frame_payload_get(fp, sizeof(*logo));
|
||||
/* logo is initialized by libfc */
|
||||
rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
|
||||
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
|
||||
if (rc)
|
||||
kfree(cb_arg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
|
||||
{
|
||||
struct fc_els_rls *rls;
|
||||
struct fc_frame_header *fh;
|
||||
struct bnx2fc_els_cb_arg *cb_arg;
|
||||
struct fc_lport *lport = tgt->rdata->local_port;
|
||||
u32 r_a_tov = lport->r_a_tov;
|
||||
int rc;
|
||||
|
||||
fh = fc_frame_header_get(fp);
|
||||
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
|
||||
if (!cb_arg) {
|
||||
printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
|
||||
|
||||
rls = fc_frame_payload_get(fp, sizeof(*rls));
|
||||
/* rls is initialized by libfc */
|
||||
rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
|
||||
bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
|
||||
if (rc)
|
||||
kfree(cb_arg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
 * bnx2fc_initiate_els - build and post an ELS request to the firmware.
 *
 * @tgt:        offloaded session to send the request on
 * @op:         ELS opcode (ELS_RRQ, ELS_ADISC, ELS_LOGO, ELS_RLS, ...)
 * @data:       ELS payload to copy into the middle-path request buffer
 * @data_len:   payload length in bytes
 * @cb_func:    completion callback invoked by bnx2fc_process_els_compl()
 * @cb_arg:     caller-owned callback context; io_req is filled in here
 * @timer_msec: request timeout in ms (0 = no timer)
 *
 * Validates rport/lport/session state, allocates a command and its
 * middle-path buffers, fills the FC header, initializes the task context,
 * then (under tgt_lock) arms the timer, queues the request on the send
 * queue and els_queue, and rings the doorbell.
 *
 * On failure the command reference is dropped; @cb_arg remains the
 * caller's to free.  Returns 0 on success or a negative errno.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	/* Reject early if the rport, link, or offloaded session is not usable */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ALERT PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
		/* Clear callback fields so release cannot invoke them */
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	/* Re-check session state under the lock: it may have been torn
	 * down between the unlocked check above and here */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
|
||||
|
||||
/*
 * bnx2fc_process_els_compl - handle a firmware completion for an ELS
 * request previously posted by bnx2fc_initiate_els().
 *
 * @els_req: the completed ELS command
 * @task:    firmware task context entry holding the response header/length
 * @num_rq:  number of RQ entries consumed (unused here)
 *
 * The BNX2FC_FLAG_ELS_DONE bit arbitrates between this path and the timer
 * path: whichever sets it first owns the completion.  Copies the response
 * FC header (byte-swapped 64 bits at a time) and length out of the task
 * context, then invokes the request's callback and drops the command
 * reference.
 */
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	/* Copy the 24-byte FC header out of the task context, converting
	 * each 64-bit word to big-endian wire order */
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
|
||||
|
||||
/*
 * bnx2fc_flogi_resp - intercept FLOGI/FDISC responses before libfc.
 *
 * @seq: exchange sequence the response arrived on
 * @fp:  response frame, or an ERR_PTR on exchange error
 * @arg: the fcoe_ctlr (FIP controller) for this hba
 *
 * If FIP has not already granted a MAC, validates the frame, terminates a
 * vport on LS_RJT, and lets the FIP controller process the FLOGI response
 * (which may consume the frame).  Finally updates the lport's MAC and
 * forwards the response to libfc's fc_lport_flogi_resp().
 *
 * Frame ownership: on the early-return error paths the frame is freed
 * here; otherwise it is passed on to libfc.
 */
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	struct fc_frame_header *fh;
	u8 op;

	/* Exchange-level error: just pass it through to libfc */
	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		/* No MAC granted by FIP yet: vet the frame ourselves */
		fh = fc_frame_header_get(fp);
		if (fh->fh_type != FC_TYPE_ELS) {
			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
				"fh_type != FC_TYPE_ELS\n");
			fc_frame_free(fp);
			return;
		}
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				/* Fabric rejected the vport's FDISC */
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
|
||||
|
||||
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
|
||||
void *arg)
|
||||
{
|
||||
struct fcoe_ctlr *fip = arg;
|
||||
struct fc_exch *exch = fc_seq_exch(seq);
|
||||
struct fc_lport *lport = exch->lp;
|
||||
static u8 zero_mac[ETH_ALEN] = { 0 };
|
||||
|
||||
if (!IS_ERR(fp))
|
||||
fip->update_mac(lport, zero_mac);
|
||||
fc_lport_logo_resp(seq, fp, lport);
|
||||
}
|
||||
|
||||
/*
 * bnx2fc_elsct_send - libfc elsct_send hook for bnx2fc.
 *
 * Routes FLOGI/FDISC and fabric LOGO responses through bnx2fc's own
 * handlers (so the FIP controller can track the granted MAC); everything
 * else is passed straight to libfc with the caller's response handler.
 */
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
					     struct fc_frame *,
					     void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_ctlr *fip = &hba->ctlr;
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	/* FLOGI/FDISC responses carry the FIP-granted MAC */
	if (op == ELS_FLOGI || op == ELS_FDISC)
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);

	/* only hook onto fabric logouts, not port logouts */
	if (op == ELS_LOGO && ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);

	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -0,0 +1,844 @@
|
|||
/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
|
||||
* Handles operations such as session offload/upload etc, and manages
|
||||
* session resources such as connection id and qp resources.
|
||||
*
|
||||
* Copyright (c) 2008 - 2010 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
|
||||
*/
|
||||
|
||||
#include "bnx2fc.h"
|
||||
static void bnx2fc_upld_timer(unsigned long data);
|
||||
static void bnx2fc_ofld_timer(unsigned long data);
|
||||
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
|
||||
struct fcoe_port *port,
|
||||
struct fc_rport_priv *rdata);
|
||||
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt);
|
||||
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt);
|
||||
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt);
|
||||
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
|
||||
|
||||
/*
 * bnx2fc_upld_timer - upload (disable/destroy) timeout handler.
 *
 * @data: the bnx2fc_rport whose upload did not complete in time.
 *
 * Fakes an upload completion so the thread blocked in
 * wait_event_interruptible() on upld_wait can make progress: clears
 * OFFLOADED, sets UPLD_REQ_COMPL, then wakes the waiter.
 */
static void bnx2fc_upld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}
|
||||
|
||||
/*
 * bnx2fc_ofld_timer - session offload timeout handler.
 *
 * @data: the bnx2fc_rport whose offload did not complete in time.
 *
 * Fakes an offload completion so the thread blocked on ofld_wait can
 * proceed; with OFFLOADED cleared, bnx2fc_offload_session() treats the
 * wakeup as an offload failure and logs the port off.
 */
static void bnx2fc_ofld_timer(unsigned long data)
{

	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never timeout
	 */
	/*
	 * If the timer has expired, this session is dead
	 * Clear offloaded flag and logout of this device.
	 * Since OFFLOADED flag is cleared, this case
	 * will be considered as offload error and the
	 * port will be logged off, and conn_id, session
	 * resources are freed up in bnx2fc_offload_session
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}
|
||||
|
||||
/*
 * bnx2fc_offload_session - blocking session offload to the firmware.
 *
 * @port:  fcoe_port the session belongs to
 * @tgt:   zeroed bnx2fc_rport to initialize and offload
 * @rdata: libfc remote port data for the target
 *
 * Initializes the rport, allocates session resources, sends the offload
 * KWQE and sleeps (with a fallback timer) until the completion path sets
 * OFLD_REQ_CMPL.  Retries up to 3 times on firmware context-allocation
 * failure.  On any error the rport is logged off and conn_id/session
 * resources are freed.
 *
 * Called with hba->hba_mutex held (see bnx2fc_rport_event_handler).
 */
static void bnx2fc_offload_session(struct fcoe_port *port,
					struct bnx2fc_rport *tgt,
					struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_hba *hba = port->priv;
	int rval;
	int i = 0;

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
			"port_id (%6x)\n", rport->port_id);
		goto ofld_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize FCoE session offload process.
	 * Upon completion of offload process add
	 * rport to list of rports
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}

	/*
	 * wait for the session is offloaded and enabled. 3 Secs
	 * should be ample time for this process to complete.
	 */
	setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 (test_bit(
				  BNX2FC_FLAG_OFLD_REQ_CMPL,
				  &tgt->flags)));
	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->ofld_timer);

	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
		/* Firmware ran out of contexts: back off and retry up to 3x */
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				"retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		/* upload will take care of cleaning up sess resc */
		lport->tt.rport_logoff(rdata);
	}
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	lport->tt.rport_logoff(rdata);
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	/* fcoe_conn_id == -1 means bnx2fc_init_tgt never allocated one */
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}
|
||||
|
||||
/*
 * bnx2fc_flush_active_ios - abort everything outstanding on a session.
 *
 * @tgt: session whose queues are to be drained.
 *
 * Under tgt_lock, walks three queues:
 *  - active_cmd_queue: cancels timers, completes pending eh_abort waiters,
 *    and issues a firmware cleanup per command;
 *  - els_queue: cancels timers, fires each ELS callback, issues cleanup;
 *  - io_retire_queue: cancels timers and clears the pending-RRQ flag.
 * Then drops the lock and polls (up to BNX2FC_WAIT_CNT * 25ms) for
 * num_active_ios to reach zero.
 *
 * Called from the upload path with the session already disabled.
 */
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct list_head *list;
	struct list_head *tmp;
	int rc;
	int i = 0;
	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		i++;
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->tm_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		rc = bnx2fc_initiate_cleanup(io_req);
		BUG_ON(rc);
	}

	list_for_each_safe(list, tmp, &tgt->els_queue) {
		i++;
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		/* Let the ELS originator observe the (aborted) completion */
		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		rc = bnx2fc_initiate_cleanup(io_req);
		BUG_ON(rc);
	}

	list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
		i++;
		io_req = (struct bnx2fc_cmd *)list;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	spin_unlock_bh(&tgt->tgt_lock);
	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
				    " active_ios = %d\n",
			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}
|
||||
|
||||
/*
 * bnx2fc_upload_session - blocking teardown of an offloaded session.
 *
 * @port: fcoe_port the session belongs to
 * @tgt:  offloaded session to upload
 *
 * Two-phase teardown: (1) send a DISABLE request and sleep (with a
 * fallback timer) until UPLD_REQ_COMPL is set, then flush all active IOs;
 * (2) if the disable completed (DISABLED set), send a DESTROY request and
 * wait again.  Finally frees session resources and the conn_id under
 * cq_lock.
 *
 * Called with hba->hba_mutex held; this is a blocking call.
 */
static void bnx2fc_upload_session(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * wait for upload to complete. 3 Secs
	 * should be sufficient time for this process to complete.
	 */
	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	wait_event_interruptible(tgt->upld_wait,
				 (test_bit(
				  BNX2FC_FLAG_UPLD_REQ_COMPL,
				  &tgt->flags)));

	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&tgt->upld_timer);

	/*
	 * traverse thru the active_q and tmf_q and cleanup
	 * IOs in these lists
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		setup_timer(&tgt->upld_timer,
			    bnx2fc_upld_timer, (unsigned long)tgt);
		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);

		wait_event_interruptible(tgt->upld_wait,
					 (test_bit(
					  BNX2FC_FLAG_UPLD_REQ_COMPL,
					  &tgt->flags)));

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			tgt->flags);
		if (signal_pending(current))
			flush_signals(current);

		del_timer_sync(&tgt->upld_timer);

	} else
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
				" not sent to FW\n");

	/* Free session resources */
	spin_lock_bh(&tgt->cq_lock);
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	spin_unlock_bh(&tgt->cq_lock);
}
|
||||
|
||||
/*
 * bnx2fc_init_tgt - initialize a (pre-zeroed) bnx2fc_rport for offload.
 *
 * @tgt:   zeroed session structure to fill in
 * @port:  owning fcoe_port
 * @rdata: libfc remote port data
 *
 * Enforces the BNX2FC_NUM_MAX_SESS session limit, allocates a connection
 * id, and initializes queue indices, toggle bits, locks, lists and wait
 * queues.  On failure leaves fcoe_conn_id at -1 so the caller's error
 * path skips bnx2fc_free_conn_id().
 *
 * Returns 0 on success, -1 on failure.
 */
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{

	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_hba *hba = port->priv;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
	/* RQ producer starts at 0x8000 (firmware-defined initial value —
	 * presumably the toggle/wrap encoding; TODO confirm vs. fw spec) */
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);

	tgt->work_time_slice = 2;

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}
|
||||
|
||||
/**
|
||||
* This event_callback is called after successful completion of libfc
|
||||
* initiated target login. bnx2fc can proceed with initiating the session
|
||||
* establishment.
|
||||
*/
|
||||
/*
 * bnx2fc_rport_event_handler - libfc rport event callback.
 *
 * @lport: local port the event is for
 * @rdata: remote port data
 * @event: RPORT_EV_READY (offload the session), RPORT_EV_LOGO/FAILED/STOP
 *         (upload it), or RPORT_EV_NONE (ignored).
 *
 * READY: after filtering out the directory server and non-FCP-target
 * rports, offloads the session under hba_mutex; on success bumps
 * num_ofld_sess and marks the session SESSION_READY.
 *
 * LOGO/FAILED/STOP: uploads the session under hba_mutex, decrements
 * num_ofld_sess, wakes any linkdown waiter when the count hits zero, and
 * re-logs-in the rport after an explicit logout.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_hba *hba = port->priv;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * bnx2fc_rport structure doesn't exist for
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
				rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				   " not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				   " not offloading\n");
			break;
		}

		/*
		 * Offload process is protected with hba mutex.
		 * Use the same mutex_lock for upload process too
		 */
		mutex_lock(&hba->hba_mutex);
		/* The bnx2fc_rport lives immediately after the libfc
		 * private area in rport->dd_data */
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
			/*
			 * Session is offloaded and enabled. Map
			 * doorbell register for this target
			 */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable would have failed.
			 * In offload/enable completion path, the
			 * rport would have already been removed
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				   "offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
				port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from disc->rports list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);
		/*
		 * Try to wake up the linkdown wait thread. If num_ofld_sess
		 * is 0, the waiting thread wakes up
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
			/* Explicit logout: immediately log back in */
			printk(KERN_ERR PFX "Relogin to the tgt\n");
			mutex_lock(&lport->disc.disc_mutex);
			lport->tt.rport_login(rdata);
			mutex_unlock(&lport->disc.disc_mutex);
		}
		mutex_unlock(&hba->hba_mutex);

		break;

	case RPORT_EV_NONE:
		break;
	}
}
|
||||
|
||||
/**
|
||||
* bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
|
||||
*
|
||||
* @port: fcoe_port struct to lookup the target port on
|
||||
* @port_id: The remote port ID to look up
|
||||
*/
|
||||
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
|
||||
u32 port_id)
|
||||
{
|
||||
struct bnx2fc_hba *hba = port->priv;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct fc_rport_priv *rdata;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
|
||||
tgt = hba->tgt_ofld_list[i];
|
||||
if ((tgt) && (tgt->port == port)) {
|
||||
rdata = tgt->rdata;
|
||||
if (rdata->ids.port_id == port_id) {
|
||||
if (rdata->rp_state != RPORT_ST_DELETE) {
|
||||
BNX2FC_TGT_DBG(tgt, "rport "
|
||||
"obtained\n");
|
||||
return tgt;
|
||||
} else {
|
||||
printk(KERN_ERR PFX "rport 0x%x "
|
||||
"is in DELETED state\n",
|
||||
rdata->ids.port_id);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* bnx2fc_alloc_conn_id - allocates FCOE Connection id
|
||||
*
|
||||
* @hba: pointer to adapter structure
|
||||
* @tgt: pointer to bnx2fc_rport structure
|
||||
*/
|
||||
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
u32 conn_id, next;
|
||||
|
||||
/* called with hba mutex held */
|
||||
|
||||
/*
|
||||
* tgt_ofld_list access is synchronized using
|
||||
* both hba mutex and hba lock. Atleast hba mutex or
|
||||
* hba lock needs to be held for read access.
|
||||
*/
|
||||
|
||||
spin_lock_bh(&hba->hba_lock);
|
||||
next = hba->next_conn_id;
|
||||
conn_id = hba->next_conn_id++;
|
||||
if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
|
||||
hba->next_conn_id = 0;
|
||||
|
||||
while (hba->tgt_ofld_list[conn_id] != NULL) {
|
||||
conn_id++;
|
||||
if (conn_id == BNX2FC_NUM_MAX_SESS)
|
||||
conn_id = 0;
|
||||
|
||||
if (conn_id == next) {
|
||||
/* No free conn_ids are available */
|
||||
spin_unlock_bh(&hba->hba_lock);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
hba->tgt_ofld_list[conn_id] = tgt;
|
||||
tgt->fcoe_conn_id = conn_id;
|
||||
spin_unlock_bh(&hba->hba_lock);
|
||||
return conn_id;
|
||||
}
|
||||
|
||||
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
|
||||
{
|
||||
/* called with hba mutex held */
|
||||
spin_lock_bh(&hba->hba_lock);
|
||||
hba->tgt_ofld_list[conn_id] = NULL;
|
||||
hba->next_conn_id = conn_id;
|
||||
spin_unlock_bh(&hba->hba_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
*bnx2fc_alloc_session_resc - Allocate qp resources for the session
|
||||
*
|
||||
*/
|
||||
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
dma_addr_t page;
|
||||
int num_pages;
|
||||
u32 *pbl;
|
||||
|
||||
/* Allocate and map SQ */
|
||||
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
|
||||
tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
|
||||
&tgt->sq_dma, GFP_KERNEL);
|
||||
if (!tgt->sq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
|
||||
tgt->sq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->sq, 0, tgt->sq_mem_size);
|
||||
|
||||
/* Allocate and map CQ */
|
||||
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
|
||||
tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
|
||||
&tgt->cq_dma, GFP_KERNEL);
|
||||
if (!tgt->cq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
|
||||
tgt->cq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->cq, 0, tgt->cq_mem_size);
|
||||
|
||||
/* Allocate and map RQ and RQ PBL */
|
||||
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
|
||||
tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
|
||||
&tgt->rq_dma, GFP_KERNEL);
|
||||
if (!tgt->rq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
|
||||
tgt->rq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->rq, 0, tgt->rq_mem_size);
|
||||
|
||||
tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
|
||||
tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
|
||||
&tgt->rq_pbl_dma, GFP_KERNEL);
|
||||
if (!tgt->rq_pbl) {
|
||||
printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
|
||||
tgt->rq_pbl_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
|
||||
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
|
||||
num_pages = tgt->rq_mem_size / PAGE_SIZE;
|
||||
page = tgt->rq_dma;
|
||||
pbl = (u32 *)tgt->rq_pbl;
|
||||
|
||||
while (num_pages--) {
|
||||
*pbl = (u32)page;
|
||||
pbl++;
|
||||
*pbl = (u32)((u64)page >> 32);
|
||||
pbl++;
|
||||
page += PAGE_SIZE;
|
||||
}
|
||||
|
||||
/* Allocate and map XFERQ */
|
||||
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
|
||||
tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
|
||||
PAGE_MASK;
|
||||
|
||||
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
|
||||
&tgt->xferq_dma, GFP_KERNEL);
|
||||
if (!tgt->xferq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
|
||||
tgt->xferq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->xferq, 0, tgt->xferq_mem_size);
|
||||
|
||||
/* Allocate and map CONFQ & CONFQ PBL */
|
||||
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
|
||||
tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
|
||||
PAGE_MASK;
|
||||
|
||||
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
|
||||
&tgt->confq_dma, GFP_KERNEL);
|
||||
if (!tgt->confq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
|
||||
tgt->confq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->confq, 0, tgt->confq_mem_size);
|
||||
|
||||
tgt->confq_pbl_size =
|
||||
(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
|
||||
tgt->confq_pbl_size =
|
||||
(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
|
||||
|
||||
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
|
||||
tgt->confq_pbl_size,
|
||||
&tgt->confq_pbl_dma, GFP_KERNEL);
|
||||
if (!tgt->confq_pbl) {
|
||||
printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
|
||||
tgt->confq_pbl_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
|
||||
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
|
||||
num_pages = tgt->confq_mem_size / PAGE_SIZE;
|
||||
page = tgt->confq_dma;
|
||||
pbl = (u32 *)tgt->confq_pbl;
|
||||
|
||||
while (num_pages--) {
|
||||
*pbl = (u32)page;
|
||||
pbl++;
|
||||
*pbl = (u32)((u64)page >> 32);
|
||||
pbl++;
|
||||
page += PAGE_SIZE;
|
||||
}
|
||||
|
||||
/* Allocate and map ConnDB */
|
||||
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
|
||||
|
||||
tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
|
||||
tgt->conn_db_mem_size,
|
||||
&tgt->conn_db_dma, GFP_KERNEL);
|
||||
if (!tgt->conn_db) {
|
||||
printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
|
||||
tgt->conn_db_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
|
||||
|
||||
|
||||
/* Allocate and map LCQ */
|
||||
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
|
||||
tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
|
||||
PAGE_MASK;
|
||||
|
||||
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
|
||||
&tgt->lcq_dma, GFP_KERNEL);
|
||||
|
||||
if (!tgt->lcq) {
|
||||
printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
|
||||
tgt->lcq_mem_size);
|
||||
goto mem_alloc_failure;
|
||||
}
|
||||
memset(tgt->lcq, 0, tgt->lcq_mem_size);
|
||||
|
||||
/* Arm CQ */
|
||||
tgt->conn_db->cq_arm.lo = -1;
|
||||
tgt->conn_db->rq_prod = 0x8000;
|
||||
|
||||
return 0;
|
||||
|
||||
mem_alloc_failure:
|
||||
bnx2fc_free_session_resc(hba, tgt);
|
||||
bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* bnx2i_free_session_resc - free qp resources for the session
|
||||
*
|
||||
* @hba: adapter structure pointer
|
||||
* @tgt: bnx2fc_rport structure pointer
|
||||
*
|
||||
* Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
|
||||
*/
|
||||
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
|
||||
|
||||
if (tgt->ctx_base) {
|
||||
iounmap(tgt->ctx_base);
|
||||
tgt->ctx_base = NULL;
|
||||
}
|
||||
/* Free LCQ */
|
||||
if (tgt->lcq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
|
||||
tgt->lcq, tgt->lcq_dma);
|
||||
tgt->lcq = NULL;
|
||||
}
|
||||
/* Free connDB */
|
||||
if (tgt->conn_db) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
|
||||
tgt->conn_db, tgt->conn_db_dma);
|
||||
tgt->conn_db = NULL;
|
||||
}
|
||||
/* Free confq and confq pbl */
|
||||
if (tgt->confq_pbl) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
|
||||
tgt->confq_pbl, tgt->confq_pbl_dma);
|
||||
tgt->confq_pbl = NULL;
|
||||
}
|
||||
if (tgt->confq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
|
||||
tgt->confq, tgt->confq_dma);
|
||||
tgt->confq = NULL;
|
||||
}
|
||||
/* Free XFERQ */
|
||||
if (tgt->xferq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
|
||||
tgt->xferq, tgt->xferq_dma);
|
||||
tgt->xferq = NULL;
|
||||
}
|
||||
/* Free RQ PBL and RQ */
|
||||
if (tgt->rq_pbl) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
|
||||
tgt->rq_pbl, tgt->rq_pbl_dma);
|
||||
tgt->rq_pbl = NULL;
|
||||
}
|
||||
if (tgt->rq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
|
||||
tgt->rq, tgt->rq_dma);
|
||||
tgt->rq = NULL;
|
||||
}
|
||||
/* Free CQ */
|
||||
if (tgt->cq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
|
||||
tgt->cq, tgt->cq_dma);
|
||||
tgt->cq = NULL;
|
||||
}
|
||||
/* Free SQ */
|
||||
if (tgt->sq) {
|
||||
dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
|
||||
tgt->sq, tgt->sq_dma);
|
||||
tgt->sq = NULL;
|
||||
}
|
||||
}
|
|
@ -360,7 +360,7 @@ struct bnx2i_hba {
|
|||
#define ADAPTER_STATE_LINK_DOWN 2
|
||||
#define ADAPTER_STATE_INIT_FAILED 31
|
||||
unsigned int mtu_supported;
|
||||
#define BNX2I_MAX_MTU_SUPPORTED 1500
|
||||
#define BNX2I_MAX_MTU_SUPPORTED 9000
|
||||
|
||||
struct Scsi_Host *shost;
|
||||
|
||||
|
@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
|
|||
struct iscsi_task *mtask);
|
||||
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
|
||||
struct iscsi_task *mtask);
|
||||
extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
|
||||
struct iscsi_task *mtask);
|
||||
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
|
||||
struct bnx2i_cmd *cmnd);
|
||||
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
|
||||
|
|
|
@ -444,6 +444,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
|
||||
* @conn: iscsi connection
|
||||
* @mtask: driver command structure which is requesting
|
||||
* a WQE to sent to chip for further processing
|
||||
*
|
||||
* prepare and post an iSCSI Text request WQE to CNIC firmware
|
||||
*/
|
||||
int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
|
||||
struct iscsi_task *mtask)
|
||||
{
|
||||
struct bnx2i_cmd *bnx2i_cmd;
|
||||
struct bnx2i_text_request *text_wqe;
|
||||
struct iscsi_text *text_hdr;
|
||||
u32 dword;
|
||||
|
||||
bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
|
||||
text_hdr = (struct iscsi_text *)mtask->hdr;
|
||||
text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
|
||||
|
||||
memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
|
||||
|
||||
text_wqe->op_code = text_hdr->opcode;
|
||||
text_wqe->op_attr = text_hdr->flags;
|
||||
text_wqe->data_length = ntoh24(text_hdr->dlength);
|
||||
text_wqe->itt = mtask->itt |
|
||||
(ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
|
||||
text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
|
||||
|
||||
text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
|
||||
|
||||
text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
|
||||
text_wqe->resp_bd_list_addr_hi =
|
||||
(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
|
||||
|
||||
dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
|
||||
(bnx2i_conn->gen_pdu.resp_buf_size <<
|
||||
ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
|
||||
text_wqe->resp_buffer = dword;
|
||||
text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
|
||||
text_wqe->bd_list_addr_hi =
|
||||
(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
|
||||
text_wqe->num_bds = 1;
|
||||
text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
|
||||
|
||||
bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
|
||||
* @conn: iscsi connection
|
||||
|
@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
|
|||
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
|
||||
nopout_hdr = (struct iscsi_nopout *)task->hdr;
|
||||
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
|
||||
|
||||
memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
|
||||
|
||||
nopout_wqe->op_code = nopout_hdr->opcode;
|
||||
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
|
||||
memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
|
||||
|
||||
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
|
||||
u32 tmp = nopout_hdr->lun[0];
|
||||
u32 tmp = nopout_wqe->lun[0];
|
||||
/* 57710 requires LUN field to be swapped */
|
||||
nopout_hdr->lun[0] = nopout_hdr->lun[1];
|
||||
nopout_hdr->lun[1] = tmp;
|
||||
nopout_wqe->lun[0] = nopout_wqe->lun[1];
|
||||
nopout_wqe->lun[1] = tmp;
|
||||
}
|
||||
|
||||
nopout_wqe->itt = ((u16)task->itt |
|
||||
|
@ -1425,6 +1478,68 @@ done:
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* bnx2i_process_text_resp - this function handles iscsi text response
|
||||
* @session: iscsi session pointer
|
||||
* @bnx2i_conn: iscsi connection pointer
|
||||
* @cqe: pointer to newly DMA'ed CQE entry for processing
|
||||
*
|
||||
* process iSCSI Text Response CQE& complete it to open-iscsi user daemon
|
||||
*/
|
||||
static int bnx2i_process_text_resp(struct iscsi_session *session,
|
||||
struct bnx2i_conn *bnx2i_conn,
|
||||
struct cqe *cqe)
|
||||
{
|
||||
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
|
||||
struct iscsi_task *task;
|
||||
struct bnx2i_text_response *text;
|
||||
struct iscsi_text_rsp *resp_hdr;
|
||||
int pld_len;
|
||||
int pad_len;
|
||||
|
||||
text = (struct bnx2i_text_response *) cqe;
|
||||
spin_lock(&session->lock);
|
||||
task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
|
||||
if (!task)
|
||||
goto done;
|
||||
|
||||
resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
|
||||
memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
|
||||
resp_hdr->opcode = text->op_code;
|
||||
resp_hdr->flags = text->response_flags;
|
||||
resp_hdr->hlength = 0;
|
||||
|
||||
hton24(resp_hdr->dlength, text->data_length);
|
||||
resp_hdr->itt = task->hdr->itt;
|
||||
resp_hdr->ttt = cpu_to_be32(text->ttt);
|
||||
resp_hdr->statsn = task->hdr->exp_statsn;
|
||||
resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
|
||||
resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
|
||||
pld_len = text->data_length;
|
||||
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
|
||||
pld_len;
|
||||
pad_len = 0;
|
||||
if (pld_len & 0x3)
|
||||
pad_len = 4 - (pld_len % 4);
|
||||
|
||||
if (pad_len) {
|
||||
int i = 0;
|
||||
for (i = 0; i < pad_len; i++) {
|
||||
bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
|
||||
bnx2i_conn->gen_pdu.resp_wr_ptr++;
|
||||
}
|
||||
}
|
||||
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
|
||||
bnx2i_conn->gen_pdu.resp_buf,
|
||||
bnx2i_conn->gen_pdu.resp_wr_ptr -
|
||||
bnx2i_conn->gen_pdu.resp_buf);
|
||||
done:
|
||||
spin_unlock(&session->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* bnx2i_process_tmf_resp - this function handles iscsi TMF response
|
||||
* @session: iscsi session pointer
|
||||
|
@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
|
|||
bnx2i_process_tmf_resp(session, bnx2i_conn,
|
||||
qp->cq_cons_qe);
|
||||
break;
|
||||
case ISCSI_OP_TEXT_RSP:
|
||||
bnx2i_process_text_resp(session, bnx2i_conn,
|
||||
qp->cq_cons_qe);
|
||||
break;
|
||||
case ISCSI_OP_LOGOUT_RSP:
|
||||
bnx2i_process_logout_resp(session, bnx2i_conn,
|
||||
qp->cq_cons_qe);
|
||||
|
|
|
@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
|
|||
static u32 adapter_count;
|
||||
|
||||
#define DRV_MODULE_NAME "bnx2i"
|
||||
#define DRV_MODULE_VERSION "2.6.2.2"
|
||||
#define DRV_MODULE_RELDATE "Nov 23, 2010"
|
||||
#define DRV_MODULE_VERSION "2.6.2.3"
|
||||
#define DRV_MODULE_RELDATE "Dec 31, 2010"
|
||||
|
||||
static char version[] __devinitdata =
|
||||
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
|
||||
|
@ -29,7 +29,7 @@ static char version[] __devinitdata =
|
|||
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
|
||||
"Eddie Wai <eddie.wai@broadcom.com>");
|
||||
|
||||
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
|
||||
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
|
||||
" iSCSI Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
|
|||
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
|
||||
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
|
||||
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
|
||||
} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
|
||||
} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
|
||||
hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
|
||||
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
|
||||
else
|
||||
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
|
||||
|
@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
|
|||
struct bnx2i_hba *hba = handle;
|
||||
int i = HZ;
|
||||
|
||||
if (!hba->cnic->max_iscsi_conn) {
|
||||
printk(KERN_ALERT "bnx2i: dev %s does not support "
|
||||
"iSCSI\n", hba->netdev->name);
|
||||
|
||||
if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
|
||||
mutex_lock(&bnx2i_dev_lock);
|
||||
list_del_init(&hba->link);
|
||||
adapter_count--;
|
||||
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
|
||||
clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
|
||||
mutex_unlock(&bnx2i_dev_lock);
|
||||
bnx2i_free_hba(hba);
|
||||
}
|
||||
return;
|
||||
}
|
||||
bnx2i_send_fw_iscsi_init_msg(hba);
|
||||
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
|
||||
msleep(BNX2I_INIT_POLL_TIME);
|
||||
|
|
|
@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
|
|||
case ISCSI_OP_SCSI_TMFUNC:
|
||||
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
|
||||
break;
|
||||
case ISCSI_OP_TEXT:
|
||||
rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
|
||||
break;
|
||||
default:
|
||||
iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
|
||||
"send_gen: unsupported op 0x%x\n",
|
||||
|
@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
|
|||
|
||||
|
||||
/**
|
||||
* bnx2i_conn_get_param - return iscsi connection parameter to caller
|
||||
* @cls_conn: pointer to iscsi cls conn
|
||||
* bnx2i_ep_get_param - return iscsi ep parameter to caller
|
||||
* @ep: pointer to iscsi endpoint
|
||||
* @param: parameter type identifier
|
||||
* @buf: buffer pointer
|
||||
*
|
||||
* returns iSCSI connection parameters
|
||||
* returns iSCSI ep parameters
|
||||
*/
|
||||
static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
||||
enum iscsi_param param, char *buf)
|
||||
static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
|
||||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
struct iscsi_conn *conn = cls_conn->dd_data;
|
||||
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
|
||||
int len = 0;
|
||||
struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
|
||||
struct bnx2i_hba *hba = bnx2i_ep->hba;
|
||||
int len = -ENOTCONN;
|
||||
|
||||
if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
|
||||
goto out;
|
||||
if (!hba)
|
||||
return -ENOTCONN;
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
|
||||
if (bnx2i_conn->ep->cm_sk)
|
||||
len = sprintf(buf, "%hu\n",
|
||||
bnx2i_conn->ep->cm_sk->dst_port);
|
||||
mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
|
||||
mutex_lock(&hba->net_dev_lock);
|
||||
if (bnx2i_ep->cm_sk)
|
||||
len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
|
||||
mutex_unlock(&hba->net_dev_lock);
|
||||
break;
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
|
||||
mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
|
||||
if (bnx2i_conn->ep->cm_sk)
|
||||
len = sprintf(buf, "%pI4\n",
|
||||
&bnx2i_conn->ep->cm_sk->dst_ip);
|
||||
mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
|
||||
mutex_lock(&hba->net_dev_lock);
|
||||
if (bnx2i_ep->cm_sk)
|
||||
len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
|
||||
mutex_unlock(&hba->net_dev_lock);
|
||||
break;
|
||||
default:
|
||||
return iscsi_conn_get_param(cls_conn, param, buf);
|
||||
return -ENOSYS;
|
||||
}
|
||||
out:
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
|
@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
|
|||
cnic_dev_10g = 1;
|
||||
|
||||
switch (bnx2i_ep->state) {
|
||||
case EP_STATE_CONNECT_FAILED:
|
||||
case EP_STATE_CLEANUP_FAILED:
|
||||
case EP_STATE_OFLD_FAILED:
|
||||
case EP_STATE_DISCONN_TIMEDOUT:
|
||||
ret = 0;
|
||||
break;
|
||||
case EP_STATE_CONNECT_START:
|
||||
case EP_STATE_CONNECT_FAILED:
|
||||
case EP_STATE_CONNECT_COMPL:
|
||||
case EP_STATE_ULP_UPDATE_START:
|
||||
case EP_STATE_ULP_UPDATE_COMPL:
|
||||
|
@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
|
|||
.name = "bnx2i",
|
||||
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
|
||||
CAP_MULTI_R2T | CAP_DATADGST |
|
||||
CAP_DATA_PATH_OFFLOAD,
|
||||
CAP_DATA_PATH_OFFLOAD |
|
||||
CAP_TEXT_NEGO,
|
||||
.param_mask = ISCSI_MAX_RECV_DLENGTH |
|
||||
ISCSI_MAX_XMIT_DLENGTH |
|
||||
ISCSI_HDRDGST_EN |
|
||||
|
@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
|
|||
.bind_conn = bnx2i_conn_bind,
|
||||
.destroy_conn = bnx2i_conn_destroy,
|
||||
.set_param = iscsi_set_param,
|
||||
.get_conn_param = bnx2i_conn_get_param,
|
||||
.get_conn_param = iscsi_conn_get_param,
|
||||
.get_session_param = iscsi_session_get_param,
|
||||
.get_host_param = bnx2i_host_get_param,
|
||||
.start_conn = bnx2i_conn_start,
|
||||
|
@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
|
|||
.xmit_task = bnx2i_task_xmit,
|
||||
.get_stats = bnx2i_conn_get_stats,
|
||||
/* TCP connect - disconnect - option-2 interface calls */
|
||||
.get_ep_param = bnx2i_ep_get_param,
|
||||
.ep_connect = bnx2i_ep_connect,
|
||||
.ep_poll = bnx2i_ep_poll,
|
||||
.ep_disconnect = bnx2i_ep_disconnect,
|
||||
|
|
|
@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
|
|||
/* owner and name should be set already */
|
||||
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
|
||||
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
|
||||
CAP_PADDING_OFFLOAD,
|
||||
CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
|
||||
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
|
||||
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
|
||||
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
|
||||
|
@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
|
|||
.destroy_conn = iscsi_tcp_conn_teardown,
|
||||
.start_conn = iscsi_conn_start,
|
||||
.stop_conn = iscsi_conn_stop,
|
||||
.get_conn_param = cxgbi_get_conn_param,
|
||||
.get_conn_param = iscsi_conn_get_param,
|
||||
.set_param = cxgbi_set_conn_param,
|
||||
.get_stats = cxgbi_get_conn_stats,
|
||||
/* pdu xmit req from user space */
|
||||
|
@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
|
|||
.xmit_pdu = cxgbi_conn_xmit_pdu,
|
||||
.parse_pdu_itt = cxgbi_parse_pdu_itt,
|
||||
/* TCP connect/disconnect */
|
||||
.get_ep_param = cxgbi_get_ep_param,
|
||||
.ep_connect = cxgbi_ep_connect,
|
||||
.ep_poll = cxgbi_ep_poll,
|
||||
.ep_disconnect = cxgbi_ep_disconnect,
|
||||
|
@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
|
|||
csk, idx, npods, gl);
|
||||
|
||||
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
|
||||
struct sk_buff *skb = ddp->gl_skb[idx];
|
||||
struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
|
||||
PPOD_SIZE, 0, GFP_ATOMIC);
|
||||
|
||||
/* hold on to the skb until we clear the ddp mapping */
|
||||
skb_get(skb);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
ulp_mem_io_set_hdr(skb, pm_addr);
|
||||
cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
|
||||
|
@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
|
|||
cdev, idx, npods, tag);
|
||||
|
||||
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
|
||||
struct sk_buff *skb = ddp->gl_skb[idx];
|
||||
struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
|
||||
PPOD_SIZE, 0, GFP_ATOMIC);
|
||||
|
||||
if (!skb) {
|
||||
pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
|
||||
pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
|
||||
tag, idx, i, npods);
|
||||
continue;
|
||||
}
|
||||
ddp->gl_skb[idx] = NULL;
|
||||
memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
|
||||
ulp_mem_io_set_hdr(skb, pm_addr);
|
||||
skb->priority = CPL_PRIORITY_CONTROL;
|
||||
cxgb3_ofld_send(cdev->lldev, skb);
|
||||
}
|
||||
}
|
||||
|
||||
static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
log_debug(1 << CXGBI_DBG_DDP,
|
||||
"ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
|
||||
|
||||
for (i = 0; i < cnt; i++, idx++)
|
||||
if (ddp->gl_skb[idx]) {
|
||||
kfree_skb(ddp->gl_skb[idx]);
|
||||
ddp->gl_skb[idx] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
|
||||
int cnt, gfp_t gfp)
|
||||
{
|
||||
int i;
|
||||
|
||||
log_debug(1 << CXGBI_DBG_DDP,
|
||||
"ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
|
||||
PPOD_SIZE, 0, gfp);
|
||||
if (skb)
|
||||
ddp->gl_skb[idx + i] = skb;
|
||||
else {
|
||||
ddp_free_gl_skb(ddp, idx, i);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
||||
unsigned int tid, int pg_idx, bool reply)
|
||||
{
|
||||
|
@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
|
|||
}
|
||||
tdev->ulp_iscsi = ddp;
|
||||
|
||||
cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
|
||||
cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
|
||||
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
|
||||
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
|
||||
cdev->csk_ddp_set = ddp_set_map;
|
||||
|
|
|
@ -24,10 +24,21 @@
|
|||
|
||||
extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
|
||||
|
||||
#define cxgb3i_get_private_ipv4addr(ndev) \
|
||||
(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
|
||||
#define cxgb3i_set_private_ipv4addr(ndev, addr) \
|
||||
(((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
|
||||
static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
|
||||
{
|
||||
return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
|
||||
}
|
||||
|
||||
static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
|
||||
unsigned int addr)
|
||||
{
|
||||
struct port_info *pi = (struct port_info *)netdev_priv(ndev);
|
||||
|
||||
pi->iscsic.flags = addr ? 1 : 0;
|
||||
pi->iscsi_ipv4addr = addr;
|
||||
if (addr)
|
||||
memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
|
||||
}
|
||||
|
||||
struct cpl_iscsi_hdr_norss {
|
||||
union opcode_tid ot;
|
||||
|
|
|
@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
|
|||
.name = DRV_MODULE_NAME,
|
||||
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
|
||||
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
|
||||
CAP_PADDING_OFFLOAD,
|
||||
CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
|
||||
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
|
||||
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
|
||||
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
|
||||
|
@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
|
|||
.destroy_conn = iscsi_tcp_conn_teardown,
|
||||
.start_conn = iscsi_conn_start,
|
||||
.stop_conn = iscsi_conn_stop,
|
||||
.get_conn_param = cxgbi_get_conn_param,
|
||||
.get_conn_param = iscsi_conn_get_param,
|
||||
.set_param = cxgbi_set_conn_param,
|
||||
.get_stats = cxgbi_get_conn_stats,
|
||||
/* pdu xmit req from user space */
|
||||
|
@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
|
|||
.xmit_pdu = cxgbi_conn_xmit_pdu,
|
||||
.parse_pdu_itt = cxgbi_parse_pdu_itt,
|
||||
/* TCP connect/disconnect */
|
||||
.get_ep_param = cxgbi_get_ep_param,
|
||||
.ep_connect = cxgbi_ep_connect,
|
||||
.ep_poll = cxgbi_ep_poll,
|
||||
.ep_disconnect = cxgbi_ep_disconnect,
|
||||
|
@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
|
|||
cxgbi_ddp_page_size_factor(pgsz_factor);
|
||||
cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
|
||||
|
||||
cdev->csk_ddp_free_gl_skb = NULL;
|
||||
cdev->csk_ddp_alloc_gl_skb = NULL;
|
||||
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
|
||||
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
|
||||
cdev->csk_ddp_set = ddp_set_map;
|
||||
|
|
|
@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
|
|||
csk->dst = dst;
|
||||
csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
|
||||
csk->daddr.sin_port = daddr->sin_port;
|
||||
csk->daddr.sin_family = daddr->sin_family;
|
||||
csk->saddr.sin_addr.s_addr = rt->rt_src;
|
||||
|
||||
return csk;
|
||||
|
@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
|
|||
return idx;
|
||||
}
|
||||
|
||||
if (cdev->csk_ddp_alloc_gl_skb) {
|
||||
err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
|
||||
if (err < 0)
|
||||
goto unmark_entries;
|
||||
}
|
||||
|
||||
tag = cxgbi_ddp_tag_base(tformat, sw_tag);
|
||||
tag |= idx << PPOD_IDX_SHIFT;
|
||||
|
||||
|
@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
|
|||
hdr.page_offset = htonl(gl->offset);
|
||||
|
||||
err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
|
||||
if (err < 0) {
|
||||
if (cdev->csk_ddp_free_gl_skb)
|
||||
cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
|
||||
if (err < 0)
|
||||
goto unmark_entries;
|
||||
}
|
||||
|
||||
ddp->idx_last = idx;
|
||||
log_debug(1 << CXGBI_DBG_DDP,
|
||||
|
@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref)
|
|||
>> PPOD_PAGES_SHIFT;
|
||||
pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
|
||||
kfree(gl);
|
||||
if (cdev->csk_ddp_free_gl_skb)
|
||||
cdev->csk_ddp_free_gl_skb(ddp, i, npods);
|
||||
i += npods;
|
||||
} else
|
||||
i++;
|
||||
|
@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
|
|||
return -ENOMEM;
|
||||
}
|
||||
ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
|
||||
ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
|
||||
ppmax * sizeof(struct cxgbi_gather_list *));
|
||||
cdev->ddp = ddp;
|
||||
|
||||
spin_lock_init(&ddp->map_lock);
|
||||
|
@ -1895,13 +1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
|
|||
|
||||
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
|
||||
{
|
||||
u8 submode = 0;
|
||||
if (hcrc || dcrc) {
|
||||
u8 submode = 0;
|
||||
|
||||
if (hcrc)
|
||||
submode |= 1;
|
||||
if (dcrc)
|
||||
submode |= 2;
|
||||
cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
|
||||
if (hcrc)
|
||||
submode |= 1;
|
||||
if (dcrc)
|
||||
submode |= 2;
|
||||
cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
|
||||
} else
|
||||
cxgbi_skcb_ulp_mode(skb) = 0;
|
||||
}
|
||||
|
||||
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
|
||||
|
@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
|
||||
|
||||
int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
|
||||
enum iscsi_param param, char *buf)
|
||||
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
|
||||
char *buf)
|
||||
{
|
||||
struct iscsi_conn *iconn = cls_conn->dd_data;
|
||||
struct cxgbi_endpoint *cep = ep->dd_data;
|
||||
struct cxgbi_sock *csk;
|
||||
int len;
|
||||
|
||||
log_debug(1 << CXGBI_DBG_ISCSI,
|
||||
"cls_conn 0x%p, param %d.\n", cls_conn, param);
|
||||
"cls_conn 0x%p, param %d.\n", ep, param);
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
spin_lock_bh(&iconn->session->lock);
|
||||
len = sprintf(buf, "%hu\n", iconn->portal_port);
|
||||
spin_unlock_bh(&iconn->session->lock);
|
||||
break;
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
|
||||
spin_lock_bh(&iconn->session->lock);
|
||||
len = sprintf(buf, "%s\n", iconn->portal_address);
|
||||
spin_unlock_bh(&iconn->session->lock);
|
||||
break;
|
||||
if (!cep)
|
||||
return -ENOTCONN;
|
||||
|
||||
csk = cep->csk;
|
||||
if (!csk)
|
||||
return -ENOTCONN;
|
||||
|
||||
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
|
||||
&csk->daddr, param, buf);
|
||||
default:
|
||||
return iscsi_conn_get_param(cls_conn, param, buf);
|
||||
return -ENOSYS;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
|
||||
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
|
||||
|
||||
struct iscsi_cls_conn *
|
||||
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
|
||||
|
@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
|
|||
cxgbi_conn_max_xmit_dlength(conn);
|
||||
cxgbi_conn_max_recv_dlength(conn);
|
||||
|
||||
spin_lock_bh(&conn->session->lock);
|
||||
sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
|
||||
conn->portal_port = ntohs(csk->daddr.sin_port);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
|
||||
log_debug(1 << CXGBI_DBG_ISCSI,
|
||||
"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
|
||||
cls_session, cls_conn, ep, cconn, csk);
|
||||
|
|
|
@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
|
|||
unsigned int rsvd_tag_mask;
|
||||
spinlock_t map_lock;
|
||||
struct cxgbi_gather_list **gl_map;
|
||||
struct sk_buff **gl_skb;
|
||||
};
|
||||
|
||||
#define DDP_PGIDX_MAX 4
|
||||
|
@ -536,8 +535,6 @@ struct cxgbi_device {
|
|||
struct cxgbi_ddp_info *ddp;
|
||||
|
||||
void (*dev_ddp_cleanup)(struct cxgbi_device *);
|
||||
void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
|
||||
int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
|
||||
int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
|
||||
unsigned int, unsigned int,
|
||||
struct cxgbi_gather_list *);
|
||||
|
@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
|
|||
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
|
||||
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
|
||||
enum iscsi_param, char *, int);
|
||||
int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
|
||||
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
|
||||
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
|
||||
int cxgbi_bind_conn(struct iscsi_cls_session *,
|
||||
struct iscsi_cls_conn *, u64, int);
|
||||
|
|
|
@ -25,16 +25,9 @@
|
|||
#include <scsi/scsi_dh.h>
|
||||
#include "../scsi_priv.h"
|
||||
|
||||
struct scsi_dh_devinfo_list {
|
||||
struct list_head node;
|
||||
char vendor[9];
|
||||
char model[17];
|
||||
struct scsi_device_handler *handler;
|
||||
};
|
||||
|
||||
static DEFINE_SPINLOCK(list_lock);
|
||||
static LIST_HEAD(scsi_dh_list);
|
||||
static LIST_HEAD(scsi_dh_dev_list);
|
||||
static int scsi_dh_list_idx = 1;
|
||||
|
||||
static struct scsi_device_handler *get_device_handler(const char *name)
|
||||
{
|
||||
|
@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
|
|||
return found;
|
||||
}
|
||||
|
||||
|
||||
static struct scsi_device_handler *
|
||||
scsi_dh_cache_lookup(struct scsi_device *sdev)
|
||||
static struct scsi_device_handler *get_device_handler_by_idx(int idx)
|
||||
{
|
||||
struct scsi_dh_devinfo_list *tmp;
|
||||
struct scsi_device_handler *found_dh = NULL;
|
||||
struct scsi_device_handler *tmp, *found = NULL;
|
||||
|
||||
spin_lock(&list_lock);
|
||||
list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
|
||||
if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
|
||||
!strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
|
||||
found_dh = tmp->handler;
|
||||
list_for_each_entry(tmp, &scsi_dh_list, list) {
|
||||
if (tmp->idx == idx) {
|
||||
found = tmp;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&list_lock);
|
||||
|
||||
return found_dh;
|
||||
}
|
||||
|
||||
static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
|
||||
struct scsi_device *sdev)
|
||||
{
|
||||
int i, found = 0;
|
||||
|
||||
for(i = 0; scsi_dh->devlist[i].vendor; i++) {
|
||||
if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
|
||||
strlen(scsi_dh->devlist[i].vendor)) &&
|
||||
!strncmp(sdev->model, scsi_dh->devlist[i].model,
|
||||
strlen(scsi_dh->devlist[i].model))) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
||||
|
@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
|
|||
struct scsi_device *sdev)
|
||||
{
|
||||
struct scsi_device_handler *found_dh = NULL;
|
||||
struct scsi_dh_devinfo_list *tmp;
|
||||
int idx;
|
||||
|
||||
found_dh = scsi_dh_cache_lookup(sdev);
|
||||
if (found_dh)
|
||||
return found_dh;
|
||||
idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
|
||||
SCSI_DEVINFO_DH);
|
||||
found_dh = get_device_handler_by_idx(idx);
|
||||
|
||||
if (scsi_dh) {
|
||||
if (scsi_dh_handler_lookup(scsi_dh, sdev))
|
||||
found_dh = scsi_dh;
|
||||
} else {
|
||||
struct scsi_device_handler *tmp_dh;
|
||||
|
||||
spin_lock(&list_lock);
|
||||
list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
|
||||
if (scsi_dh_handler_lookup(tmp_dh, sdev))
|
||||
found_dh = tmp_dh;
|
||||
}
|
||||
spin_unlock(&list_lock);
|
||||
}
|
||||
|
||||
if (found_dh) { /* If device is found, add it to the cache */
|
||||
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
|
||||
if (tmp) {
|
||||
strncpy(tmp->vendor, sdev->vendor, 8);
|
||||
strncpy(tmp->model, sdev->model, 16);
|
||||
tmp->vendor[8] = '\0';
|
||||
tmp->model[16] = '\0';
|
||||
tmp->handler = found_dh;
|
||||
spin_lock(&list_lock);
|
||||
list_add(&tmp->node, &scsi_dh_dev_list);
|
||||
spin_unlock(&list_lock);
|
||||
} else {
|
||||
found_dh = NULL;
|
||||
}
|
||||
}
|
||||
if (scsi_dh && found_dh != scsi_dh)
|
||||
found_dh = NULL;
|
||||
|
||||
return found_dh;
|
||||
}
|
||||
|
@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
|
|||
*/
|
||||
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (get_device_handler(scsi_dh->name))
|
||||
return -EBUSY;
|
||||
|
||||
spin_lock(&list_lock);
|
||||
scsi_dh->idx = scsi_dh_list_idx++;
|
||||
list_add(&scsi_dh->list, &scsi_dh_list);
|
||||
spin_unlock(&list_lock);
|
||||
|
||||
for (i = 0; scsi_dh->devlist[i].vendor; i++) {
|
||||
scsi_dev_info_list_add_keyed(0,
|
||||
scsi_dh->devlist[i].vendor,
|
||||
scsi_dh->devlist[i].model,
|
||||
NULL,
|
||||
scsi_dh->idx,
|
||||
SCSI_DEVINFO_DH);
|
||||
}
|
||||
|
||||
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
|
||||
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
|
||||
|
||||
|
@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
|
|||
*/
|
||||
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
|
||||
{
|
||||
struct scsi_dh_devinfo_list *tmp, *pos;
|
||||
int i;
|
||||
|
||||
if (!get_device_handler(scsi_dh->name))
|
||||
return -ENODEV;
|
||||
|
@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
|
|||
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
|
||||
scsi_dh_notifier_remove);
|
||||
|
||||
for (i = 0; scsi_dh->devlist[i].vendor; i++) {
|
||||
scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
|
||||
scsi_dh->devlist[i].model,
|
||||
SCSI_DEVINFO_DH);
|
||||
}
|
||||
|
||||
spin_lock(&list_lock);
|
||||
list_del(&scsi_dh->list);
|
||||
list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
|
||||
if (pos->handler == scsi_dh) {
|
||||
list_del(&pos->node);
|
||||
kfree(pos);
|
||||
}
|
||||
}
|
||||
spin_unlock(&list_lock);
|
||||
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
|
||||
|
||||
|
@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
|
|||
{
|
||||
int r;
|
||||
|
||||
r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
|
||||
|
||||
if (!r)
|
||||
|
@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
|
|||
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
|
||||
scsi_dh_sysfs_attr_remove);
|
||||
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
|
||||
scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
|
||||
}
|
||||
|
||||
module_init(scsi_dh_init);
|
||||
|
|
|
@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
|
|||
{
|
||||
struct alua_dh_data *h = req->end_io_data;
|
||||
struct scsi_sense_hdr sense_hdr;
|
||||
unsigned err = SCSI_DH_IO;
|
||||
unsigned err = SCSI_DH_OK;
|
||||
|
||||
if (error || host_byte(req->errors) != DID_OK ||
|
||||
msg_byte(req->errors) != COMMAND_COMPLETE)
|
||||
msg_byte(req->errors) != COMMAND_COMPLETE) {
|
||||
err = SCSI_DH_IO;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (err == SCSI_DH_IO && h->senselen > 0) {
|
||||
if (h->senselen > 0) {
|
||||
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
|
||||
&sense_hdr);
|
||||
if (!err) {
|
||||
|
@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
|
|||
print_alua_state(h->state));
|
||||
}
|
||||
done:
|
||||
blk_put_request(req);
|
||||
req->end_io_data = NULL;
|
||||
__blk_put_request(req->q, req);
|
||||
if (h->callback_fn) {
|
||||
h->callback_fn(h->callback_data, err);
|
||||
h->callback_fn = h->callback_data = NULL;
|
||||
|
@ -303,7 +306,6 @@ done:
|
|||
static unsigned submit_stpg(struct alua_dh_data *h)
|
||||
{
|
||||
struct request *rq;
|
||||
int err = SCSI_DH_RES_TEMP_UNAVAIL;
|
||||
int stpg_len = 8;
|
||||
struct scsi_device *sdev = h->sdev;
|
||||
|
||||
|
@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
|
|||
rq->end_io_data = h;
|
||||
|
||||
blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
|
||||
return err;
|
||||
return SCSI_DH_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
|
|||
{"Pillar", "Axiom" },
|
||||
{"Intel", "Multi-Flex"},
|
||||
{"NETAPP", "LUN"},
|
||||
{"NETAPP", "LUN C-Mode"},
|
||||
{"AIX", "NVDISK"},
|
||||
{"Promise", "VTrak"},
|
||||
{NULL, NULL}
|
||||
};
|
||||
|
||||
|
@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
|
|||
unsigned long flags;
|
||||
int err = SCSI_DH_OK;
|
||||
|
||||
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
|
||||
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
|
||||
+ sizeof(*h) , GFP_KERNEL);
|
||||
if (!scsi_dh_data) {
|
||||
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
|
||||
|
|
|
@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
|
|||
unsigned long flags;
|
||||
int err;
|
||||
|
||||
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
|
||||
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
|
||||
+ sizeof(*h) , GFP_KERNEL);
|
||||
if (!scsi_dh_data) {
|
||||
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
|
||||
|
|
|
@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
|
|||
}
|
||||
}
|
||||
done:
|
||||
blk_put_request(req);
|
||||
req->end_io_data = NULL;
|
||||
__blk_put_request(req->q, req);
|
||||
if (h->callback_fn) {
|
||||
h->callback_fn(h->callback_data, err);
|
||||
h->callback_fn = h->callback_data = NULL;
|
||||
|
@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
|
|||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
|
||||
+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
|
||||
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
|
||||
+ sizeof(*h) , GFP_KERNEL);
|
||||
if (!scsi_dh_data) {
|
||||
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
|
||||
HP_SW_NAME);
|
||||
|
|
|
@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
|
|||
}
|
||||
|
||||
static struct request *rdac_failover_get(struct scsi_device *sdev,
|
||||
struct rdac_dh_data *h)
|
||||
struct rdac_dh_data *h, struct list_head *list)
|
||||
{
|
||||
struct request *rq;
|
||||
struct rdac_mode_common *common;
|
||||
unsigned data_size;
|
||||
struct rdac_queue_data *qdata;
|
||||
u8 *lun_table;
|
||||
|
||||
if (h->ctlr->use_ms10) {
|
||||
struct rdac_pg_expanded *rdac_pg;
|
||||
|
@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
|
|||
rdac_pg->subpage_code = 0x1;
|
||||
rdac_pg->page_len[0] = 0x01;
|
||||
rdac_pg->page_len[1] = 0x28;
|
||||
lun_table = rdac_pg->lun_table;
|
||||
} else {
|
||||
struct rdac_pg_legacy *rdac_pg;
|
||||
|
||||
|
@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
|
|||
common = &rdac_pg->common;
|
||||
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
|
||||
rdac_pg->page_len = 0x68;
|
||||
lun_table = rdac_pg->lun_table;
|
||||
}
|
||||
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
|
||||
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
|
||||
common->rdac_options = RDAC_FORCED_QUIESENCE;
|
||||
|
||||
list_for_each_entry(qdata, list, entry) {
|
||||
lun_table[qdata->h->lun] = 0x81;
|
||||
}
|
||||
|
||||
/* get request for block layer packet command */
|
||||
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
|
||||
if (!rq)
|
||||
|
@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
|
|||
int err, retry_cnt = RDAC_RETRY_COUNT;
|
||||
struct rdac_queue_data *tmp, *qdata;
|
||||
LIST_HEAD(list);
|
||||
u8 *lun_table;
|
||||
|
||||
spin_lock(&ctlr->ms_lock);
|
||||
list_splice_init(&ctlr->ms_head, &list);
|
||||
|
@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
|
|||
ctlr->ms_sdev = NULL;
|
||||
spin_unlock(&ctlr->ms_lock);
|
||||
|
||||
if (ctlr->use_ms10)
|
||||
lun_table = ctlr->mode_select.expanded.lun_table;
|
||||
else
|
||||
lun_table = ctlr->mode_select.legacy.lun_table;
|
||||
|
||||
retry:
|
||||
err = SCSI_DH_RES_TEMP_UNAVAIL;
|
||||
rq = rdac_failover_get(sdev, h);
|
||||
rq = rdac_failover_get(sdev, h, &list);
|
||||
if (!rq)
|
||||
goto done;
|
||||
|
||||
list_for_each_entry(qdata, &list, entry) {
|
||||
lun_table[qdata->h->lun] = 0x81;
|
||||
}
|
||||
|
||||
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
|
||||
"%s MODE_SELECT command",
|
||||
(char *) h->ctlr->array_name, h->ctlr->index,
|
||||
|
@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
|
|||
{"DELL", "MD32xx"},
|
||||
{"DELL", "MD32xxi"},
|
||||
{"DELL", "MD36xxi"},
|
||||
{"DELL", "MD36xxf"},
|
||||
{"LSI", "INF-01-00"},
|
||||
{"ENGENIO", "INF-01-00"},
|
||||
{"STK", "FLEXLINE 380"},
|
||||
|
@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
|
|||
int err;
|
||||
char array_name[ARRAY_LABEL_LEN];
|
||||
|
||||
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
|
||||
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
|
||||
+ sizeof(*h) , GFP_KERNEL);
|
||||
if (!scsi_dh_data) {
|
||||
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
|
||||
|
@ -906,4 +905,5 @@ module_exit(rdac_exit);
|
|||
|
||||
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
|
||||
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
|
||||
MODULE_VERSION("01.00.0000.0000");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -1,2 +1,4 @@
|
|||
obj-$(CONFIG_FCOE) += fcoe.o
|
||||
obj-$(CONFIG_LIBFCOE) += libfcoe.o
|
||||
|
||||
libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
|
||||
|
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -24,7 +24,7 @@
|
|||
#include <linux/kthread.h>
|
||||
|
||||
#define FCOE_MAX_QUEUE_DEPTH 256
|
||||
#define FCOE_LOW_QUEUE_DEPTH 32
|
||||
#define FCOE_MIN_QUEUE_DEPTH 32
|
||||
|
||||
#define FCOE_WORD_TO_BYTE 4
|
||||
|
||||
|
@ -40,12 +40,6 @@
|
|||
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
|
||||
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
|
||||
|
||||
/*
|
||||
* Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
|
||||
* + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
|
||||
*/
|
||||
#define FCOE_MTU 2158
|
||||
|
||||
unsigned int fcoe_debug_logging;
|
||||
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
|
||||
|
@ -70,21 +64,6 @@ do { \
|
|||
printk(KERN_INFO "fcoe: %s: " fmt, \
|
||||
netdev->name, ##args);)
|
||||
|
||||
/**
|
||||
* struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
|
||||
* @thread: The thread context
|
||||
* @fcoe_rx_list: The queue of pending packets to process
|
||||
* @page: The memory page for calculating frame trailer CRCs
|
||||
* @crc_eof_offset: The offset into the CRC page pointing to available
|
||||
* memory for a new trailer
|
||||
*/
|
||||
struct fcoe_percpu_s {
|
||||
struct task_struct *thread;
|
||||
struct sk_buff_head fcoe_rx_list;
|
||||
struct page *crc_eof_page;
|
||||
int crc_eof_offset;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fcoe_interface - A FCoE interface
|
||||
* @list: Handle for a list of FCoE interfaces
|
||||
|
@ -108,30 +87,6 @@ struct fcoe_interface {
|
|||
struct kref kref;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fcoe_port - The FCoE private structure
|
||||
* @fcoe: The associated fcoe interface
|
||||
* @lport: The associated local port
|
||||
* @fcoe_pending_queue: The pending Rx queue of skbs
|
||||
* @fcoe_pending_queue_active: Indicates if the pending queue is active
|
||||
* @timer: The queue timer
|
||||
* @destroy_work: Handle for work context
|
||||
* (to prevent RTNL deadlocks)
|
||||
* @data_srt_addr: Source address for data
|
||||
*
|
||||
* An instance of this structure is to be allocated along with the
|
||||
* Scsi_Host and libfc fc_lport structures.
|
||||
*/
|
||||
struct fcoe_port {
|
||||
struct fcoe_interface *fcoe;
|
||||
struct fc_lport *lport;
|
||||
struct sk_buff_head fcoe_pending_queue;
|
||||
u8 fcoe_pending_queue_active;
|
||||
struct timer_list timer;
|
||||
struct work_struct destroy_work;
|
||||
u8 data_src_addr[ETH_ALEN];
|
||||
};
|
||||
|
||||
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
|
||||
|
||||
/**
|
||||
|
@ -140,7 +95,8 @@ struct fcoe_port {
|
|||
*/
|
||||
static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
|
||||
{
|
||||
return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
|
||||
return ((struct fcoe_interface *)
|
||||
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
|
||||
}
|
||||
|
||||
#endif /* _FCOE_H_ */
|
||||
|
|
|
@ -44,9 +44,7 @@
|
|||
#include <scsi/libfc.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
|
||||
MODULE_AUTHOR("Open-FCoE.org");
|
||||
MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
#include "libfcoe.h"
|
||||
|
||||
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
|
||||
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
|
||||
|
@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
|
|||
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
|
||||
static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
|
||||
|
||||
unsigned int libfcoe_debug_logging;
|
||||
module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
|
||||
|
||||
#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
|
||||
#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
|
||||
|
||||
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
|
||||
do { \
|
||||
if (unlikely(libfcoe_debug_logging & LEVEL)) \
|
||||
do { \
|
||||
CMD; \
|
||||
} while (0); \
|
||||
} while (0)
|
||||
|
||||
#define LIBFCOE_DBG(fmt, args...) \
|
||||
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
|
||||
printk(KERN_INFO "libfcoe: " fmt, ##args);)
|
||||
|
||||
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
|
||||
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
|
||||
printk(KERN_INFO "host%d: fip: " fmt, \
|
||||
(fip)->lp->host->host_no, ##args);)
|
||||
|
||||
static const char *fcoe_ctlr_states[] = {
|
||||
static const char * const fcoe_ctlr_states[] = {
|
||||
[FIP_ST_DISABLED] = "DISABLED",
|
||||
[FIP_ST_LINK_WAIT] = "LINK_WAIT",
|
||||
[FIP_ST_AUTO] = "AUTO",
|
||||
|
@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
|
|||
struct fip_mac_desc mac;
|
||||
struct fip_wwn_desc wwnn;
|
||||
struct fip_size_desc size;
|
||||
} __attribute__((packed)) desc;
|
||||
} __attribute__((packed)) *sol;
|
||||
} __packed desc;
|
||||
} __packed * sol;
|
||||
u32 fcoe_size;
|
||||
|
||||
skb = dev_alloc_skb(sizeof(*sol));
|
||||
|
@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
|
|||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_mac_desc mac;
|
||||
} __attribute__((packed)) *kal;
|
||||
} __packed * kal;
|
||||
struct fip_vn_desc *vn;
|
||||
u32 len;
|
||||
struct fc_lport *lp;
|
||||
|
@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
|
|||
struct ethhdr eth;
|
||||
struct fip_header fip;
|
||||
struct fip_encaps encaps;
|
||||
} __attribute__((packed)) *cap;
|
||||
} __packed * cap;
|
||||
struct fc_frame_header *fh;
|
||||
struct fip_mac_desc *mac;
|
||||
struct fcoe_fcf *fcf;
|
||||
|
@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
|
|||
struct fip_mac_desc mac;
|
||||
struct fip_wwn_desc wwnn;
|
||||
struct fip_vn_desc vn;
|
||||
} __attribute__((packed)) *frame;
|
||||
} __packed * frame;
|
||||
struct fip_fc4_feat *ff;
|
||||
struct fip_size_desc *size;
|
||||
u32 fcp_feat;
|
|
@ -0,0 +1,770 @@
|
|||
/*
|
||||
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Maintained at www.Open-FCoE.org
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <scsi/libfcoe.h>
|
||||
|
||||
#include "libfcoe.h"
|
||||
|
||||
MODULE_AUTHOR("Open-FCoE.org");
|
||||
MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
||||
static int fcoe_transport_create(const char *, struct kernel_param *);
|
||||
static int fcoe_transport_destroy(const char *, struct kernel_param *);
|
||||
static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
|
||||
static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
|
||||
static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
|
||||
static int fcoe_transport_enable(const char *, struct kernel_param *);
|
||||
static int fcoe_transport_disable(const char *, struct kernel_param *);
|
||||
static int libfcoe_device_notification(struct notifier_block *notifier,
|
||||
ulong event, void *ptr);
|
||||
|
||||
static LIST_HEAD(fcoe_transports);
|
||||
static DEFINE_MUTEX(ft_mutex);
|
||||
static LIST_HEAD(fcoe_netdevs);
|
||||
static DEFINE_MUTEX(fn_mutex);
|
||||
|
||||
unsigned int libfcoe_debug_logging;
|
||||
module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
|
||||
|
||||
module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
|
||||
__MODULE_PARM_TYPE(show, "string");
|
||||
MODULE_PARM_DESC(show, " Show attached FCoE transports");
|
||||
|
||||
module_param_call(create, fcoe_transport_create, NULL,
|
||||
(void *)FIP_MODE_FABRIC, S_IWUSR);
|
||||
__MODULE_PARM_TYPE(create, "string");
|
||||
MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
|
||||
|
||||
module_param_call(create_vn2vn, fcoe_transport_create, NULL,
|
||||
(void *)FIP_MODE_VN2VN, S_IWUSR);
|
||||
__MODULE_PARM_TYPE(create_vn2vn, "string");
|
||||
MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
|
||||
"on an Ethernet interface");
|
||||
|
||||
module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
|
||||
__MODULE_PARM_TYPE(destroy, "string");
|
||||
MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
|
||||
|
||||
module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
|
||||
__MODULE_PARM_TYPE(enable, "string");
|
||||
MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
|
||||
|
||||
module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
|
||||
__MODULE_PARM_TYPE(disable, "string");
|
||||
MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
|
||||
|
||||
/* notification function for packets from net device */
|
||||
static struct notifier_block libfcoe_notifier = {
|
||||
.notifier_call = libfcoe_device_notification,
|
||||
};
|
||||
|
||||
/**
|
||||
* fcoe_fc_crc() - Calculates the CRC for a given frame
|
||||
* @fp: The frame to be checksumed
|
||||
*
|
||||
* This uses crc32() routine to calculate the CRC for a frame
|
||||
*
|
||||
* Return: The 32 bit CRC value
|
||||
*/
|
||||
u32 fcoe_fc_crc(struct fc_frame *fp)
|
||||
{
|
||||
struct sk_buff *skb = fp_skb(fp);
|
||||
struct skb_frag_struct *frag;
|
||||
unsigned char *data;
|
||||
unsigned long off, len, clen;
|
||||
u32 crc;
|
||||
unsigned i;
|
||||
|
||||
crc = crc32(~0, skb->data, skb_headlen(skb));
|
||||
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
frag = &skb_shinfo(skb)->frags[i];
|
||||
off = frag->page_offset;
|
||||
len = frag->size;
|
||||
while (len > 0) {
|
||||
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
|
||||
data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
|
||||
KM_SKB_DATA_SOFTIRQ);
|
||||
crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
|
||||
kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
|
||||
off += clen;
|
||||
len -= clen;
|
||||
}
|
||||
}
|
||||
return crc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_fc_crc);
|
||||
|
||||
/**
|
||||
* fcoe_start_io() - Start FCoE I/O
|
||||
* @skb: The packet to be transmitted
|
||||
*
|
||||
* This routine is called from the net device to start transmitting
|
||||
* FCoE packets.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_start_io(struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *nskb;
|
||||
int rc;
|
||||
|
||||
nskb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!nskb)
|
||||
return -ENOMEM;
|
||||
rc = dev_queue_xmit(nskb);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
kfree_skb(skb);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_start_io);
|
||||
|
||||
|
||||
/**
|
||||
* fcoe_clean_pending_queue() - Dequeue a skb and free it
|
||||
* @lport: The local port to dequeue a skb on
|
||||
*/
|
||||
void fcoe_clean_pending_queue(struct fc_lport *lport)
|
||||
{
|
||||
struct fcoe_port *port = lport_priv(lport);
|
||||
struct sk_buff *skb;
|
||||
|
||||
spin_lock_bh(&port->fcoe_pending_queue.lock);
|
||||
while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
|
||||
spin_unlock_bh(&port->fcoe_pending_queue.lock);
|
||||
kfree_skb(skb);
|
||||
spin_lock_bh(&port->fcoe_pending_queue.lock);
|
||||
}
|
||||
spin_unlock_bh(&port->fcoe_pending_queue.lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
|
||||
|
||||
/**
|
||||
* fcoe_check_wait_queue() - Attempt to clear the transmit backlog
|
||||
* @lport: The local port whose backlog is to be cleared
|
||||
*
|
||||
* This empties the wait_queue, dequeues the head of the wait_queue queue
|
||||
* and calls fcoe_start_io() for each packet. If all skb have been
|
||||
* transmitted it returns the qlen. If an error occurs it restores
|
||||
* wait_queue (to try again later) and returns -1.
|
||||
*
|
||||
* The wait_queue is used when the skb transmit fails. The failed skb
|
||||
* will go in the wait_queue which will be emptied by the timer function or
|
||||
* by the next skb transmit.
|
||||
*/
|
||||
void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
|
||||
{
|
||||
struct fcoe_port *port = lport_priv(lport);
|
||||
int rc;
|
||||
|
||||
spin_lock_bh(&port->fcoe_pending_queue.lock);
|
||||
|
||||
if (skb)
|
||||
__skb_queue_tail(&port->fcoe_pending_queue, skb);
|
||||
|
||||
if (port->fcoe_pending_queue_active)
|
||||
goto out;
|
||||
port->fcoe_pending_queue_active = 1;
|
||||
|
||||
while (port->fcoe_pending_queue.qlen) {
|
||||
/* keep qlen > 0 until fcoe_start_io succeeds */
|
||||
port->fcoe_pending_queue.qlen++;
|
||||
skb = __skb_dequeue(&port->fcoe_pending_queue);
|
||||
|
||||
spin_unlock_bh(&port->fcoe_pending_queue.lock);
|
||||
rc = fcoe_start_io(skb);
|
||||
spin_lock_bh(&port->fcoe_pending_queue.lock);
|
||||
|
||||
if (rc) {
|
||||
__skb_queue_head(&port->fcoe_pending_queue, skb);
|
||||
/* undo temporary increment above */
|
||||
port->fcoe_pending_queue.qlen--;
|
||||
break;
|
||||
}
|
||||
/* undo temporary increment above */
|
||||
port->fcoe_pending_queue.qlen--;
|
||||
}
|
||||
|
||||
if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
|
||||
lport->qfull = 0;
|
||||
if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
|
||||
mod_timer(&port->timer, jiffies + 2);
|
||||
port->fcoe_pending_queue_active = 0;
|
||||
out:
|
||||
if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
|
||||
lport->qfull = 1;
|
||||
spin_unlock_bh(&port->fcoe_pending_queue.lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
|
||||
|
||||
/**
|
||||
* fcoe_queue_timer() - The fcoe queue timer
|
||||
* @lport: The local port
|
||||
*
|
||||
* Calls fcoe_check_wait_queue on timeout
|
||||
*/
|
||||
void fcoe_queue_timer(ulong lport)
|
||||
{
|
||||
fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_queue_timer);
|
||||
|
||||
/**
|
||||
* fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
|
||||
* @skb: The packet to be transmitted
|
||||
* @tlen: The total length of the trailer
|
||||
* @fps: The fcoe context
|
||||
*
|
||||
* This routine allocates a page for frame trailers. The page is re-used if
|
||||
* there is enough room left on it for the current trailer. If there isn't
|
||||
* enough buffer left a new page is allocated for the trailer. Reference to
|
||||
* the page from this function as well as the skbs using the page fragments
|
||||
* ensure that the page is freed at the appropriate time.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
|
||||
struct fcoe_percpu_s *fps)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
page = fps->crc_eof_page;
|
||||
if (!page) {
|
||||
page = alloc_page(GFP_ATOMIC);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
||||
fps->crc_eof_page = page;
|
||||
fps->crc_eof_offset = 0;
|
||||
}
|
||||
|
||||
get_page(page);
|
||||
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
|
||||
fps->crc_eof_offset, tlen);
|
||||
skb->len += tlen;
|
||||
skb->data_len += tlen;
|
||||
skb->truesize += tlen;
|
||||
fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
|
||||
|
||||
if (fps->crc_eof_offset >= PAGE_SIZE) {
|
||||
fps->crc_eof_page = NULL;
|
||||
fps->crc_eof_offset = 0;
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
|
||||
|
||||
/**
|
||||
* fcoe_transport_lookup - find an fcoe transport that matches a netdev
|
||||
* @netdev: The netdev to look for from all attached transports
|
||||
*
|
||||
* Returns : ptr to the fcoe transport that supports this netdev or NULL
|
||||
* if not found.
|
||||
*
|
||||
* The ft_mutex should be held when this is called
|
||||
*/
|
||||
static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
list_for_each_entry(ft, &fcoe_transports, list)
|
||||
if (ft->match && ft->match(netdev))
|
||||
return ft;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_attach - Attaches an FCoE transport
|
||||
* @ft: The fcoe transport to be attached
|
||||
*
|
||||
* Returns : 0 for success
|
||||
*/
|
||||
int fcoe_transport_attach(struct fcoe_transport *ft)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
if (ft->attached) {
|
||||
LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
|
||||
ft->name);
|
||||
rc = -EEXIST;
|
||||
goto out_attach;
|
||||
}
|
||||
|
||||
/* Add default transport to the tail */
|
||||
if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
|
||||
list_add(&ft->list, &fcoe_transports);
|
||||
else
|
||||
list_add_tail(&ft->list, &fcoe_transports);
|
||||
|
||||
ft->attached = true;
|
||||
LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
|
||||
|
||||
out_attach:
|
||||
mutex_unlock(&ft_mutex);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(fcoe_transport_attach);
|
||||
|
||||
/**
|
||||
* fcoe_transport_attach - Detaches an FCoE transport
|
||||
* @ft: The fcoe transport to be attached
|
||||
*
|
||||
* Returns : 0 for success
|
||||
*/
|
||||
int fcoe_transport_detach(struct fcoe_transport *ft)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
mutex_lock(&ft_mutex);
|
||||
if (!ft->attached) {
|
||||
LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
|
||||
ft->name);
|
||||
rc = -ENODEV;
|
||||
goto out_attach;
|
||||
}
|
||||
|
||||
list_del(&ft->list);
|
||||
ft->attached = false;
|
||||
LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
|
||||
|
||||
out_attach:
|
||||
mutex_unlock(&ft_mutex);
|
||||
return rc;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(fcoe_transport_detach);
|
||||
|
||||
static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
|
||||
{
|
||||
int i, j;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
i = j = sprintf(buffer, "Attached FCoE transports:");
|
||||
mutex_lock(&ft_mutex);
|
||||
list_for_each_entry(ft, &fcoe_transports, list) {
|
||||
i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
|
||||
if (i >= PAGE_SIZE)
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&ft_mutex);
|
||||
if (i == j)
|
||||
i += snprintf(&buffer[i], IFNAMSIZ, "none");
|
||||
return i;
|
||||
}
|
||||
|
||||
static int __init fcoe_transport_init(void)
|
||||
{
|
||||
register_netdevice_notifier(&libfcoe_notifier);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __exit fcoe_transport_exit(void)
|
||||
{
|
||||
struct fcoe_transport *ft;
|
||||
|
||||
unregister_netdevice_notifier(&libfcoe_notifier);
|
||||
mutex_lock(&ft_mutex);
|
||||
list_for_each_entry(ft, &fcoe_transports, list)
|
||||
printk(KERN_ERR "FCoE transport %s is still attached!\n",
|
||||
ft->name);
|
||||
mutex_unlock(&ft_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int fcoe_add_netdev_mapping(struct net_device *netdev,
|
||||
struct fcoe_transport *ft)
|
||||
{
|
||||
struct fcoe_netdev_mapping *nm;
|
||||
|
||||
nm = kmalloc(sizeof(*nm), GFP_KERNEL);
|
||||
if (!nm) {
|
||||
printk(KERN_ERR "Unable to allocate netdev_mapping");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nm->netdev = netdev;
|
||||
nm->ft = ft;
|
||||
|
||||
mutex_lock(&fn_mutex);
|
||||
list_add(&nm->list, &fcoe_netdevs);
|
||||
mutex_unlock(&fn_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void fcoe_del_netdev_mapping(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_netdev_mapping *nm = NULL, *tmp;
|
||||
|
||||
mutex_lock(&fn_mutex);
|
||||
list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
|
||||
if (nm->netdev == netdev) {
|
||||
list_del(&nm->list);
|
||||
kfree(nm);
|
||||
mutex_unlock(&fn_mutex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&fn_mutex);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which
|
||||
* it was created
|
||||
*
|
||||
* Returns : ptr to the fcoe transport that supports this netdev or NULL
|
||||
* if not found.
|
||||
*
|
||||
* The ft_mutex should be held when this is called
|
||||
*/
|
||||
static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
|
||||
{
|
||||
struct fcoe_transport *ft = NULL;
|
||||
struct fcoe_netdev_mapping *nm;
|
||||
|
||||
mutex_lock(&fn_mutex);
|
||||
list_for_each_entry(nm, &fcoe_netdevs, list) {
|
||||
if (netdev == nm->netdev) {
|
||||
ft = nm->ft;
|
||||
mutex_unlock(&fn_mutex);
|
||||
return ft;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&fn_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_if_to_netdev() - Parse a name buffer to get a net device
|
||||
* @buffer: The name of the net device
|
||||
*
|
||||
* Returns: NULL or a ptr to net_device
|
||||
*/
|
||||
static struct net_device *fcoe_if_to_netdev(const char *buffer)
|
||||
{
|
||||
char *cp;
|
||||
char ifname[IFNAMSIZ + 2];
|
||||
|
||||
if (buffer) {
|
||||
strlcpy(ifname, buffer, IFNAMSIZ);
|
||||
cp = ifname + strlen(ifname);
|
||||
while (--cp >= ifname && *cp == '\n')
|
||||
*cp = '\0';
|
||||
return dev_get_by_name(&init_net, ifname);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* libfcoe_device_notification() - Handler for net device events
|
||||
* @notifier: The context of the notification
|
||||
* @event: The type of event
|
||||
* @ptr: The net device that the event was on
|
||||
*
|
||||
* This function is called by the Ethernet driver in case of link change event.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int libfcoe_device_notification(struct notifier_block *notifier,
|
||||
ulong event, void *ptr)
|
||||
{
|
||||
struct net_device *netdev = ptr;
|
||||
|
||||
switch (event) {
|
||||
case NETDEV_UNREGISTER:
|
||||
printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
|
||||
netdev->name);
|
||||
fcoe_del_netdev_mapping(netdev);
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* fcoe_transport_create() - Create a fcoe interface
|
||||
* @buffer: The name of the Ethernet interface to create on
|
||||
* @kp: The associated kernel param
|
||||
*
|
||||
* Called from sysfs. This holds the ft_mutex while calling the
|
||||
* registered fcoe transport's create function.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
|
||||
|
||||
if (!mutex_trylock(&ft_mutex))
|
||||
return restart_syscall();
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
if (!netdev) {
|
||||
LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
|
||||
goto out_nodev;
|
||||
}
|
||||
|
||||
ft = fcoe_netdev_map_lookup(netdev);
|
||||
if (ft) {
|
||||
LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
|
||||
"FCoE instance on %s.\n",
|
||||
ft->name, netdev->name);
|
||||
rc = -EEXIST;
|
||||
goto out_putdev;
|
||||
}
|
||||
|
||||
ft = fcoe_transport_lookup(netdev);
|
||||
if (!ft) {
|
||||
LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
|
||||
netdev->name);
|
||||
goto out_putdev;
|
||||
}
|
||||
|
||||
rc = fcoe_add_netdev_mapping(netdev, ft);
|
||||
if (rc) {
|
||||
LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
|
||||
"for FCoE transport %s for %s.\n",
|
||||
ft->name, netdev->name);
|
||||
goto out_putdev;
|
||||
}
|
||||
|
||||
/* pass to transport create */
|
||||
rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
|
||||
if (rc)
|
||||
fcoe_del_netdev_mapping(netdev);
|
||||
|
||||
LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
|
||||
ft->name, (rc) ? "failed" : "succeeded",
|
||||
netdev->name);
|
||||
|
||||
out_putdev:
|
||||
dev_put(netdev);
|
||||
out_nodev:
|
||||
mutex_unlock(&ft_mutex);
|
||||
if (rc == -ERESTARTSYS)
|
||||
return restart_syscall();
|
||||
else
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_destroy() - Destroy a FCoE interface
|
||||
* @buffer: The name of the Ethernet interface to be destroyed
|
||||
* @kp: The associated kernel parameter
|
||||
*
|
||||
* Called from sysfs. This holds the ft_mutex while calling the
|
||||
* registered fcoe transport's destroy function.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
if (!mutex_trylock(&ft_mutex))
|
||||
return restart_syscall();
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
if (!netdev) {
|
||||
LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
|
||||
goto out_nodev;
|
||||
}
|
||||
|
||||
ft = fcoe_netdev_map_lookup(netdev);
|
||||
if (!ft) {
|
||||
LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
|
||||
netdev->name);
|
||||
goto out_putdev;
|
||||
}
|
||||
|
||||
/* pass to transport destroy */
|
||||
rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
|
||||
fcoe_del_netdev_mapping(netdev);
|
||||
LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
|
||||
ft->name, (rc) ? "failed" : "succeeded",
|
||||
netdev->name);
|
||||
|
||||
out_putdev:
|
||||
dev_put(netdev);
|
||||
out_nodev:
|
||||
mutex_unlock(&ft_mutex);
|
||||
|
||||
if (rc == -ERESTARTSYS)
|
||||
return restart_syscall();
|
||||
else
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_disable() - Disables a FCoE interface
|
||||
* @buffer: The name of the Ethernet interface to be disabled
|
||||
* @kp: The associated kernel parameter
|
||||
*
|
||||
* Called from sysfs.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
if (!mutex_trylock(&ft_mutex))
|
||||
return restart_syscall();
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
if (!netdev)
|
||||
goto out_nodev;
|
||||
|
||||
ft = fcoe_netdev_map_lookup(netdev);
|
||||
if (!ft)
|
||||
goto out_putdev;
|
||||
|
||||
rc = ft->disable ? ft->disable(netdev) : -ENODEV;
|
||||
|
||||
out_putdev:
|
||||
dev_put(netdev);
|
||||
out_nodev:
|
||||
mutex_unlock(&ft_mutex);
|
||||
|
||||
if (rc == -ERESTARTSYS)
|
||||
return restart_syscall();
|
||||
else
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* fcoe_transport_enable() - Enables a FCoE interface
|
||||
* @buffer: The name of the Ethernet interface to be enabled
|
||||
* @kp: The associated kernel parameter
|
||||
*
|
||||
* Called from sysfs.
|
||||
*
|
||||
* Returns: 0 for success
|
||||
*/
|
||||
static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
|
||||
{
|
||||
int rc = -ENODEV;
|
||||
struct net_device *netdev = NULL;
|
||||
struct fcoe_transport *ft = NULL;
|
||||
|
||||
if (!mutex_trylock(&ft_mutex))
|
||||
return restart_syscall();
|
||||
|
||||
#ifdef CONFIG_LIBFCOE_MODULE
|
||||
/*
|
||||
* Make sure the module has been initialized, and is not about to be
|
||||
* removed. Module parameter sysfs files are writable before the
|
||||
* module_init function is called and after module_exit.
|
||||
*/
|
||||
if (THIS_MODULE->state != MODULE_STATE_LIVE)
|
||||
goto out_nodev;
|
||||
#endif
|
||||
|
||||
netdev = fcoe_if_to_netdev(buffer);
|
||||
if (!netdev)
|
||||
goto out_nodev;
|
||||
|
||||
ft = fcoe_netdev_map_lookup(netdev);
|
||||
if (!ft)
|
||||
goto out_putdev;
|
||||
|
||||
rc = ft->enable ? ft->enable(netdev) : -ENODEV;
|
||||
|
||||
out_putdev:
|
||||
dev_put(netdev);
|
||||
out_nodev:
|
||||
mutex_unlock(&ft_mutex);
|
||||
if (rc == -ERESTARTSYS)
|
||||
return restart_syscall();
|
||||
else
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* libfcoe_init() - Initialization routine for libfcoe.ko
|
||||
*/
|
||||
static int __init libfcoe_init(void)
|
||||
{
|
||||
fcoe_transport_init();
|
||||
|
||||
return 0;
|
||||
}
|
||||
module_init(libfcoe_init);
|
||||
|
||||
/**
|
||||
* libfcoe_exit() - Tear down libfcoe.ko
|
||||
*/
|
||||
static void __exit libfcoe_exit(void)
|
||||
{
|
||||
fcoe_transport_exit();
|
||||
}
|
||||
module_exit(libfcoe_exit);
|
|
@ -0,0 +1,31 @@
|
|||
#ifndef _FCOE_LIBFCOE_H_
|
||||
#define _FCOE_LIBFCOE_H_
|
||||
|
||||
extern unsigned int libfcoe_debug_logging;
|
||||
#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
|
||||
#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
|
||||
#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
|
||||
|
||||
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
|
||||
do { \
|
||||
if (unlikely(libfcoe_debug_logging & LEVEL)) \
|
||||
do { \
|
||||
CMD; \
|
||||
} while (0); \
|
||||
} while (0)
|
||||
|
||||
#define LIBFCOE_DBG(fmt, args...) \
|
||||
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
|
||||
printk(KERN_INFO "libfcoe: " fmt, ##args);)
|
||||
|
||||
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
|
||||
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
|
||||
printk(KERN_INFO "host%d: fip: " fmt, \
|
||||
(fip)->lp->host->host_no, ##args);)
|
||||
|
||||
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
|
||||
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
|
||||
printk(KERN_INFO "%s: " fmt, \
|
||||
__func__, ##args);)
|
||||
|
||||
#endif /* _FCOE_LIBFCOE_H_ */
|
|
@ -37,7 +37,7 @@
|
|||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.4.0.145"
|
||||
#define DRV_VERSION "1.5.0.1"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
|
|
|
@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
|
|||
vdev->linkstatus_pa);
|
||||
if (vdev->stats)
|
||||
pci_free_consistent(vdev->pdev,
|
||||
sizeof(struct vnic_dev),
|
||||
sizeof(struct vnic_stats),
|
||||
vdev->stats, vdev->stats_pa);
|
||||
if (vdev->fw_info)
|
||||
pci_free_consistent(vdev->pdev,
|
||||
|
|
|
@ -74,6 +74,10 @@ static int hpsa_allow_any;
|
|||
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(hpsa_allow_any,
|
||||
"Allow hpsa driver to access unknown HP Smart Array hardware");
|
||||
static int hpsa_simple_mode;
|
||||
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(hpsa_simple_mode,
|
||||
"Use 'simple mode' rather than 'performant mode'");
|
||||
|
||||
/* define the PCI info for the cards we can control */
|
||||
static const struct pci_device_id hpsa_pci_device_id[] = {
|
||||
|
@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
|
|||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
|
||||
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
|
||||
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
|
||||
{0,}
|
||||
|
@ -109,11 +115,13 @@ static struct board_type products[] = {
|
|||
{0x3249103C, "Smart Array P812", &SA5_access},
|
||||
{0x324a103C, "Smart Array P712m", &SA5_access},
|
||||
{0x324b103C, "Smart Array P711m", &SA5_access},
|
||||
{0x3250103C, "Smart Array", &SA5_access},
|
||||
{0x3250113C, "Smart Array", &SA5_access},
|
||||
{0x3250123C, "Smart Array", &SA5_access},
|
||||
{0x3250133C, "Smart Array", &SA5_access},
|
||||
{0x3250143C, "Smart Array", &SA5_access},
|
||||
{0x3350103C, "Smart Array", &SA5_access},
|
||||
{0x3351103C, "Smart Array", &SA5_access},
|
||||
{0x3352103C, "Smart Array", &SA5_access},
|
||||
{0x3353103C, "Smart Array", &SA5_access},
|
||||
{0x3354103C, "Smart Array", &SA5_access},
|
||||
{0x3355103C, "Smart Array", &SA5_access},
|
||||
{0x3356103C, "Smart Array", &SA5_access},
|
||||
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
|
||||
};
|
||||
|
||||
|
@ -147,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
|
|||
static int hpsa_slave_alloc(struct scsi_device *sdev);
|
||||
static void hpsa_slave_destroy(struct scsi_device *sdev);
|
||||
|
||||
static ssize_t raid_level_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
static ssize_t lunid_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
static ssize_t unique_id_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
static ssize_t host_show_firmware_revision(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
|
||||
static ssize_t host_store_rescan(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf, size_t count);
|
||||
static int check_for_unit_attention(struct ctlr_info *h,
|
||||
struct CommandList *c);
|
||||
static void check_ioctl_unit_attention(struct ctlr_info *h,
|
||||
|
@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
|
|||
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
|
||||
unsigned long *memory_bar);
|
||||
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
|
||||
|
||||
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
|
||||
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
|
||||
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
|
||||
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
|
||||
static DEVICE_ATTR(firmware_revision, S_IRUGO,
|
||||
host_show_firmware_revision, NULL);
|
||||
|
||||
static struct device_attribute *hpsa_sdev_attrs[] = {
|
||||
&dev_attr_raid_level,
|
||||
&dev_attr_lunid,
|
||||
&dev_attr_unique_id,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct device_attribute *hpsa_shost_attrs[] = {
|
||||
&dev_attr_rescan,
|
||||
&dev_attr_firmware_revision,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct scsi_host_template hpsa_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = "hpsa",
|
||||
.proc_name = "hpsa",
|
||||
.queuecommand = hpsa_scsi_queue_command,
|
||||
.scan_start = hpsa_scan_start,
|
||||
.scan_finished = hpsa_scan_finished,
|
||||
.change_queue_depth = hpsa_change_queue_depth,
|
||||
.this_id = -1,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
|
||||
.ioctl = hpsa_ioctl,
|
||||
.slave_alloc = hpsa_slave_alloc,
|
||||
.slave_destroy = hpsa_slave_destroy,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = hpsa_compat_ioctl,
|
||||
#endif
|
||||
.sdev_attrs = hpsa_sdev_attrs,
|
||||
.shost_attrs = hpsa_shost_attrs,
|
||||
};
|
||||
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
|
||||
void __iomem *vaddr, int wait_for_ready);
|
||||
#define BOARD_NOT_READY 0
|
||||
#define BOARD_READY 1
|
||||
|
||||
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
|
||||
{
|
||||
|
@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev,
|
|||
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
|
||||
}
|
||||
|
||||
/* Enqueuing and dequeuing functions for cmdlists. */
|
||||
static inline void addQ(struct hlist_head *list, struct CommandList *c)
|
||||
static ssize_t host_show_commands_outstanding(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
hlist_add_head(&c->list, list);
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct ctlr_info *h = shost_to_hba(shost);
|
||||
|
||||
return snprintf(buf, 20, "%d\n", h->commands_outstanding);
|
||||
}
|
||||
|
||||
static inline u32 next_command(struct ctlr_info *h)
|
||||
static ssize_t host_show_transport_mode(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
u32 a;
|
||||
struct ctlr_info *h;
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
|
||||
if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
|
||||
return h->access.command_completed(h);
|
||||
|
||||
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
|
||||
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
|
||||
(h->reply_pool_head)++;
|
||||
h->commands_outstanding--;
|
||||
} else {
|
||||
a = FIFO_EMPTY;
|
||||
}
|
||||
/* Check for wraparound */
|
||||
if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
|
||||
h->reply_pool_head = h->reply_pool;
|
||||
h->reply_pool_wraparound ^= 1;
|
||||
}
|
||||
return a;
|
||||
h = shost_to_hba(shost);
|
||||
return snprintf(buf, 20, "%s\n",
|
||||
h->transMethod & CFGTBL_Trans_Performant ?
|
||||
"performant" : "simple");
|
||||
}
|
||||
|
||||
/* set_performant_mode: Modify the tag for cciss performant
|
||||
* set bit 0 for pull model, bits 3-1 for block fetch
|
||||
* register number
|
||||
*/
|
||||
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
|
||||
/* List of controllers which cannot be reset on kexec with reset_devices */
|
||||
static u32 unresettable_controller[] = {
|
||||
0x324a103C, /* Smart Array P712m */
|
||||
0x324b103C, /* SmartArray P711m */
|
||||
0x3223103C, /* Smart Array P800 */
|
||||
0x3234103C, /* Smart Array P400 */
|
||||
0x3235103C, /* Smart Array P400i */
|
||||
0x3211103C, /* Smart Array E200i */
|
||||
0x3212103C, /* Smart Array E200 */
|
||||
0x3213103C, /* Smart Array E200i */
|
||||
0x3214103C, /* Smart Array E200i */
|
||||
0x3215103C, /* Smart Array E200i */
|
||||
0x3237103C, /* Smart Array E500 */
|
||||
0x323D103C, /* Smart Array P700m */
|
||||
0x409C0E11, /* Smart Array 6400 */
|
||||
0x409D0E11, /* Smart Array 6400 EM */
|
||||
};
|
||||
|
||||
static int ctlr_is_resettable(struct ctlr_info *h)
|
||||
{
|
||||
if (likely(h->transMethod == CFGTBL_Trans_Performant))
|
||||
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
|
||||
if (unresettable_controller[i] == h->board_id)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
|
||||
struct CommandList *c)
|
||||
static ssize_t host_show_resettable(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct ctlr_info *h;
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
|
||||
set_performant_mode(h, c);
|
||||
spin_lock_irqsave(&h->lock, flags);
|
||||
addQ(&h->reqQ, c);
|
||||
h->Qdepth++;
|
||||
start_io(h);
|
||||
spin_unlock_irqrestore(&h->lock, flags);
|
||||
}
|
||||
|
||||
static inline void removeQ(struct CommandList *c)
|
||||
{
|
||||
if (WARN_ON(hlist_unhashed(&c->list)))
|
||||
return;
|
||||
hlist_del_init(&c->list);
|
||||
}
|
||||
|
||||
static inline int is_hba_lunid(unsigned char scsi3addr[])
|
||||
{
|
||||
return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
|
||||
h = shost_to_hba(shost);
|
||||
return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
|
||||
}
|
||||
|
||||
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
|
||||
|
@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
|
|||
return (scsi3addr[3] & 0xC0) == 0x40;
|
||||
}
|
||||
|
||||
static inline int is_scsi_rev_5(struct ctlr_info *h)
|
||||
{
|
||||
if (!h->hba_inquiry_data)
|
||||
return 0;
|
||||
if ((h->hba_inquiry_data[2] & 0x07) == 5)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
|
||||
"UNKNOWN"
|
||||
};
|
||||
|
@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev,
|
|||
sn[12], sn[13], sn[14], sn[15]);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
|
||||
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
|
||||
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
|
||||
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
|
||||
static DEVICE_ATTR(firmware_revision, S_IRUGO,
|
||||
host_show_firmware_revision, NULL);
|
||||
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
|
||||
host_show_commands_outstanding, NULL);
|
||||
static DEVICE_ATTR(transport_mode, S_IRUGO,
|
||||
host_show_transport_mode, NULL);
|
||||
static DEVICE_ATTR(resettable, S_IRUGO,
|
||||
host_show_resettable, NULL);
|
||||
|
||||
static struct device_attribute *hpsa_sdev_attrs[] = {
|
||||
&dev_attr_raid_level,
|
||||
&dev_attr_lunid,
|
||||
&dev_attr_unique_id,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct device_attribute *hpsa_shost_attrs[] = {
|
||||
&dev_attr_rescan,
|
||||
&dev_attr_firmware_revision,
|
||||
&dev_attr_commands_outstanding,
|
||||
&dev_attr_transport_mode,
|
||||
&dev_attr_resettable,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct scsi_host_template hpsa_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = "hpsa",
|
||||
.proc_name = "hpsa",
|
||||
.queuecommand = hpsa_scsi_queue_command,
|
||||
.scan_start = hpsa_scan_start,
|
||||
.scan_finished = hpsa_scan_finished,
|
||||
.change_queue_depth = hpsa_change_queue_depth,
|
||||
.this_id = -1,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
|
||||
.ioctl = hpsa_ioctl,
|
||||
.slave_alloc = hpsa_slave_alloc,
|
||||
.slave_destroy = hpsa_slave_destroy,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = hpsa_compat_ioctl,
|
||||
#endif
|
||||
.sdev_attrs = hpsa_sdev_attrs,
|
||||
.shost_attrs = hpsa_shost_attrs,
|
||||
};
|
||||
|
||||
|
||||
/* Enqueuing and dequeuing functions for cmdlists. */
|
||||
static inline void addQ(struct list_head *list, struct CommandList *c)
|
||||
{
|
||||
list_add_tail(&c->list, list);
|
||||
}
|
||||
|
||||
static inline u32 next_command(struct ctlr_info *h)
|
||||
{
|
||||
u32 a;
|
||||
|
||||
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
|
||||
return h->access.command_completed(h);
|
||||
|
||||
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
|
||||
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
|
||||
(h->reply_pool_head)++;
|
||||
h->commands_outstanding--;
|
||||
} else {
|
||||
a = FIFO_EMPTY;
|
||||
}
|
||||
/* Check for wraparound */
|
||||
if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
|
||||
h->reply_pool_head = h->reply_pool;
|
||||
h->reply_pool_wraparound ^= 1;
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
/* set_performant_mode: Modify the tag for cciss performant
|
||||
* set bit 0 for pull model, bits 3-1 for block fetch
|
||||
* register number
|
||||
*/
|
||||
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
|
||||
{
|
||||
if (likely(h->transMethod & CFGTBL_Trans_Performant))
|
||||
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
|
||||
}
|
||||
|
||||
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
|
||||
struct CommandList *c)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
set_performant_mode(h, c);
|
||||
spin_lock_irqsave(&h->lock, flags);
|
||||
addQ(&h->reqQ, c);
|
||||
h->Qdepth++;
|
||||
start_io(h);
|
||||
spin_unlock_irqrestore(&h->lock, flags);
|
||||
}
|
||||
|
||||
static inline void removeQ(struct CommandList *c)
|
||||
{
|
||||
if (WARN_ON(list_empty(&c->list)))
|
||||
return;
|
||||
list_del_init(&c->list);
|
||||
}
|
||||
|
||||
static inline int is_hba_lunid(unsigned char scsi3addr[])
|
||||
{
|
||||
return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
|
||||
}
|
||||
|
||||
static inline int is_scsi_rev_5(struct ctlr_info *h)
|
||||
{
|
||||
if (!h->hba_inquiry_data)
|
||||
return 0;
|
||||
if ((h->hba_inquiry_data[2] & 0x07) == 5)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpsa_find_target_lun(struct ctlr_info *h,
|
||||
unsigned char scsi3addr[], int bus, int *target, int *lun)
|
||||
{
|
||||
|
@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp,
|
|||
cmd->result = DID_TIME_OUT << 16;
|
||||
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
|
||||
break;
|
||||
case CMD_UNABORTABLE:
|
||||
cmd->result = DID_ERROR << 16;
|
||||
dev_warn(&h->pdev->dev, "Command unabortable\n");
|
||||
break;
|
||||
default:
|
||||
cmd->result = DID_ERROR << 16;
|
||||
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
|
||||
|
@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
|
|||
sh->sg_tablesize = h->maxsgentries;
|
||||
h->scsi_host = sh;
|
||||
sh->hostdata[0] = (unsigned long) h;
|
||||
sh->irq = h->intr[PERF_MODE_INT];
|
||||
sh->irq = h->intr[h->intr_mode];
|
||||
sh->unique_id = sh->irq;
|
||||
error = scsi_add_host(sh, &h->pdev->dev);
|
||||
if (error)
|
||||
|
@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
|
|||
case CMD_TIMEOUT:
|
||||
dev_warn(d, "cp %p timed out\n", cp);
|
||||
break;
|
||||
case CMD_UNABORTABLE:
|
||||
dev_warn(d, "Command unabortable\n");
|
||||
break;
|
||||
default:
|
||||
dev_warn(d, "cp %p returned unknown status %x\n", cp,
|
||||
ei->CommandStatus);
|
||||
|
@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
|
|||
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
|
||||
return 0;
|
||||
|
||||
memset(scsi3addr, 0, 8);
|
||||
scsi3addr[3] = target;
|
||||
if (is_hba_lunid(scsi3addr))
|
||||
return 0; /* Don't add the RAID controller here. */
|
||||
|
||||
|
@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
|
|||
return 0;
|
||||
}
|
||||
|
||||
memset(scsi3addr, 0, 8);
|
||||
scsi3addr[3] = target;
|
||||
if (hpsa_update_device_info(h, scsi3addr, this_device))
|
||||
return 0;
|
||||
(*nmsa2xxx_enclosures)++;
|
||||
|
@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
|
|||
|
||||
c->cmdindex = i;
|
||||
|
||||
INIT_HLIST_NODE(&c->list);
|
||||
INIT_LIST_HEAD(&c->list);
|
||||
c->busaddr = (u32) cmd_dma_handle;
|
||||
temp64.val = (u64) err_dma_handle;
|
||||
c->ErrDesc.Addr.lower = temp64.val32.lower;
|
||||
|
@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
|
|||
}
|
||||
memset(c->err_info, 0, sizeof(*c->err_info));
|
||||
|
||||
INIT_HLIST_NODE(&c->list);
|
||||
INIT_LIST_HEAD(&c->list);
|
||||
c->busaddr = (u32) cmd_dma_handle;
|
||||
temp64.val = (u64) err_dma_handle;
|
||||
c->ErrDesc.Addr.lower = temp64.val32.lower;
|
||||
|
@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
|
|||
pci_free_consistent(h->pdev, sizeof(*c->err_info),
|
||||
c->err_info, (dma_addr_t) temp64.val);
|
||||
pci_free_consistent(h->pdev, sizeof(*c),
|
||||
c, (dma_addr_t) c->busaddr);
|
||||
c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
|
|||
int err;
|
||||
u32 cp;
|
||||
|
||||
memset(&arg64, 0, sizeof(arg64));
|
||||
err = 0;
|
||||
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
|
||||
sizeof(arg64.LUN_info));
|
||||
|
@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
|
|||
int err;
|
||||
u32 cp;
|
||||
|
||||
memset(&arg64, 0, sizeof(arg64));
|
||||
err = 0;
|
||||
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
|
||||
sizeof(arg64.LUN_info));
|
||||
|
@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|||
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
|
||||
if (buff == NULL)
|
||||
return -EFAULT;
|
||||
}
|
||||
if (iocommand.Request.Type.Direction == XFER_WRITE) {
|
||||
/* Copy the data into the buffer we created */
|
||||
if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
|
||||
kfree(buff);
|
||||
return -EFAULT;
|
||||
if (iocommand.Request.Type.Direction == XFER_WRITE) {
|
||||
/* Copy the data into the buffer we created */
|
||||
if (copy_from_user(buff, iocommand.buf,
|
||||
iocommand.buf_size)) {
|
||||
kfree(buff);
|
||||
return -EFAULT;
|
||||
}
|
||||
} else {
|
||||
memset(buff, 0, iocommand.buf_size);
|
||||
}
|
||||
} else
|
||||
memset(buff, 0, iocommand.buf_size);
|
||||
}
|
||||
c = cmd_special_alloc(h);
|
||||
if (c == NULL) {
|
||||
kfree(buff);
|
||||
|
@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|||
cmd_special_free(h, c);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (iocommand.Request.Type.Direction == XFER_READ) {
|
||||
if (iocommand.Request.Type.Direction == XFER_READ &&
|
||||
iocommand.buf_size > 0) {
|
||||
/* Copy the data out of the buffer we created */
|
||||
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
|
||||
kfree(buff);
|
||||
|
@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|||
}
|
||||
c->cmd_type = CMD_IOCTL_PEND;
|
||||
c->Header.ReplyQueue = 0;
|
||||
|
||||
if (ioc->buf_size > 0) {
|
||||
c->Header.SGList = sg_used;
|
||||
c->Header.SGTotal = sg_used;
|
||||
} else {
|
||||
c->Header.SGList = 0;
|
||||
c->Header.SGTotal = 0;
|
||||
}
|
||||
c->Header.SGList = c->Header.SGTotal = sg_used;
|
||||
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
|
||||
c->Header.Tag.lower = c->busaddr;
|
||||
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
|
||||
|
@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|||
}
|
||||
}
|
||||
hpsa_scsi_do_simple_cmd_core(h, c);
|
||||
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
|
||||
if (sg_used)
|
||||
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
|
||||
check_ioctl_unit_attention(h, c);
|
||||
/* Copy the error information out */
|
||||
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
|
||||
|
@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|||
status = -EFAULT;
|
||||
goto cleanup1;
|
||||
}
|
||||
if (ioc->Request.Type.Direction == XFER_READ) {
|
||||
if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
|
||||
/* Copy the data out of the buffer we created */
|
||||
BYTE __user *ptr = ioc->buf;
|
||||
for (i = 0; i < sg_used; i++) {
|
||||
|
@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h)
|
|||
{
|
||||
struct CommandList *c;
|
||||
|
||||
while (!hlist_empty(&h->reqQ)) {
|
||||
c = hlist_entry(h->reqQ.first, struct CommandList, list);
|
||||
while (!list_empty(&h->reqQ)) {
|
||||
c = list_entry(h->reqQ.next, struct CommandList, list);
|
||||
/* can't do anything if fifo is full */
|
||||
if ((h->access.fifo_full(h))) {
|
||||
dev_warn(&h->pdev->dev, "fifo full\n");
|
||||
|
@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
|
|||
|
||||
static inline u32 hpsa_tag_contains_index(u32 tag)
|
||||
{
|
||||
#define DIRECT_LOOKUP_BIT 0x10
|
||||
return tag & DIRECT_LOOKUP_BIT;
|
||||
}
|
||||
|
||||
static inline u32 hpsa_tag_to_index(u32 tag)
|
||||
{
|
||||
#define DIRECT_LOOKUP_SHIFT 5
|
||||
return tag >> DIRECT_LOOKUP_SHIFT;
|
||||
}
|
||||
|
||||
static inline u32 hpsa_tag_discard_error_bits(u32 tag)
|
||||
|
||||
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
|
||||
{
|
||||
#define HPSA_ERROR_BITS 0x03
|
||||
return tag & ~HPSA_ERROR_BITS;
|
||||
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
|
||||
#define HPSA_SIMPLE_ERROR_BITS 0x03
|
||||
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
|
||||
return tag & ~HPSA_SIMPLE_ERROR_BITS;
|
||||
return tag & ~HPSA_PERF_ERROR_BITS;
|
||||
}
|
||||
|
||||
/* process completion of an indexed ("direct lookup") command */
|
||||
|
@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
|
|||
{
|
||||
u32 tag;
|
||||
struct CommandList *c = NULL;
|
||||
struct hlist_node *tmp;
|
||||
|
||||
tag = hpsa_tag_discard_error_bits(raw_tag);
|
||||
hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
|
||||
tag = hpsa_tag_discard_error_bits(h, raw_tag);
|
||||
list_for_each_entry(c, &h->cmpQ, list) {
|
||||
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
|
||||
finish_cmd(c, raw_tag);
|
||||
return next_command(h);
|
||||
|
@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* Send a message CDB to the firmware. */
|
||||
/* Send a message CDB to the firmware. Careful, this only works
|
||||
* in simple mode, not performant mode due to the tag lookup.
|
||||
* We only ever use this immediately after a controller reset.
|
||||
*/
|
||||
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
|
||||
unsigned char type)
|
||||
{
|
||||
|
@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
|
|||
|
||||
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
|
||||
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
|
||||
if (hpsa_tag_discard_error_bits(tag) == paddr32)
|
||||
if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
|
||||
break;
|
||||
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
|
||||
}
|
||||
|
@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
|
|||
#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
|
||||
#define hpsa_noop(p) hpsa_message(p, 3, 0)
|
||||
|
||||
static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
|
||||
{
|
||||
/* the #defines are stolen from drivers/pci/msi.h. */
|
||||
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
|
||||
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
|
||||
|
||||
int pos;
|
||||
u16 control = 0;
|
||||
|
||||
pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
|
||||
if (pos) {
|
||||
pci_read_config_word(pdev, msi_control_reg(pos), &control);
|
||||
if (control & PCI_MSI_FLAGS_ENABLE) {
|
||||
dev_info(&pdev->dev, "resetting MSI\n");
|
||||
pci_write_config_word(pdev, msi_control_reg(pos),
|
||||
control & ~PCI_MSI_FLAGS_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
|
||||
if (pos) {
|
||||
pci_read_config_word(pdev, msi_control_reg(pos), &control);
|
||||
if (control & PCI_MSIX_FLAGS_ENABLE) {
|
||||
dev_info(&pdev->dev, "resetting MSI-X\n");
|
||||
pci_write_config_word(pdev, msi_control_reg(pos),
|
||||
control & ~PCI_MSIX_FLAGS_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
|
||||
void * __iomem vaddr, bool use_doorbell)
|
||||
{
|
||||
|
@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
|
|||
*/
|
||||
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
||||
{
|
||||
u16 saved_config_space[32];
|
||||
u64 cfg_offset;
|
||||
u32 cfg_base_addr;
|
||||
u64 cfg_base_addr_index;
|
||||
void __iomem *vaddr;
|
||||
unsigned long paddr;
|
||||
u32 misc_fw_support, active_transport;
|
||||
int rc, i;
|
||||
int rc;
|
||||
struct CfgTable __iomem *cfgtable;
|
||||
bool use_doorbell;
|
||||
u32 board_id;
|
||||
u16 command_register;
|
||||
|
||||
/* For controllers as old as the P600, this is very nearly
|
||||
* the same thing as
|
||||
|
@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
|||
* pci_set_power_state(pci_dev, PCI_D0);
|
||||
* pci_restore_state(pci_dev);
|
||||
*
|
||||
* but we can't use these nice canned kernel routines on
|
||||
* kexec, because they also check the MSI/MSI-X state in PCI
|
||||
* configuration space and do the wrong thing when it is
|
||||
* set/cleared. Also, the pci_save/restore_state functions
|
||||
* violate the ordering requirements for restoring the
|
||||
* configuration space from the CCISS document (see the
|
||||
* comment below). So we roll our own ....
|
||||
*
|
||||
* For controllers newer than the P600, the pci power state
|
||||
* method of resetting doesn't work so we have another way
|
||||
* using the doorbell register.
|
||||
|
@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
|||
* likely not be happy. Just forbid resetting this conjoined mess.
|
||||
* The 640x isn't really supported by hpsa anyway.
|
||||
*/
|
||||
hpsa_lookup_board_id(pdev, &board_id);
|
||||
rc = hpsa_lookup_board_id(pdev, &board_id);
|
||||
if (rc < 0) {
|
||||
dev_warn(&pdev->dev, "Not resetting device.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
|
||||
return -ENOTSUPP;
|
||||
|
||||
for (i = 0; i < 32; i++)
|
||||
pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
|
||||
|
||||
/* Save the PCI command register */
|
||||
pci_read_config_word(pdev, 4, &command_register);
|
||||
/* Turn the board off. This is so that later pci_restore_state()
|
||||
* won't turn the board on before the rest of config space is ready.
|
||||
*/
|
||||
pci_disable_device(pdev);
|
||||
pci_save_state(pdev);
|
||||
|
||||
/* find the first memory BAR, so we can find the cfg table */
|
||||
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
|
||||
|
@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
|
|||
misc_fw_support = readl(&cfgtable->misc_fw_support);
|
||||
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
|
||||
|
||||
/* The doorbell reset seems to cause lockups on some Smart
|
||||
* Arrays (e.g. P410, P410i, maybe others). Until this is
|
||||
* fixed or at least isolated, avoid the doorbell reset.
|
||||
*/
|
||||
use_doorbell = 0;
|
||||
|
||||
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
|
||||
if (rc)
|
||||
goto unmap_cfgtable;
|
||||
|
||||
/* Restore the PCI configuration space. The Open CISS
|
||||
* Specification says, "Restore the PCI Configuration
|
||||
* Registers, offsets 00h through 60h. It is important to
|
||||
* restore the command register, 16-bits at offset 04h,
|
||||
* last. Do not restore the configuration status register,
|
||||
* 16-bits at offset 06h." Note that the offset is 2*i.
|
||||
*/
|
||||
for (i = 0; i < 32; i++) {
|
||||
if (i == 2 || i == 3)
|
||||
continue;
|
||||
pci_write_config_word(pdev, 2*i, saved_config_space[i]);
|
||||
pci_restore_state(pdev);
|
||||
rc = pci_enable_device(pdev);
|
||||
if (rc) {
|
||||
dev_warn(&pdev->dev, "failed to enable device.\n");
|
||||
goto unmap_cfgtable;
|
||||
}
|
||||
wmb();
|
||||
pci_write_config_word(pdev, 4, saved_config_space[2]);
|
||||
pci_write_config_word(pdev, 4, command_register);
|
||||
|
||||
/* Some devices (notably the HP Smart Array 5i Controller)
|
||||
need a little pause here */
|
||||
msleep(HPSA_POST_RESET_PAUSE_MSECS);
|
||||
|
||||
/* Wait for board to become not ready, then ready. */
|
||||
dev_info(&pdev->dev, "Waiting for board to become ready.\n");
|
||||
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
|
||||
if (rc)
|
||||
dev_warn(&pdev->dev,
|
||||
"failed waiting for board to become not ready\n");
|
||||
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
|
||||
if (rc) {
|
||||
dev_warn(&pdev->dev,
|
||||
"failed waiting for board to become ready\n");
|
||||
goto unmap_cfgtable;
|
||||
}
|
||||
dev_info(&pdev->dev, "board ready.\n");
|
||||
|
||||
/* Controller should be in simple mode at this point. If it's not,
|
||||
* It means we're on one of those controllers which doesn't support
|
||||
* the doorbell reset method and on which the PCI power management reset
|
||||
* method doesn't work (P800, for example.)
|
||||
* In those cases, pretend the reset worked and hope for the best.
|
||||
* In those cases, don't try to proceed, as it generally doesn't work.
|
||||
*/
|
||||
active_transport = readl(&cfgtable->TransportActive);
|
||||
if (active_transport & PERFORMANT_MODE) {
|
||||
dev_warn(&pdev->dev, "Unable to successfully reset controller,"
|
||||
" proceeding anyway.\n");
|
||||
rc = -ENOTSUPP;
|
||||
" Ignoring controller.\n");
|
||||
rc = -ENODEV;
|
||||
}
|
||||
|
||||
unmap_cfgtable:
|
||||
|
@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
|
|||
default_int_mode:
|
||||
#endif /* CONFIG_PCI_MSI */
|
||||
/* if we get here we're going to use the default interrupt mode */
|
||||
h->intr[PERF_MODE_INT] = h->pdev->irq;
|
||||
h->intr[h->intr_mode] = h->pdev->irq;
|
||||
}
|
||||
|
||||
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
|
||||
|
@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
|
||||
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
|
||||
void __iomem *vaddr, int wait_for_ready)
|
||||
{
|
||||
int i;
|
||||
int i, iterations;
|
||||
u32 scratchpad;
|
||||
if (wait_for_ready)
|
||||
iterations = HPSA_BOARD_READY_ITERATIONS;
|
||||
else
|
||||
iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
|
||||
|
||||
for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
|
||||
scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
|
||||
if (scratchpad == HPSA_FIRMWARE_READY)
|
||||
return 0;
|
||||
for (i = 0; i < iterations; i++) {
|
||||
scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
|
||||
if (wait_for_ready) {
|
||||
if (scratchpad == HPSA_FIRMWARE_READY)
|
||||
return 0;
|
||||
} else {
|
||||
if (scratchpad != HPSA_FIRMWARE_READY)
|
||||
return 0;
|
||||
}
|
||||
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
|
||||
}
|
||||
dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
|
||||
dev_warn(&pdev->dev, "board not ready, timed out.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
|
|||
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
|
||||
{
|
||||
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
|
||||
|
||||
/* Limit commands in memory limited kdump scenario. */
|
||||
if (reset_devices && h->max_commands > 32)
|
||||
h->max_commands = 32;
|
||||
|
||||
if (h->max_commands < 16) {
|
||||
dev_warn(&h->pdev->dev, "Controller reports "
|
||||
"max supported commands of %d, an obvious lie. "
|
||||
|
@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
|
|||
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
|
||||
{
|
||||
int i;
|
||||
u32 doorbell_value;
|
||||
unsigned long flags;
|
||||
|
||||
/* under certain very rare conditions, this can take awhile.
|
||||
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
|
||||
* as we enter this code.)
|
||||
*/
|
||||
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
|
||||
if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
|
||||
spin_lock_irqsave(&h->lock, flags);
|
||||
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
|
||||
spin_unlock_irqrestore(&h->lock, flags);
|
||||
if (!(doorbell_value & CFGTBL_ChangeReq))
|
||||
break;
|
||||
/* delay and try again */
|
||||
msleep(10);
|
||||
usleep_range(10000, 20000);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
|
|||
"unable to get board into simple mode\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
h->transMethod = CFGTBL_Trans_Simple;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
|
|||
err = -ENOMEM;
|
||||
goto err_out_free_res;
|
||||
}
|
||||
err = hpsa_wait_for_board_ready(h);
|
||||
err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
|
||||
if (err)
|
||||
goto err_out_free_res;
|
||||
err = hpsa_find_cfgtables(h);
|
||||
|
@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
|
|||
return 0; /* just try to do the kdump anyhow. */
|
||||
if (rc)
|
||||
return -ENODEV;
|
||||
if (hpsa_reset_msi(pdev))
|
||||
return -ENODEV;
|
||||
|
||||
/* Now try to get the controller to respond to a no-op */
|
||||
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
|
||||
|
@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|||
|
||||
h->pdev = pdev;
|
||||
h->busy_initializing = 1;
|
||||
INIT_HLIST_HEAD(&h->cmpQ);
|
||||
INIT_HLIST_HEAD(&h->reqQ);
|
||||
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
|
||||
INIT_LIST_HEAD(&h->cmpQ);
|
||||
INIT_LIST_HEAD(&h->reqQ);
|
||||
spin_lock_init(&h->lock);
|
||||
spin_lock_init(&h->scan_lock);
|
||||
rc = hpsa_pci_init(h);
|
||||
if (rc != 0)
|
||||
goto clean1;
|
||||
|
@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|||
h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
||||
|
||||
if (h->msix_vector || h->msi_vector)
|
||||
rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
|
||||
rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
|
||||
IRQF_DISABLED, h->devname, h);
|
||||
else
|
||||
rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
|
||||
rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
|
||||
IRQF_DISABLED, h->devname, h);
|
||||
if (rc) {
|
||||
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
|
||||
h->intr[PERF_MODE_INT], h->devname);
|
||||
h->intr[h->intr_mode], h->devname);
|
||||
goto clean2;
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
|
||||
h->devname, pdev->device,
|
||||
h->intr[PERF_MODE_INT], dac ? "" : " not");
|
||||
h->intr[h->intr_mode], dac ? "" : " not");
|
||||
|
||||
h->cmd_pool_bits =
|
||||
kmalloc(((h->nr_cmds + BITS_PER_LONG -
|
||||
|
@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|||
}
|
||||
if (hpsa_allocate_sg_chain_blocks(h))
|
||||
goto clean4;
|
||||
spin_lock_init(&h->lock);
|
||||
spin_lock_init(&h->scan_lock);
|
||||
init_waitqueue_head(&h->scan_wait_queue);
|
||||
h->scan_finished = 1; /* no scan currently in progress */
|
||||
|
||||
|
@ -3843,7 +3912,7 @@ clean4:
|
|||
h->nr_cmds * sizeof(struct ErrorInfo),
|
||||
h->errinfo_pool,
|
||||
h->errinfo_pool_dhandle);
|
||||
free_irq(h->intr[PERF_MODE_INT], h);
|
||||
free_irq(h->intr[h->intr_mode], h);
|
||||
clean2:
|
||||
clean1:
|
||||
h->busy_initializing = 0;
|
||||
|
@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
|
|||
*/
|
||||
hpsa_flush_cache(h);
|
||||
h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
||||
free_irq(h->intr[PERF_MODE_INT], h);
|
||||
free_irq(h->intr[h->intr_mode], h);
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
if (h->msix_vector)
|
||||
pci_disable_msix(h->pdev);
|
||||
|
@ -3989,7 +4058,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
|
|||
}
|
||||
}
|
||||
|
||||
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
|
||||
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
|
||||
u32 use_short_tags)
|
||||
{
|
||||
int i;
|
||||
unsigned long register_value;
|
||||
|
@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
|
|||
writel(0, &h->transtable->RepQCtrAddrHigh32);
|
||||
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
|
||||
writel(0, &h->transtable->RepQAddr0High32);
|
||||
writel(CFGTBL_Trans_Performant,
|
||||
writel(CFGTBL_Trans_Performant | use_short_tags,
|
||||
&(h->cfgtable->HostWrite.TransportRequest));
|
||||
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
|
||||
hpsa_wait_for_mode_change_ack(h);
|
||||
|
@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
|
|||
" performant mode\n");
|
||||
return;
|
||||
}
|
||||
/* Change the access methods to the performant access methods */
|
||||
h->access = SA5_performant_access;
|
||||
h->transMethod = CFGTBL_Trans_Performant;
|
||||
}
|
||||
|
||||
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|
||||
{
|
||||
u32 trans_support;
|
||||
|
||||
if (hpsa_simple_mode)
|
||||
return;
|
||||
|
||||
trans_support = readl(&(h->cfgtable->TransportSupport));
|
||||
if (!(trans_support & PERFORMANT_MODE))
|
||||
return;
|
||||
|
@ -4072,11 +4148,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|
|||
|| (h->blockFetchTable == NULL))
|
||||
goto clean_up;
|
||||
|
||||
hpsa_enter_performant_mode(h);
|
||||
|
||||
/* Change the access methods to the performant access methods */
|
||||
h->access = SA5_performant_access;
|
||||
h->transMethod = CFGTBL_Trans_Performant;
|
||||
hpsa_enter_performant_mode(h,
|
||||
trans_support & CFGTBL_Trans_use_short_tags);
|
||||
|
||||
return;
|
||||
|
||||
|
|
|
@ -72,11 +72,12 @@ struct ctlr_info {
|
|||
unsigned int intr[4];
|
||||
unsigned int msix_vector;
|
||||
unsigned int msi_vector;
|
||||
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
|
||||
struct access_method access;
|
||||
|
||||
/* queue and queue Info */
|
||||
struct hlist_head reqQ;
|
||||
struct hlist_head cmpQ;
|
||||
struct list_head reqQ;
|
||||
struct list_head cmpQ;
|
||||
unsigned int Qdepth;
|
||||
unsigned int maxQsinceinit;
|
||||
unsigned int maxSG;
|
||||
|
@ -154,12 +155,16 @@ struct ctlr_info {
|
|||
* HPSA_BOARD_READY_ITERATIONS are derived from those.
|
||||
*/
|
||||
#define HPSA_BOARD_READY_WAIT_SECS (120)
|
||||
#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
|
||||
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
|
||||
#define HPSA_BOARD_READY_POLL_INTERVAL \
|
||||
((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
|
||||
#define HPSA_BOARD_READY_ITERATIONS \
|
||||
((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
|
||||
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
|
||||
#define HPSA_BOARD_NOT_READY_ITERATIONS \
|
||||
((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
|
||||
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
|
||||
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
|
||||
#define HPSA_POST_RESET_NOOP_RETRIES (12)
|
||||
|
||||
|
|
|
@ -104,6 +104,7 @@
|
|||
|
||||
#define CFGTBL_Trans_Simple 0x00000002l
|
||||
#define CFGTBL_Trans_Performant 0x00000004l
|
||||
#define CFGTBL_Trans_use_short_tags 0x20000000l
|
||||
|
||||
#define CFGTBL_BusType_Ultra2 0x00000001l
|
||||
#define CFGTBL_BusType_Ultra3 0x00000002l
|
||||
|
@ -265,6 +266,7 @@ struct ErrorInfo {
|
|||
|
||||
#define DIRECT_LOOKUP_SHIFT 5
|
||||
#define DIRECT_LOOKUP_BIT 0x10
|
||||
#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
|
||||
|
||||
#define HPSA_ERROR_BIT 0x02
|
||||
struct ctlr_info; /* defined in hpsa.h */
|
||||
|
@ -291,7 +293,7 @@ struct CommandList {
|
|||
struct ctlr_info *h;
|
||||
int cmd_type;
|
||||
long cmdindex;
|
||||
struct hlist_node list;
|
||||
struct list_head list;
|
||||
struct request *rq;
|
||||
struct completion *waiting;
|
||||
void *scsi_cmd;
|
||||
|
|
|
@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
|
|||
ipr_clear_res_target(res);
|
||||
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
|
||||
}
|
||||
} else if (!res->sdev) {
|
||||
} else if (!res->sdev || res->del_from_ml) {
|
||||
res->add_to_ml = 1;
|
||||
if (ioa_cfg->allow_ml_add_del)
|
||||
schedule_work(&ioa_cfg->work_q);
|
||||
|
@ -3104,7 +3104,10 @@ restart:
|
|||
did_work = 1;
|
||||
sdev = res->sdev;
|
||||
if (!scsi_device_get(sdev)) {
|
||||
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
|
||||
if (!res->add_to_ml)
|
||||
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
|
||||
else
|
||||
res->del_from_ml = 0;
|
||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
|
||||
scsi_remove_device(sdev);
|
||||
scsi_device_put(sdev);
|
||||
|
@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
|
|||
|
||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
|
||||
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
|
||||
flush_scheduled_work();
|
||||
flush_work_sync(&ioa_cfg->work_q);
|
||||
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
|
||||
|
||||
spin_lock(&ipr_driver_lock);
|
||||
|
|
|
@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
|
|||
iscsi_sw_tcp_release_conn(conn);
|
||||
}
|
||||
|
||||
static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
|
||||
char *buf, int *port,
|
||||
int (*getname)(struct socket *,
|
||||
struct sockaddr *,
|
||||
int *addrlen))
|
||||
{
|
||||
struct sockaddr_storage *addr;
|
||||
struct sockaddr_in6 *sin6;
|
||||
struct sockaddr_in *sin;
|
||||
int rc = 0, len;
|
||||
|
||||
addr = kmalloc(sizeof(*addr), GFP_KERNEL);
|
||||
if (!addr)
|
||||
return -ENOMEM;
|
||||
|
||||
if (getname(sock, (struct sockaddr *) addr, &len)) {
|
||||
rc = -ENODEV;
|
||||
goto free_addr;
|
||||
}
|
||||
|
||||
switch (addr->ss_family) {
|
||||
case AF_INET:
|
||||
sin = (struct sockaddr_in *)addr;
|
||||
spin_lock_bh(&conn->session->lock);
|
||||
sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
|
||||
*port = be16_to_cpu(sin->sin_port);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
break;
|
||||
case AF_INET6:
|
||||
sin6 = (struct sockaddr_in6 *)addr;
|
||||
spin_lock_bh(&conn->session->lock);
|
||||
sprintf(buf, "%pI6", &sin6->sin6_addr);
|
||||
*port = be16_to_cpu(sin6->sin6_port);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
break;
|
||||
}
|
||||
free_addr:
|
||||
kfree(addr);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int
|
||||
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
|
||||
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
|
||||
int is_leading)
|
||||
{
|
||||
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
|
||||
struct iscsi_host *ihost = shost_priv(shost);
|
||||
struct iscsi_session *session = cls_session->dd_data;
|
||||
struct iscsi_conn *conn = cls_conn->dd_data;
|
||||
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
|
||||
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
|
||||
|
@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
|
|||
"sockfd_lookup failed %d\n", err);
|
||||
return -EEXIST;
|
||||
}
|
||||
/*
|
||||
* copy these values now because if we drop the session
|
||||
* userspace may still want to query the values since we will
|
||||
* be using them for the reconnect
|
||||
*/
|
||||
err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
|
||||
&conn->portal_port, kernel_getpeername);
|
||||
if (err)
|
||||
goto free_socket;
|
||||
|
||||
err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
|
||||
&ihost->local_port, kernel_getsockname);
|
||||
if (err)
|
||||
goto free_socket;
|
||||
|
||||
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
|
||||
if (err)
|
||||
goto free_socket;
|
||||
|
||||
spin_lock_bh(&session->lock);
|
||||
/* bind iSCSI connection and socket */
|
||||
tcp_sw_conn->sock = sock;
|
||||
spin_unlock_bh(&session->lock);
|
||||
|
||||
/* setup Socket parameters */
|
||||
sk = sock->sk;
|
||||
|
@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
|||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
struct iscsi_conn *conn = cls_conn->dd_data;
|
||||
int len;
|
||||
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
|
||||
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
|
||||
struct sockaddr_in6 addr;
|
||||
int rc, len;
|
||||
|
||||
switch(param) {
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
spin_lock_bh(&conn->session->lock);
|
||||
len = sprintf(buf, "%hu\n", conn->portal_port);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
break;
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
|
||||
spin_lock_bh(&conn->session->lock);
|
||||
len = sprintf(buf, "%s\n", conn->portal_address);
|
||||
if (!tcp_sw_conn || !tcp_sw_conn->sock) {
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
rc = kernel_getpeername(tcp_sw_conn->sock,
|
||||
(struct sockaddr *)&addr, &len);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
break;
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
|
||||
&addr, param, buf);
|
||||
default:
|
||||
return iscsi_conn_get_param(cls_conn, param, buf);
|
||||
}
|
||||
|
||||
return len;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
|
||||
enum iscsi_host_param param, char *buf)
|
||||
{
|
||||
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
|
||||
struct iscsi_session *session = tcp_sw_host->session;
|
||||
struct iscsi_conn *conn;
|
||||
struct iscsi_tcp_conn *tcp_conn;
|
||||
struct iscsi_sw_tcp_conn *tcp_sw_conn;
|
||||
struct sockaddr_in6 addr;
|
||||
int rc, len;
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_HOST_PARAM_IPADDRESS:
|
||||
spin_lock_bh(&session->lock);
|
||||
conn = session->leadconn;
|
||||
if (!conn) {
|
||||
spin_unlock_bh(&session->lock);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
tcp_conn = conn->dd_data;
|
||||
|
||||
tcp_sw_conn = tcp_conn->dd_data;
|
||||
if (!tcp_sw_conn->sock) {
|
||||
spin_unlock_bh(&session->lock);
|
||||
return -ENOTCONN;
|
||||
}
|
||||
|
||||
rc = kernel_getsockname(tcp_sw_conn->sock,
|
||||
(struct sockaddr *)&addr, &len);
|
||||
spin_unlock_bh(&session->lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
|
||||
&addr, param, buf);
|
||||
default:
|
||||
return iscsi_host_get_param(shost, param, buf);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
|
|||
{
|
||||
struct iscsi_cls_session *cls_session;
|
||||
struct iscsi_session *session;
|
||||
struct iscsi_sw_tcp_host *tcp_sw_host;
|
||||
struct Scsi_Host *shost;
|
||||
|
||||
if (ep) {
|
||||
|
@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
|
||||
shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
|
||||
sizeof(struct iscsi_sw_tcp_host), 1);
|
||||
if (!shost)
|
||||
return NULL;
|
||||
shost->transportt = iscsi_sw_tcp_scsi_transport;
|
||||
|
@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
|
|||
if (!cls_session)
|
||||
goto remove_host;
|
||||
session = cls_session->dd_data;
|
||||
tcp_sw_host = iscsi_host_priv(shost);
|
||||
tcp_sw_host->session = session;
|
||||
|
||||
shost->can_queue = session->scsi_cmds_max;
|
||||
if (iscsi_tcp_r2tpool_alloc(session))
|
||||
|
@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
|
|||
.start_conn = iscsi_conn_start,
|
||||
.stop_conn = iscsi_sw_tcp_conn_stop,
|
||||
/* iscsi host params */
|
||||
.get_host_param = iscsi_host_get_param,
|
||||
.get_host_param = iscsi_sw_tcp_host_get_param,
|
||||
.set_host_param = iscsi_host_set_param,
|
||||
/* IO */
|
||||
.send_pdu = iscsi_conn_send_pdu,
|
||||
|
|
|
@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
|
|||
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
|
||||
};
|
||||
|
||||
struct iscsi_sw_tcp_host {
|
||||
struct iscsi_session *session;
|
||||
};
|
||||
|
||||
struct iscsi_sw_tcp_hdrbuf {
|
||||
struct iscsi_hdr hdrbuf;
|
||||
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
|
||||
|
|
|
@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */
|
|||
EXPORT_SYMBOL(fc_cpu_mask);
|
||||
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
|
||||
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
|
||||
struct workqueue_struct *fc_exch_workqueue;
|
||||
static struct workqueue_struct *fc_exch_workqueue;
|
||||
|
||||
/*
|
||||
* Structure and function definitions for managing Fibre Channel Exchanges
|
||||
|
@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
|
|||
return sp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the response handler for the exchange associated with a sequence.
|
||||
*/
|
||||
static void fc_seq_set_resp(struct fc_seq *sp,
|
||||
void (*resp)(struct fc_seq *, struct fc_frame *,
|
||||
void *),
|
||||
void *arg)
|
||||
{
|
||||
struct fc_exch *ep = fc_seq_exch(sp);
|
||||
|
||||
spin_lock_bh(&ep->ex_lock);
|
||||
ep->resp = resp;
|
||||
ep->arg = arg;
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_seq_exch_abort() - Abort an exchange and sequence
|
||||
* @req_sp: The sequence to be aborted
|
||||
|
@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
|
|||
if (e_stat & ESB_ST_ABNORMAL)
|
||||
rc = fc_exch_done_locked(ep);
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
if (!rc)
|
||||
fc_exch_delete(ep);
|
||||
if (resp)
|
||||
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
|
||||
if (!rc) {
|
||||
/* delete the exchange if it's already being aborted */
|
||||
fc_exch_delete(ep);
|
||||
return;
|
||||
}
|
||||
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
|
||||
goto done;
|
||||
}
|
||||
|
@ -1266,6 +1279,8 @@ free:
|
|||
* @fp: The request frame
|
||||
*
|
||||
* On success, the sequence pointer will be returned and also in fr_seq(@fp).
|
||||
* A reference will be held on the exchange/sequence for the caller, which
|
||||
* must call fc_seq_release().
|
||||
*/
|
||||
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
|
||||
{
|
||||
|
@ -1282,6 +1297,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
|
|||
return fr_seq(fp);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_seq_release() - Release the hold
|
||||
* @sp: The sequence.
|
||||
*/
|
||||
static void fc_seq_release(struct fc_seq *sp)
|
||||
{
|
||||
fc_exch_release(fc_seq_exch(sp));
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_recv_req() - Handler for an incoming request
|
||||
* @lport: The local port that received the request
|
||||
|
@ -2151,6 +2175,7 @@ err:
|
|||
fc_exch_mgr_del(ema);
|
||||
return -ENOMEM;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_exch_mgr_list_clone);
|
||||
|
||||
/**
|
||||
* fc_exch_mgr_alloc() - Allocate an exchange manager
|
||||
|
@ -2253,17 +2278,46 @@ void fc_exch_mgr_free(struct fc_lport *lport)
|
|||
}
|
||||
EXPORT_SYMBOL(fc_exch_mgr_free);
|
||||
|
||||
/**
|
||||
* fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending
|
||||
* upon 'xid'.
|
||||
* @f_ctl: f_ctl
|
||||
* @lport: The local port the frame was received on
|
||||
* @fh: The received frame header
|
||||
*/
|
||||
static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
|
||||
struct fc_lport *lport,
|
||||
struct fc_frame_header *fh)
|
||||
{
|
||||
struct fc_exch_mgr_anchor *ema;
|
||||
u16 xid;
|
||||
|
||||
if (f_ctl & FC_FC_EX_CTX)
|
||||
xid = ntohs(fh->fh_ox_id);
|
||||
else {
|
||||
xid = ntohs(fh->fh_rx_id);
|
||||
if (xid == FC_XID_UNKNOWN)
|
||||
return list_entry(lport->ema_list.prev,
|
||||
typeof(*ema), ema_list);
|
||||
}
|
||||
|
||||
list_for_each_entry(ema, &lport->ema_list, ema_list) {
|
||||
if ((xid >= ema->mp->min_xid) &&
|
||||
(xid <= ema->mp->max_xid))
|
||||
return ema;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
/**
|
||||
* fc_exch_recv() - Handler for received frames
|
||||
* @lport: The local port the frame was received on
|
||||
* @fp: The received frame
|
||||
* @fp: The received frame
|
||||
*/
|
||||
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
|
||||
{
|
||||
struct fc_frame_header *fh = fc_frame_header_get(fp);
|
||||
struct fc_exch_mgr_anchor *ema;
|
||||
u32 f_ctl, found = 0;
|
||||
u16 oxid;
|
||||
u32 f_ctl;
|
||||
|
||||
/* lport lock ? */
|
||||
if (!lport || lport->state == LPORT_ST_DISABLED) {
|
||||
|
@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
|
|||
}
|
||||
|
||||
f_ctl = ntoh24(fh->fh_f_ctl);
|
||||
oxid = ntohs(fh->fh_ox_id);
|
||||
if (f_ctl & FC_FC_EX_CTX) {
|
||||
list_for_each_entry(ema, &lport->ema_list, ema_list) {
|
||||
if ((oxid >= ema->mp->min_xid) &&
|
||||
(oxid <= ema->mp->max_xid)) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
FC_LPORT_DBG(lport, "Received response for out "
|
||||
"of range oxid:%hx\n", oxid);
|
||||
fc_frame_free(fp);
|
||||
return;
|
||||
}
|
||||
} else
|
||||
ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
|
||||
ema = fc_find_ema(f_ctl, lport, fh);
|
||||
if (!ema) {
|
||||
FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
|
||||
"fc_ctl <0x%x>, xid <0x%x>\n",
|
||||
f_ctl,
|
||||
(f_ctl & FC_FC_EX_CTX) ?
|
||||
ntohs(fh->fh_ox_id) :
|
||||
ntohs(fh->fh_rx_id));
|
||||
fc_frame_free(fp);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If frame is marked invalid, just drop it.
|
||||
|
@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
|
|||
if (!lport->tt.seq_start_next)
|
||||
lport->tt.seq_start_next = fc_seq_start_next;
|
||||
|
||||
if (!lport->tt.seq_set_resp)
|
||||
lport->tt.seq_set_resp = fc_seq_set_resp;
|
||||
|
||||
if (!lport->tt.exch_seq_send)
|
||||
lport->tt.exch_seq_send = fc_exch_seq_send;
|
||||
|
||||
|
@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
|
|||
if (!lport->tt.seq_assign)
|
||||
lport->tt.seq_assign = fc_seq_assign;
|
||||
|
||||
if (!lport->tt.seq_release)
|
||||
lport->tt.seq_release = fc_seq_release;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_exch_init);
|
||||
|
@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
|
|||
/**
|
||||
* fc_setup_exch_mgr() - Setup an exchange manager
|
||||
*/
|
||||
int fc_setup_exch_mgr()
|
||||
int fc_setup_exch_mgr(void)
|
||||
{
|
||||
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
|
||||
0, SLAB_HWCACHE_ALIGN, NULL);
|
||||
|
@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
|
|||
/**
|
||||
* fc_destroy_exch_mgr() - Destroy an exchange manager
|
||||
*/
|
||||
void fc_destroy_exch_mgr()
|
||||
void fc_destroy_exch_mgr(void)
|
||||
{
|
||||
destroy_workqueue(fc_exch_workqueue);
|
||||
kmem_cache_destroy(fc_em_cachep);
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
|
||||
#include "fc_libfc.h"
|
||||
|
||||
struct kmem_cache *scsi_pkt_cachep;
|
||||
static struct kmem_cache *scsi_pkt_cachep;
|
||||
|
||||
/* SRB state definitions */
|
||||
#define FC_SRB_FREE 0 /* cmd is free */
|
||||
|
@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
|
|||
if (fsp) {
|
||||
memset(fsp, 0, sizeof(*fsp));
|
||||
fsp->lp = lport;
|
||||
fsp->xfer_ddp = FC_XID_UNKNOWN;
|
||||
atomic_set(&fsp->ref_cnt, 1);
|
||||
init_timer(&fsp->timer);
|
||||
INIT_LIST_HEAD(&fsp->list);
|
||||
|
@ -1201,6 +1202,7 @@ unlock:
|
|||
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
|
||||
{
|
||||
int rc = FAILED;
|
||||
unsigned long ticks_left;
|
||||
|
||||
if (fc_fcp_send_abort(fsp))
|
||||
return FAILED;
|
||||
|
@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
|
|||
fsp->wait_for_comp = 1;
|
||||
|
||||
spin_unlock_bh(&fsp->scsi_pkt_lock);
|
||||
rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
|
||||
ticks_left = wait_for_completion_timeout(&fsp->tm_done,
|
||||
FC_SCSI_TM_TOV);
|
||||
spin_lock_bh(&fsp->scsi_pkt_lock);
|
||||
fsp->wait_for_comp = 0;
|
||||
|
||||
if (!rc) {
|
||||
if (!ticks_left) {
|
||||
FC_FCP_DBG(fsp, "target abort cmd failed\n");
|
||||
rc = FAILED;
|
||||
} else if (fsp->state & FC_SRB_ABORTED) {
|
||||
FC_FCP_DBG(fsp, "target abort cmd passed\n");
|
||||
rc = SUCCESS;
|
||||
|
@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
|
|||
*
|
||||
* scsi-eh will escalate for when either happens.
|
||||
*/
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
if (fc_fcp_lock_pkt(fsp))
|
||||
|
@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
|
|||
|
||||
/**
|
||||
* fc_queuecommand() - The queuecommand function of the SCSI template
|
||||
* @shost: The Scsi_Host that the command was issued to
|
||||
* @cmd: The scsi_cmnd to be executed
|
||||
* @done: The callback function to be called when the scsi_cmnd is complete
|
||||
*
|
||||
* This is the i/o strategy routine, called by the SCSI layer. This routine
|
||||
* is called with the host_lock held.
|
||||
* This is the i/o strategy routine, called by the SCSI layer.
|
||||
*/
|
||||
static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
|
||||
int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct fc_lport *lport = shost_priv(shost);
|
||||
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
|
||||
struct fc_fcp_pkt *fsp;
|
||||
struct fc_rport_libfc_priv *rpriv;
|
||||
|
@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
|
|||
int rc = 0;
|
||||
struct fcoe_dev_stats *stats;
|
||||
|
||||
lport = shost_priv(sc_cmd->device->host);
|
||||
|
||||
rval = fc_remote_port_chkready(rport);
|
||||
if (rval) {
|
||||
sc_cmd->result = rval;
|
||||
done(sc_cmd);
|
||||
sc_cmd->scsi_done(sc_cmd);
|
||||
return 0;
|
||||
}
|
||||
spin_unlock_irq(lport->host->host_lock);
|
||||
|
||||
if (!*(struct fc_remote_port **)rport->dd_data) {
|
||||
/*
|
||||
|
@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
|
|||
* online
|
||||
*/
|
||||
sc_cmd->result = DID_IMM_RETRY << 16;
|
||||
done(sc_cmd);
|
||||
sc_cmd->scsi_done(sc_cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
|
|||
* build the libfc request pkt
|
||||
*/
|
||||
fsp->cmd = sc_cmd; /* save the cmd */
|
||||
fsp->lp = lport; /* save the softc ptr */
|
||||
fsp->rport = rport; /* set the remote port ptr */
|
||||
fsp->xfer_ddp = FC_XID_UNKNOWN;
|
||||
sc_cmd->scsi_done = done;
|
||||
|
||||
/*
|
||||
* set up the transfer length
|
||||
|
@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
|
|||
rc = SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
out:
|
||||
spin_lock_irq(lport->host->host_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
DEF_SCSI_QCMD(fc_queuecommand)
|
||||
EXPORT_SYMBOL(fc_queuecommand);
|
||||
|
||||
/**
|
||||
|
@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
|
|||
* the sc passed in is not setup for execution like when sent
|
||||
* through the queuecommand callout.
|
||||
*/
|
||||
fsp->lp = lport; /* save the softc ptr */
|
||||
fsp->rport = rport; /* set the remote port ptr */
|
||||
|
||||
/*
|
||||
|
@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
|
|||
}
|
||||
EXPORT_SYMBOL(fc_fcp_destroy);
|
||||
|
||||
int fc_setup_fcp()
|
||||
int fc_setup_fcp(void)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
|
@ -2261,7 +2252,7 @@ int fc_setup_fcp()
|
|||
return rc;
|
||||
}
|
||||
|
||||
void fc_destroy_fcp()
|
||||
void fc_destroy_fcp(void)
|
||||
{
|
||||
if (scsi_pkt_cachep)
|
||||
kmem_cache_destroy(scsi_pkt_cachep);
|
||||
|
|
|
@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
|
|||
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
|
||||
|
||||
DEFINE_MUTEX(fc_prov_mutex);
|
||||
static LIST_HEAD(fc_local_ports);
|
||||
struct blocking_notifier_head fc_lport_notifier_head =
|
||||
BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
|
||||
EXPORT_SYMBOL(fc_lport_notifier_head);
|
||||
|
||||
/*
|
||||
* Providers which primarily send requests and PRLIs.
|
||||
*/
|
||||
struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
|
||||
[0] = &fc_rport_t0_prov,
|
||||
[FC_TYPE_FCP] = &fc_rport_fcp_init,
|
||||
};
|
||||
|
||||
/*
|
||||
* Providers which receive requests.
|
||||
*/
|
||||
struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
|
||||
[FC_TYPE_ELS] = &fc_lport_els_prov,
|
||||
};
|
||||
|
||||
/**
|
||||
* libfc_init() - Initialize libfc.ko
|
||||
*/
|
||||
|
@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
|
|||
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
|
||||
}
|
||||
EXPORT_SYMBOL(fc_fill_reply_hdr);
|
||||
|
||||
/**
|
||||
* fc_fc4_conf_lport_params() - Modify "service_params" of specified lport
|
||||
* if there is service provider (target provider) registered with libfc
|
||||
* for specified "fc_ft_type"
|
||||
* @lport: Local port which service_params needs to be modified
|
||||
* @type: FC-4 type, such as FC_TYPE_FCP
|
||||
*/
|
||||
void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
|
||||
{
|
||||
struct fc4_prov *prov_entry;
|
||||
BUG_ON(type >= FC_FC4_PROV_SIZE);
|
||||
BUG_ON(!lport);
|
||||
prov_entry = fc_passive_prov[type];
|
||||
if (type == FC_TYPE_FCP) {
|
||||
if (prov_entry && prov_entry->recv)
|
||||
lport->service_params |= FCP_SPPF_TARG_FCN;
|
||||
}
|
||||
}
|
||||
|
||||
void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
list_for_each_entry(lport, &fc_local_ports, lport_list)
|
||||
notify(lport, arg);
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(fc_lport_iterate);
|
||||
|
||||
/**
|
||||
* fc_fc4_register_provider() - register FC-4 upper-level provider.
|
||||
* @type: FC-4 type, such as FC_TYPE_FCP
|
||||
* @prov: structure describing provider including ops vector.
|
||||
*
|
||||
* Returns 0 on success, negative error otherwise.
|
||||
*/
|
||||
int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
|
||||
{
|
||||
struct fc4_prov **prov_entry;
|
||||
int ret = 0;
|
||||
|
||||
if (type >= FC_FC4_PROV_SIZE)
|
||||
return -EINVAL;
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
|
||||
if (*prov_entry)
|
||||
ret = -EBUSY;
|
||||
else
|
||||
*prov_entry = prov;
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_fc4_register_provider);
|
||||
|
||||
/**
|
||||
* fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
|
||||
* @type: FC-4 type, such as FC_TYPE_FCP
|
||||
* @prov: structure describing provider including ops vector.
|
||||
*/
|
||||
void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
|
||||
{
|
||||
BUG_ON(type >= FC_FC4_PROV_SIZE);
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
if (prov->recv)
|
||||
rcu_assign_pointer(fc_passive_prov[type], NULL);
|
||||
else
|
||||
rcu_assign_pointer(fc_active_prov[type], NULL);
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
synchronize_rcu();
|
||||
}
|
||||
EXPORT_SYMBOL(fc_fc4_deregister_provider);
|
||||
|
||||
/**
|
||||
* fc_fc4_add_lport() - add new local port to list and run notifiers.
|
||||
* @lport: The new local port.
|
||||
*/
|
||||
void fc_fc4_add_lport(struct fc_lport *lport)
|
||||
{
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
list_add_tail(&lport->lport_list, &fc_local_ports);
|
||||
blocking_notifier_call_chain(&fc_lport_notifier_head,
|
||||
FC_LPORT_EV_ADD, lport);
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_fc4_del_lport() - remove local port from list and run notifiers.
|
||||
* @lport: The new local port.
|
||||
*/
|
||||
void fc_fc4_del_lport(struct fc_lport *lport)
|
||||
{
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
list_del(&lport->lport_list);
|
||||
blocking_notifier_call_chain(&fc_lport_notifier_head,
|
||||
FC_LPORT_EV_DEL, lport);
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
}
|
||||
|
|
|
@ -93,6 +93,17 @@ extern unsigned int fc_debug_logging;
|
|||
printk(KERN_INFO "host%u: scsi: " fmt, \
|
||||
(lport)->host->host_no, ##args))
|
||||
|
||||
/*
|
||||
* FC-4 Providers.
|
||||
*/
|
||||
extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
|
||||
extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
|
||||
extern struct mutex fc_prov_mutex; /* lock over table changes */
|
||||
|
||||
extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
|
||||
extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
|
||||
extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
|
||||
|
||||
/*
|
||||
* Set up direct-data placement for this I/O request
|
||||
*/
|
||||
|
@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
|
|||
* Internal libfc functions
|
||||
*/
|
||||
const char *fc_els_resp_type(struct fc_frame *);
|
||||
extern void fc_fc4_add_lport(struct fc_lport *);
|
||||
extern void fc_fc4_del_lport(struct fc_lport *);
|
||||
extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
|
||||
|
||||
/*
|
||||
* Copies a buffer into an sg list
|
||||
|
|
|
@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
|
|||
lport->tt.fcp_abort_io(lport);
|
||||
lport->tt.disc_stop_final(lport);
|
||||
lport->tt.exch_mgr_reset(lport, 0, 0);
|
||||
fc_fc4_del_lport(lport);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_lport_destroy);
|
||||
|
@ -849,7 +850,7 @@ out:
|
|||
}
|
||||
|
||||
/**
|
||||
* fc_lport_recv_req() - The generic lport request handler
|
||||
* fc_lport_recv_els_req() - The generic lport ELS request handler
|
||||
* @lport: The local port that received the request
|
||||
* @fp: The request frame
|
||||
*
|
||||
|
@ -859,9 +860,9 @@ out:
|
|||
* Locking Note: This function should not be called with the lport
|
||||
* lock held becuase it will grab the lock.
|
||||
*/
|
||||
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
|
||||
static void fc_lport_recv_els_req(struct fc_lport *lport,
|
||||
struct fc_frame *fp)
|
||||
{
|
||||
struct fc_frame_header *fh = fc_frame_header_get(fp);
|
||||
void (*recv)(struct fc_lport *, struct fc_frame *);
|
||||
|
||||
mutex_lock(&lport->lp_mutex);
|
||||
|
@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
|
|||
*/
|
||||
if (!lport->link_up)
|
||||
fc_frame_free(fp);
|
||||
else if (fh->fh_type == FC_TYPE_ELS &&
|
||||
fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
|
||||
else {
|
||||
/*
|
||||
* Check opcode.
|
||||
*/
|
||||
|
@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
|
|||
}
|
||||
|
||||
recv(lport, fp);
|
||||
} else {
|
||||
FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
|
||||
fr_eof(fp));
|
||||
fc_frame_free(fp);
|
||||
}
|
||||
mutex_unlock(&lport->lp_mutex);
|
||||
}
|
||||
|
||||
static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
|
||||
const struct fc_els_spp *spp_in,
|
||||
struct fc_els_spp *spp_out)
|
||||
{
|
||||
return FC_SPP_RESP_INVL;
|
||||
}
|
||||
|
||||
struct fc4_prov fc_lport_els_prov = {
|
||||
.prli = fc_lport_els_prli,
|
||||
.recv = fc_lport_recv_els_req,
|
||||
};
|
||||
|
||||
/**
|
||||
* fc_lport_recv_req() - The generic lport request handler
|
||||
* @lport: The lport that received the request
|
||||
* @fp: The frame the request is in
|
||||
*
|
||||
* Locking Note: This function should not be called with the lport
|
||||
* lock held becuase it may grab the lock.
|
||||
*/
|
||||
static void fc_lport_recv_req(struct fc_lport *lport,
|
||||
struct fc_frame *fp)
|
||||
{
|
||||
struct fc_frame_header *fh = fc_frame_header_get(fp);
|
||||
struct fc_seq *sp = fr_seq(fp);
|
||||
struct fc4_prov *prov;
|
||||
|
||||
/*
|
||||
* Use RCU read lock and module_lock to be sure module doesn't
|
||||
* deregister and get unloaded while we're calling it.
|
||||
* try_module_get() is inlined and accepts a NULL parameter.
|
||||
* Only ELSes and FCP target ops should come through here.
|
||||
* The locking is unfortunate, and a better scheme is being sought.
|
||||
*/
|
||||
|
||||
rcu_read_lock();
|
||||
if (fh->fh_type >= FC_FC4_PROV_SIZE)
|
||||
goto drop;
|
||||
prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
|
||||
if (!prov || !try_module_get(prov->module))
|
||||
goto drop;
|
||||
rcu_read_unlock();
|
||||
prov->recv(lport, fp);
|
||||
module_put(prov->module);
|
||||
return;
|
||||
drop:
|
||||
rcu_read_unlock();
|
||||
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
|
||||
fc_frame_free(fp);
|
||||
lport->tt.exch_done(sp);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_lport_reset() - Reset a local port
|
||||
* @lport: The local port which should be reset
|
||||
|
@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
|
|||
*/
|
||||
int fc_lport_config(struct fc_lport *lport)
|
||||
{
|
||||
INIT_LIST_HEAD(&lport->ema_list);
|
||||
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
|
||||
mutex_init(&lport->lp_mutex);
|
||||
|
||||
|
@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
|
|||
|
||||
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
|
||||
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
|
||||
fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
|
|||
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
|
||||
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
|
||||
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
|
||||
fc_fc4_add_lport(lport);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
|
|||
|
||||
vn_port = libfc_host_alloc(shost->hostt, privsize);
|
||||
if (!vn_port)
|
||||
goto err_out;
|
||||
if (fc_exch_mgr_list_clone(n_port, vn_port))
|
||||
goto err_put;
|
||||
return vn_port;
|
||||
|
||||
vn_port->vport = vport;
|
||||
vport->dd_data = vn_port;
|
||||
|
@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
|
|||
mutex_unlock(&n_port->lp_mutex);
|
||||
|
||||
return vn_port;
|
||||
|
||||
err_put:
|
||||
scsi_host_put(vn_port->host);
|
||||
err_out:
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(libfc_vport_create);
|
||||
|
||||
|
@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
|
|||
|
||||
return lport;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_vport_id_lookup);
|
||||
|
||||
/*
|
||||
* When setting the link state of vports during an lport state change, it's
|
||||
|
|
|
@ -58,7 +58,7 @@
|
|||
|
||||
#include "fc_libfc.h"
|
||||
|
||||
struct workqueue_struct *rport_event_queue;
|
||||
static struct workqueue_struct *rport_event_queue;
|
||||
|
||||
static void fc_rport_enter_flogi(struct fc_rport_priv *);
|
||||
static void fc_rport_enter_plogi(struct fc_rport_priv *);
|
||||
|
@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
|
|||
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
|
||||
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
|
||||
INIT_WORK(&rdata->event_work, fc_rport_work);
|
||||
if (port_id != FC_FID_DIR_SERV)
|
||||
if (port_id != FC_FID_DIR_SERV) {
|
||||
rdata->lld_event_callback = lport->tt.rport_event_callback;
|
||||
list_add_rcu(&rdata->peers, &lport->disc.rports);
|
||||
}
|
||||
return rdata;
|
||||
}
|
||||
|
||||
|
@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
|
|||
struct fc_rport_operations *rport_ops;
|
||||
struct fc_rport_identifiers ids;
|
||||
struct fc_rport *rport;
|
||||
struct fc4_prov *prov;
|
||||
u8 type;
|
||||
|
||||
mutex_lock(&rdata->rp_mutex);
|
||||
event = rdata->event;
|
||||
|
@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
|
|||
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
|
||||
rport_ops->event_callback(lport, rdata, event);
|
||||
}
|
||||
if (rdata->lld_event_callback) {
|
||||
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
|
||||
rdata->lld_event_callback(lport, rdata, event);
|
||||
}
|
||||
kref_put(&rdata->kref, lport->tt.rport_destroy);
|
||||
break;
|
||||
|
||||
case RPORT_EV_FAILED:
|
||||
case RPORT_EV_LOGO:
|
||||
case RPORT_EV_STOP:
|
||||
if (rdata->prli_count) {
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
|
||||
prov = fc_passive_prov[type];
|
||||
if (prov && prov->prlo)
|
||||
prov->prlo(rdata);
|
||||
}
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
}
|
||||
port_id = rdata->ids.port_id;
|
||||
mutex_unlock(&rdata->rp_mutex);
|
||||
|
||||
|
@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
|
|||
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
|
||||
rport_ops->event_callback(lport, rdata, event);
|
||||
}
|
||||
if (rdata->lld_event_callback) {
|
||||
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
|
||||
rdata->lld_event_callback(lport, rdata, event);
|
||||
}
|
||||
cancel_delayed_work_sync(&rdata->retry_work);
|
||||
|
||||
/*
|
||||
|
@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
|
|||
if (port_id == FC_FID_DIR_SERV) {
|
||||
rdata->event = RPORT_EV_NONE;
|
||||
mutex_unlock(&rdata->rp_mutex);
|
||||
kref_put(&rdata->kref, lport->tt.rport_destroy);
|
||||
} else if ((rdata->flags & FC_RP_STARTED) &&
|
||||
rdata->major_retries <
|
||||
lport->max_rport_retry_count) {
|
||||
|
@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
|
|||
|
||||
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
|
||||
if (PTR_ERR(fp) == -FC_EX_CLOSED)
|
||||
return fc_rport_error(rdata, fp);
|
||||
goto out;
|
||||
|
||||
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
|
||||
FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
|
||||
|
@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
|
|||
return;
|
||||
}
|
||||
|
||||
return fc_rport_error(rdata, fp);
|
||||
out:
|
||||
fc_rport_error(rdata, fp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
|
|||
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
|
||||
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
|
||||
|
||||
/* save plogi response sp_features for further reference */
|
||||
rdata->sp_features = ntohs(plp->fl_csp.sp_features);
|
||||
|
||||
if (lport->point_to_multipoint)
|
||||
fc_rport_login_complete(rdata, fp);
|
||||
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
|
||||
|
@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
|
|||
struct fc_els_prli prli;
|
||||
struct fc_els_spp spp;
|
||||
} *pp;
|
||||
struct fc_els_spp temp_spp;
|
||||
struct fc4_prov *prov;
|
||||
u32 roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
u32 fcp_parm = 0;
|
||||
u8 op;
|
||||
|
@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
|
|||
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
|
||||
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
|
||||
pp->spp.spp_flags);
|
||||
rdata->spp_type = pp->spp.spp_type;
|
||||
if (resp_code != FC_SPP_RESP_ACK) {
|
||||
if (resp_code == FC_SPP_RESP_CONF)
|
||||
fc_rport_error(rdata, fp);
|
||||
|
@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
|
|||
fcp_parm = ntohl(pp->spp.spp_params);
|
||||
if (fcp_parm & FCP_SPPF_RETRY)
|
||||
rdata->flags |= FC_RP_FLAGS_RETRY;
|
||||
if (fcp_parm & FCP_SPPF_CONF_COMPL)
|
||||
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
|
||||
|
||||
prov = fc_passive_prov[FC_TYPE_FCP];
|
||||
if (prov) {
|
||||
memset(&temp_spp, 0, sizeof(temp_spp));
|
||||
prov->prli(rdata, pp->prli.prli_spp_len,
|
||||
&pp->spp, &temp_spp);
|
||||
}
|
||||
|
||||
rdata->supported_classes = FC_COS_CLASS3;
|
||||
if (fcp_parm & FCP_SPPF_INIT_FCN)
|
||||
|
@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
|
|||
struct fc_els_spp spp;
|
||||
} *pp;
|
||||
struct fc_frame *fp;
|
||||
struct fc4_prov *prov;
|
||||
|
||||
/*
|
||||
* If the rport is one of the well known addresses
|
||||
|
@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
|
|||
return;
|
||||
}
|
||||
|
||||
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
|
||||
fc_rport_prli_resp, rdata,
|
||||
2 * lport->r_a_tov))
|
||||
fc_prli_fill(lport, fp);
|
||||
|
||||
prov = fc_passive_prov[FC_TYPE_FCP];
|
||||
if (prov) {
|
||||
pp = fc_frame_payload_get(fp, sizeof(*pp));
|
||||
prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
|
||||
}
|
||||
|
||||
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
|
||||
fc_host_port_id(lport->host), FC_TYPE_ELS,
|
||||
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
|
||||
|
||||
if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
|
||||
NULL, rdata, 2 * lport->r_a_tov))
|
||||
fc_rport_error_retry(rdata, NULL);
|
||||
else
|
||||
kref_get(&rdata->kref);
|
||||
|
@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
|
|||
unsigned int len;
|
||||
unsigned int plen;
|
||||
enum fc_els_spp_resp resp;
|
||||
enum fc_els_spp_resp passive;
|
||||
struct fc_seq_els_data rjt_data;
|
||||
u32 fcp_parm;
|
||||
u32 roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
struct fc4_prov *prov;
|
||||
|
||||
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
|
||||
fc_rport_state(rdata));
|
||||
|
@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
|
|||
pp->prli.prli_len = htons(len);
|
||||
len -= sizeof(struct fc_els_prli);
|
||||
|
||||
/* reinitialize remote port roles */
|
||||
rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
|
||||
/*
|
||||
* Go through all the service parameter pages and build
|
||||
* response. If plen indicates longer SPP than standard,
|
||||
* use that. The entire response has been pre-cleared above.
|
||||
*/
|
||||
spp = &pp->spp;
|
||||
mutex_lock(&fc_prov_mutex);
|
||||
while (len >= plen) {
|
||||
rdata->spp_type = rspp->spp_type;
|
||||
spp->spp_type = rspp->spp_type;
|
||||
spp->spp_type_ext = rspp->spp_type_ext;
|
||||
spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
|
||||
resp = FC_SPP_RESP_ACK;
|
||||
resp = 0;
|
||||
|
||||
switch (rspp->spp_type) {
|
||||
case 0: /* common to all FC-4 types */
|
||||
break;
|
||||
case FC_TYPE_FCP:
|
||||
fcp_parm = ntohl(rspp->spp_params);
|
||||
if (fcp_parm & FCP_SPPF_RETRY)
|
||||
rdata->flags |= FC_RP_FLAGS_RETRY;
|
||||
rdata->supported_classes = FC_COS_CLASS3;
|
||||
if (fcp_parm & FCP_SPPF_INIT_FCN)
|
||||
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
|
||||
if (fcp_parm & FCP_SPPF_TARG_FCN)
|
||||
roles |= FC_RPORT_ROLE_FCP_TARGET;
|
||||
rdata->ids.roles = roles;
|
||||
|
||||
spp->spp_params = htonl(lport->service_params);
|
||||
break;
|
||||
default:
|
||||
resp = FC_SPP_RESP_INVL;
|
||||
break;
|
||||
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
|
||||
prov = fc_active_prov[rspp->spp_type];
|
||||
if (prov)
|
||||
resp = prov->prli(rdata, plen, rspp, spp);
|
||||
prov = fc_passive_prov[rspp->spp_type];
|
||||
if (prov) {
|
||||
passive = prov->prli(rdata, plen, rspp, spp);
|
||||
if (!resp || passive == FC_SPP_RESP_ACK)
|
||||
resp = passive;
|
||||
}
|
||||
}
|
||||
if (!resp) {
|
||||
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
|
||||
resp |= FC_SPP_RESP_CONF;
|
||||
else
|
||||
resp |= FC_SPP_RESP_INVL;
|
||||
}
|
||||
spp->spp_flags |= resp;
|
||||
len -= plen;
|
||||
rspp = (struct fc_els_spp *)((char *)rspp + plen);
|
||||
spp = (struct fc_els_spp *)((char *)spp + plen);
|
||||
}
|
||||
mutex_unlock(&fc_prov_mutex);
|
||||
|
||||
/*
|
||||
* Send LS_ACC. If this fails, the originator should retry.
|
||||
|
@ -1886,10 +1932,83 @@ int fc_rport_init(struct fc_lport *lport)
|
|||
}
|
||||
EXPORT_SYMBOL(fc_rport_init);
|
||||
|
||||
/**
|
||||
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
|
||||
* @rdata: remote port private
|
||||
* @spp_len: service parameter page length
|
||||
* @rspp: received service parameter page
|
||||
* @spp: response service parameter page
|
||||
*
|
||||
* Returns the value for the response code to be placed in spp_flags;
|
||||
* Returns 0 if not an initiator.
|
||||
*/
|
||||
static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
|
||||
const struct fc_els_spp *rspp,
|
||||
struct fc_els_spp *spp)
|
||||
{
|
||||
struct fc_lport *lport = rdata->local_port;
|
||||
u32 fcp_parm;
|
||||
|
||||
fcp_parm = ntohl(rspp->spp_params);
|
||||
rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
|
||||
if (fcp_parm & FCP_SPPF_INIT_FCN)
|
||||
rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
|
||||
if (fcp_parm & FCP_SPPF_TARG_FCN)
|
||||
rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
|
||||
if (fcp_parm & FCP_SPPF_RETRY)
|
||||
rdata->flags |= FC_RP_FLAGS_RETRY;
|
||||
rdata->supported_classes = FC_COS_CLASS3;
|
||||
|
||||
if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
|
||||
return 0;
|
||||
|
||||
spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
|
||||
|
||||
/*
|
||||
* OR in our service parameters with other providers (target), if any.
|
||||
*/
|
||||
fcp_parm = ntohl(spp->spp_params);
|
||||
spp->spp_params = htonl(fcp_parm | lport->service_params);
|
||||
return FC_SPP_RESP_ACK;
|
||||
}
|
||||
|
||||
/*
|
||||
* FC-4 provider ops for FCP initiator.
|
||||
*/
|
||||
struct fc4_prov fc_rport_fcp_init = {
|
||||
.prli = fc_rport_fcp_prli,
|
||||
};
|
||||
|
||||
/**
|
||||
* fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
|
||||
* @rdata: remote port private
|
||||
* @spp_len: service parameter page length
|
||||
* @rspp: received service parameter page
|
||||
* @spp: response service parameter page
|
||||
*/
|
||||
static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
|
||||
const struct fc_els_spp *rspp,
|
||||
struct fc_els_spp *spp)
|
||||
{
|
||||
if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
|
||||
return FC_SPP_RESP_INVL;
|
||||
return FC_SPP_RESP_ACK;
|
||||
}
|
||||
|
||||
/*
|
||||
* FC-4 provider ops for type 0 service parameters.
|
||||
*
|
||||
* This handles the special case of type 0 which is always successful
|
||||
* but doesn't do anything otherwise.
|
||||
*/
|
||||
struct fc4_prov fc_rport_t0_prov = {
|
||||
.prli = fc_rport_t0_prli,
|
||||
};
|
||||
|
||||
/**
|
||||
* fc_setup_rport() - Initialize the rport_event_queue
|
||||
*/
|
||||
int fc_setup_rport()
|
||||
int fc_setup_rport(void)
|
||||
{
|
||||
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
|
||||
if (!rport_event_queue)
|
||||
|
@ -1900,7 +2019,7 @@ int fc_setup_rport()
|
|||
/**
|
||||
* fc_destroy_rport() - Destroy the rport_event_queue
|
||||
*/
|
||||
void fc_destroy_rport()
|
||||
void fc_destroy_rport(void)
|
||||
{
|
||||
destroy_workqueue(rport_event_queue);
|
||||
}
|
||||
|
|
|
@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
|
||||
|
||||
int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
|
||||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
struct sockaddr_in6 *sin6 = NULL;
|
||||
struct sockaddr_in *sin = NULL;
|
||||
int len;
|
||||
|
||||
switch (addr->ss_family) {
|
||||
case AF_INET:
|
||||
sin = (struct sockaddr_in *)addr;
|
||||
break;
|
||||
case AF_INET6:
|
||||
sin6 = (struct sockaddr_in6 *)addr;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (param) {
|
||||
case ISCSI_PARAM_CONN_ADDRESS:
|
||||
case ISCSI_HOST_PARAM_IPADDRESS:
|
||||
if (sin)
|
||||
len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
|
||||
else
|
||||
len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
|
||||
break;
|
||||
case ISCSI_PARAM_CONN_PORT:
|
||||
if (sin)
|
||||
len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
|
||||
else
|
||||
len = sprintf(buf, "%hu\n",
|
||||
be16_to_cpu(sin6->sin6_port));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
|
||||
|
||||
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
||||
enum iscsi_param param, char *buf)
|
||||
{
|
||||
|
@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
|
|||
case ISCSI_HOST_PARAM_INITIATOR_NAME:
|
||||
len = sprintf(buf, "%s\n", ihost->initiatorname);
|
||||
break;
|
||||
case ISCSI_HOST_PARAM_IPADDRESS:
|
||||
len = sprintf(buf, "%s\n", ihost->local_address);
|
||||
break;
|
||||
default:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
|
|
@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
|
|||
Allows sas hosts to receive SMP frames. Selecting this
|
||||
option builds an SMP interpreter into libsas. Say
|
||||
N here if you want to save the few kb this consumes.
|
||||
|
||||
config SCSI_SAS_LIBSAS_DEBUG
|
||||
bool "Compile the SAS Domain Transport Attributes in debug mode"
|
||||
default y
|
||||
depends on SCSI_SAS_LIBSAS
|
||||
help
|
||||
Compiles the SAS Layer in debug mode. In debug mode, the
|
||||
SAS Layer prints diagnostic and debug messages.
|
||||
|
|
|
@ -21,10 +21,6 @@
|
|||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
|
||||
ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
|
||||
EXTRA_CFLAGS += -DSAS_DEBUG
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
|
||||
libsas-y += sas_init.o \
|
||||
sas_phy.o \
|
||||
|
|
|
@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
|
|||
case SAS_SG_ERR:
|
||||
return AC_ERR_INVALID;
|
||||
|
||||
case SAM_STAT_CHECK_CONDITION:
|
||||
case SAS_OPEN_TO:
|
||||
case SAS_OPEN_REJECT:
|
||||
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
|
||||
__func__, ts->stat);
|
||||
return AC_ERR_OTHER;
|
||||
|
||||
case SAM_STAT_CHECK_CONDITION:
|
||||
case SAS_ABORTED_TASK:
|
||||
return AC_ERR_DEV;
|
||||
|
||||
|
@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
|
|||
sas_ha = dev->port->ha;
|
||||
|
||||
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
|
||||
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
|
||||
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
|
||||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
|
||||
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
|
||||
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
|
||||
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
|
||||
dev->sata_dev.sstatus = resp->sstatus;
|
||||
dev->sata_dev.serror = resp->serror;
|
||||
dev->sata_dev.scontrol = resp->scontrol;
|
||||
} else if (stat->stat != SAM_STAT_GOOD) {
|
||||
} else {
|
||||
ac = sas_to_ata_err(stat);
|
||||
if (ac) {
|
||||
SAS_DPRINTK("%s: SAS error %x\n", __func__,
|
||||
|
@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
|
|||
}
|
||||
}
|
||||
|
||||
static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
|
||||
u32 val)
|
||||
{
|
||||
struct domain_device *dev = link->ap->private_data;
|
||||
|
||||
SAS_DPRINTK("STUB %s\n", __func__);
|
||||
switch (sc_reg_in) {
|
||||
case SCR_STATUS:
|
||||
dev->sata_dev.sstatus = val;
|
||||
break;
|
||||
case SCR_CONTROL:
|
||||
dev->sata_dev.scontrol = val;
|
||||
break;
|
||||
case SCR_ERROR:
|
||||
dev->sata_dev.serror = val;
|
||||
break;
|
||||
case SCR_ACTIVE:
|
||||
dev->sata_dev.ap->link.sactive = val;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
|
||||
u32 *val)
|
||||
{
|
||||
struct domain_device *dev = link->ap->private_data;
|
||||
|
||||
SAS_DPRINTK("STUB %s\n", __func__);
|
||||
switch (sc_reg_in) {
|
||||
case SCR_STATUS:
|
||||
*val = dev->sata_dev.sstatus;
|
||||
return 0;
|
||||
case SCR_CONTROL:
|
||||
*val = dev->sata_dev.scontrol;
|
||||
return 0;
|
||||
case SCR_ERROR:
|
||||
*val = dev->sata_dev.serror;
|
||||
return 0;
|
||||
case SCR_ACTIVE:
|
||||
*val = dev->sata_dev.ap->link.sactive;
|
||||
return 0;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static struct ata_port_operations sas_sata_ops = {
|
||||
.prereset = ata_std_prereset,
|
||||
.softreset = NULL,
|
||||
|
@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = {
|
|||
.qc_fill_rtf = sas_ata_qc_fill_rtf,
|
||||
.port_start = ata_sas_port_start,
|
||||
.port_stop = ata_sas_port_stop,
|
||||
.scr_read = sas_ata_scr_read,
|
||||
.scr_write = sas_ata_scr_write
|
||||
};
|
||||
|
||||
static struct ata_port_info sata_port_info = {
|
||||
|
@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
|
|||
|
||||
if (!dev_is_sata(ddev))
|
||||
continue;
|
||||
|
||||
|
||||
ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
|
||||
ata_scsi_port_error_handler(shost, ap);
|
||||
}
|
||||
|
@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
|
|||
LIST_HEAD(sata_q);
|
||||
|
||||
ap = NULL;
|
||||
|
||||
|
||||
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
|
||||
struct domain_device *ddev = cmd_to_domain_dev(cmd);
|
||||
|
||||
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
|
||||
continue;
|
||||
if(ap && ap != ddev->sata_dev.ap)
|
||||
if (ap && ap != ddev->sata_dev.ap)
|
||||
continue;
|
||||
ap = ddev->sata_dev.ap;
|
||||
rtn = 1;
|
||||
|
@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
|
|||
}
|
||||
|
||||
if (!list_empty(&sata_q)) {
|
||||
ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n");
|
||||
ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
|
||||
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
|
||||
/*
|
||||
* ata's error handler may leave the cmd on the list
|
||||
* so make sure they don't remain on a stack list
|
||||
* about to go out of scope.
|
||||
*
|
||||
* This looks strange, since the commands are
|
||||
* now part of no list, but the next error
|
||||
* action will be ata_port_error_handler()
|
||||
* which takes no list and sweeps them up
|
||||
* anyway from the ata tag array.
|
||||
*/
|
||||
while (!list_empty(&sata_q))
|
||||
list_del_init(sata_q.next);
|
||||
}
|
||||
} while (ap);
|
||||
|
||||
|
|
|
@ -24,8 +24,6 @@
|
|||
|
||||
#include "sas_dump.h"
|
||||
|
||||
#ifdef SAS_DEBUG
|
||||
|
||||
static const char *sas_hae_str[] = {
|
||||
[0] = "HAE_RESET",
|
||||
};
|
||||
|
@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
|
|||
SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
|
||||
SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
|
||||
}
|
||||
|
||||
#endif /* SAS_DEBUG */
|
||||
|
|
|
@ -24,19 +24,7 @@
|
|||
|
||||
#include "sas_internal.h"
|
||||
|
||||
#ifdef SAS_DEBUG
|
||||
|
||||
void sas_dprint_porte(int phyid, enum port_event pe);
|
||||
void sas_dprint_phye(int phyid, enum phy_event pe);
|
||||
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
|
||||
void sas_dump_port(struct asd_sas_port *port);
|
||||
|
||||
#else /* SAS_DEBUG */
|
||||
|
||||
static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
|
||||
static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
|
||||
static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
|
||||
enum ha_event he) { }
|
||||
static inline void sas_dump_port(struct asd_sas_port *port) { }
|
||||
|
||||
#endif /* SAS_DEBUG */
|
||||
|
|
|
@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
|
|||
* dev to host FIS as described in section G.5 of
|
||||
* sas-2 r 04b */
|
||||
dr = &((struct smp_resp *)disc_resp)->disc;
|
||||
if (memcmp(dev->sas_addr, dr->attached_sas_addr,
|
||||
SAS_ADDR_SIZE) == 0) {
|
||||
sas_printk("Found loopback topology, just ignore it!\n");
|
||||
return 0;
|
||||
}
|
||||
if (!(dr->attached_dev_type == 0 &&
|
||||
dr->attached_sata_dev))
|
||||
break;
|
||||
|
|
|
@ -33,11 +33,7 @@
|
|||
|
||||
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
|
||||
|
||||
#ifdef SAS_DEBUG
|
||||
#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
|
||||
#else
|
||||
#define SAS_DPRINTK(fmt, ...)
|
||||
#endif
|
||||
#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
|
||||
|
||||
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
|
||||
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
|
||||
|
|
|
@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
|
|||
{
|
||||
struct sas_task *task = TO_SAS_TASK(cmd);
|
||||
unsigned long flags;
|
||||
enum blk_eh_timer_return rtn;
|
||||
enum blk_eh_timer_return rtn;
|
||||
|
||||
if (sas_ata_timed_out(cmd, task, &rtn))
|
||||
return rtn;
|
||||
|
||||
|
||||
if (!task) {
|
||||
cmd->request->timeout /= 2;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
|
||||
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.emulex.com *
|
||||
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
|
||||
|
@ -325,6 +325,7 @@ struct lpfc_vport {
|
|||
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
|
||||
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
|
||||
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
|
||||
#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
|
||||
|
||||
uint32_t ct_flags;
|
||||
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
|
||||
|
@ -348,6 +349,8 @@ struct lpfc_vport {
|
|||
|
||||
uint32_t fc_myDID; /* fibre channel S_ID */
|
||||
uint32_t fc_prevDID; /* previous fibre channel S_ID */
|
||||
struct lpfc_name fabric_portname;
|
||||
struct lpfc_name fabric_nodename;
|
||||
|
||||
int32_t stopped; /* HBA has not been restarted since last ERATT */
|
||||
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
|
||||
|
@ -372,6 +375,7 @@ struct lpfc_vport {
|
|||
#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
|
||||
#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
|
||||
#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
|
||||
#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
|
||||
|
||||
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
|
||||
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
|
||||
|
@ -382,6 +386,7 @@ struct lpfc_vport {
|
|||
|
||||
struct timer_list fc_fdmitmo;
|
||||
struct timer_list els_tmofunc;
|
||||
struct timer_list delayed_disc_tmo;
|
||||
|
||||
int unreg_vpi_cmpl;
|
||||
|
||||
|
@ -548,6 +553,8 @@ struct lpfc_hba {
|
|||
#define LPFC_SLI3_CRP_ENABLED 0x08
|
||||
#define LPFC_SLI3_BG_ENABLED 0x20
|
||||
#define LPFC_SLI3_DSS_ENABLED 0x40
|
||||
#define LPFC_SLI4_PERFH_ENABLED 0x80
|
||||
#define LPFC_SLI4_PHWQ_ENABLED 0x100
|
||||
uint32_t iocb_cmd_size;
|
||||
uint32_t iocb_rsp_size;
|
||||
|
||||
|
@ -655,7 +662,7 @@ struct lpfc_hba {
|
|||
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
|
||||
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
|
||||
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
|
||||
|
||||
uint32_t cfg_enable_dss;
|
||||
lpfc_vpd_t vpd; /* vital product data */
|
||||
|
||||
struct pci_dev *pcidev;
|
||||
|
@ -792,6 +799,10 @@ struct lpfc_hba {
|
|||
struct dentry *debug_slow_ring_trc;
|
||||
struct lpfc_debugfs_trc *slow_ring_trc;
|
||||
atomic_t slow_ring_trc_cnt;
|
||||
/* iDiag debugfs sub-directory */
|
||||
struct dentry *idiag_root;
|
||||
struct dentry *idiag_pci_cfg;
|
||||
struct dentry *idiag_que_info;
|
||||
#endif
|
||||
|
||||
/* Used for deferred freeing of ELS data buffers */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
|
||||
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.emulex.com *
|
||||
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
|
||||
|
@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
|
|||
int status = 0;
|
||||
int cnt = 0;
|
||||
int i;
|
||||
int rc;
|
||||
|
||||
init_completion(&online_compl);
|
||||
lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
LPFC_EVT_OFFLINE_PREP);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
wait_for_completion(&online_compl);
|
||||
|
||||
if (status != 0)
|
||||
|
@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
|
|||
}
|
||||
|
||||
init_completion(&online_compl);
|
||||
lpfc_workq_post_event(phba, &status, &online_compl, type);
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
wait_for_completion(&online_compl);
|
||||
|
||||
if (status != 0)
|
||||
|
@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
|
|||
*
|
||||
* Notes:
|
||||
* Assumes any error from lpfc_do_offline() will be negative.
|
||||
* Do not make this function static.
|
||||
*
|
||||
* Returns:
|
||||
* lpfc_do_offline() return code if not zero
|
||||
|
@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
|
|||
{
|
||||
struct completion online_compl;
|
||||
int status = 0;
|
||||
int rc;
|
||||
|
||||
if (!phba->cfg_enable_hba_reset)
|
||||
return -EIO;
|
||||
|
@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
|
|||
return status;
|
||||
|
||||
init_completion(&online_compl);
|
||||
lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
LPFC_EVT_ONLINE);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
wait_for_completion(&online_compl);
|
||||
|
||||
if (status != 0)
|
||||
|
@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
|
|||
struct lpfc_hba *phba = vport->phba;
|
||||
struct completion online_compl;
|
||||
int status=0;
|
||||
int rc;
|
||||
|
||||
if (!phba->cfg_enable_hba_reset)
|
||||
return -EACCES;
|
||||
init_completion(&online_compl);
|
||||
|
||||
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
|
||||
lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
rc = lpfc_workq_post_event(phba, &status, &online_compl,
|
||||
LPFC_EVT_ONLINE);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
wait_for_completion(&online_compl);
|
||||
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
|
||||
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
|
||||
|
@ -1278,6 +1293,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
|
|||
return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_dss_show - Return the current state of dss and the configured state
|
||||
* @dev: class converted to a Scsi_host structure.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: on return contains the formatted text.
|
||||
*
|
||||
* Returns: size of formatted string.
|
||||
**/
|
||||
static ssize_t
|
||||
lpfc_dss_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct Scsi_Host *shost = class_to_shost(dev);
|
||||
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
|
||||
(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
|
||||
(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
|
||||
"" : "Not ");
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_param_show - Return a cfg attribute value in decimal
|
||||
*
|
||||
|
@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
|
|||
|
||||
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_param_init(name, defval, minval, maxval)
|
||||
|
||||
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_param_show(name)\
|
||||
lpfc_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
|
|||
|
||||
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_param_show(name)\
|
||||
lpfc_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
|
|||
|
||||
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_param_hex_show(name)\
|
||||
lpfc_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
|
|||
|
||||
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_param_hex_show(name)\
|
||||
lpfc_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
|
|||
|
||||
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_vport_param_init(name, defval, minval, maxval)
|
||||
|
||||
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_vport_param_show(name)\
|
||||
lpfc_vport_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
|
|||
|
||||
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_vport_param_show(name)\
|
||||
lpfc_vport_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
|
|||
|
||||
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_vport_param_hex_show(name)\
|
||||
lpfc_vport_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
|
|||
|
||||
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
|
||||
static uint lpfc_##name = defval;\
|
||||
module_param(lpfc_##name, uint, 0);\
|
||||
module_param(lpfc_##name, uint, S_IRUGO);\
|
||||
MODULE_PARM_DESC(lpfc_##name, desc);\
|
||||
lpfc_vport_param_hex_show(name)\
|
||||
lpfc_vport_param_init(name, defval, minval, maxval)\
|
||||
|
@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
|
|||
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
|
||||
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
|
||||
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
|
||||
|
||||
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
|
||||
|
||||
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
|
||||
|
||||
|
@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
|
|||
int stat1=0, stat2=0;
|
||||
unsigned int i, j, cnt=count;
|
||||
u8 wwpn[8];
|
||||
int rc;
|
||||
|
||||
if (!phba->cfg_enable_hba_reset)
|
||||
return -EACCES;
|
||||
|
@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
|
|||
"0463 lpfc_soft_wwpn attribute set failed to "
|
||||
"reinit adapter - %d\n", stat1);
|
||||
init_completion(&online_compl);
|
||||
lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
|
||||
rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
|
||||
LPFC_EVT_ONLINE);
|
||||
if (rc == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
wait_for_completion(&online_compl);
|
||||
if (stat2)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
|
@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
|
|||
|
||||
|
||||
static int lpfc_poll = 0;
|
||||
module_param(lpfc_poll, int, 0);
|
||||
module_param(lpfc_poll, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
|
||||
" 0 - none,"
|
||||
" 1 - poll with interrupts enabled"
|
||||
|
@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
|
|||
lpfc_poll_show, lpfc_poll_store);
|
||||
|
||||
int lpfc_sli_mode = 0;
|
||||
module_param(lpfc_sli_mode, int, 0);
|
||||
module_param(lpfc_sli_mode, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
|
||||
" 0 - auto (SLI-3 if supported),"
|
||||
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
|
||||
" 3 - select SLI-3");
|
||||
|
||||
int lpfc_enable_npiv = 1;
|
||||
module_param(lpfc_enable_npiv, int, 0);
|
||||
module_param(lpfc_enable_npiv, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
|
||||
lpfc_param_show(enable_npiv);
|
||||
lpfc_param_init(enable_npiv, 1, 0, 1);
|
||||
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
|
||||
|
||||
int lpfc_enable_rrq;
|
||||
module_param(lpfc_enable_rrq, int, 0);
|
||||
module_param(lpfc_enable_rrq, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
|
||||
lpfc_param_show(enable_rrq);
|
||||
lpfc_param_init(enable_rrq, 0, 0, 1);
|
||||
|
@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
|
|||
lpfc_txcmplq_hw_show, NULL);
|
||||
|
||||
int lpfc_iocb_cnt = 2;
|
||||
module_param(lpfc_iocb_cnt, int, 1);
|
||||
module_param(lpfc_iocb_cnt, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_iocb_cnt,
|
||||
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
|
||||
lpfc_param_show(iocb_cnt);
|
||||
|
@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
|
|||
# disappear until the timer expires. Value range is [0,255]. Default
|
||||
# value is 30.
|
||||
*/
|
||||
module_param(lpfc_devloss_tmo, int, 0);
|
||||
module_param(lpfc_devloss_tmo, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_devloss_tmo,
|
||||
"Seconds driver will hold I/O waiting "
|
||||
"for a device to come back");
|
||||
|
@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
|
|||
# Default value of this parameter is 1.
|
||||
*/
|
||||
static int lpfc_restrict_login = 1;
|
||||
module_param(lpfc_restrict_login, int, 0);
|
||||
module_param(lpfc_restrict_login, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_restrict_login,
|
||||
"Restrict virtual ports login to remote initiators.");
|
||||
lpfc_vport_param_show(restrict_login);
|
||||
|
@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
|
|||
return -EINVAL;
|
||||
}
|
||||
static int lpfc_topology = 0;
|
||||
module_param(lpfc_topology, int, 0);
|
||||
module_param(lpfc_topology, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
|
||||
lpfc_param_show(topology)
|
||||
lpfc_param_init(topology, 0, 0, 6)
|
||||
|
@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
|
|||
}
|
||||
|
||||
static int lpfc_link_speed = 0;
|
||||
module_param(lpfc_link_speed, int, 0);
|
||||
module_param(lpfc_link_speed, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
|
||||
lpfc_param_show(link_speed)
|
||||
|
||||
|
@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
|
|||
}
|
||||
|
||||
static int lpfc_aer_support = 1;
|
||||
module_param(lpfc_aer_support, int, 1);
|
||||
module_param(lpfc_aer_support, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
|
||||
lpfc_param_show(aer_support)
|
||||
|
||||
|
@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
|
|||
# The value is set in milliseconds.
|
||||
*/
|
||||
static int lpfc_max_scsicmpl_time;
|
||||
module_param(lpfc_max_scsicmpl_time, int, 0);
|
||||
module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
|
||||
"Use command completion time to control queue depth");
|
||||
lpfc_vport_param_show(max_scsicmpl_time);
|
||||
|
@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
|
|||
*/
|
||||
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
|
||||
|
||||
module_param(lpfc_prot_mask, uint, 0);
|
||||
module_param(lpfc_prot_mask, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
|
||||
|
||||
/*
|
||||
|
@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
|
|||
#
|
||||
*/
|
||||
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
|
||||
module_param(lpfc_prot_guard, byte, 0);
|
||||
module_param(lpfc_prot_guard, byte, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
|
||||
|
||||
/*
|
||||
* Delay initial NPort discovery when Clean Address bit is cleared in
|
||||
* FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
|
||||
* This parameter can have value 0 or 1.
|
||||
* When this parameter is set to 0, no delay is added to the initial
|
||||
* discovery.
|
||||
* When this parameter is set to non-zero value, initial Nport discovery is
|
||||
* delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
|
||||
* accept and FCID/Fabric name/Fabric portname is changed.
|
||||
* Driver always delay Nport discovery for subsequent FLOGI/FDISC completion
|
||||
* when Clean Address bit is cleared in FLOGI/FDISC
|
||||
* accept and FCID/Fabric name/Fabric portname is changed.
|
||||
* Default value is 0.
|
||||
*/
|
||||
int lpfc_delay_discovery;
|
||||
module_param(lpfc_delay_discovery, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(lpfc_delay_discovery,
|
||||
"Delay NPort discovery when Clean Address bit is cleared. "
|
||||
"Allowed values: 0,1.");
|
||||
|
||||
/*
|
||||
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
|
||||
|
@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
|
|||
&dev_attr_txcmplq_hw,
|
||||
&dev_attr_lpfc_fips_level,
|
||||
&dev_attr_lpfc_fips_rev,
|
||||
&dev_attr_lpfc_dss,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
|
|||
lpfc_aer_support_init(phba, lpfc_aer_support);
|
||||
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
|
||||
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
|
||||
phba->cfg_enable_dss = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
|
|||
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
|
||||
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
|
||||
void lpfc_supported_pages(struct lpfcMboxq *);
|
||||
void lpfc_sli4_params(struct lpfcMboxq *);
|
||||
void lpfc_pc_sli4_params(struct lpfcMboxq *);
|
||||
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
|
||||
int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
|
||||
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
|
||||
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
|
||||
|
@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
|
|||
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
|
||||
void lpfc_fdmi_tmo(unsigned long);
|
||||
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
|
||||
void lpfc_delayed_disc_tmo(unsigned long);
|
||||
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
|
||||
|
||||
int lpfc_config_port_prep(struct lpfc_hba *);
|
||||
int lpfc_config_port_post(struct lpfc_hba *);
|
||||
|
@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
|
|||
extern struct fc_function_template lpfc_vport_transport_functions;
|
||||
extern int lpfc_sli_mode;
|
||||
extern int lpfc_enable_npiv;
|
||||
extern int lpfc_delay_discovery;
|
||||
|
||||
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
|
||||
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
|
||||
|
@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
|
|||
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
|
||||
uint16_t, uint16_t, uint16_t);
|
||||
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
|
||||
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
|
||||
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
|
||||
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
|
||||
uint32_t);
|
||||
|
|
|
@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
|
|||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
|
||||
* @ptr - Context object of the timer.
|
||||
*
|
||||
* This function set the WORKER_DELAYED_DISC_TMO flag and wake up
|
||||
* the worker thread.
|
||||
**/
|
||||
void
|
||||
lpfc_delayed_disc_tmo(unsigned long ptr)
|
||||
{
|
||||
struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
uint32_t tmo_posted;
|
||||
unsigned long iflag;
|
||||
|
||||
spin_lock_irqsave(&vport->work_port_lock, iflag);
|
||||
tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
|
||||
if (!tmo_posted)
|
||||
vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
|
||||
spin_unlock_irqrestore(&vport->work_port_lock, iflag);
|
||||
|
||||
if (!tmo_posted)
|
||||
lpfc_worker_wake_up(phba);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_delayed_disc_timeout_handler - Function called by worker thread to
|
||||
* handle delayed discovery.
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
*
|
||||
* This function start nport discovery of the vport.
|
||||
**/
|
||||
void
|
||||
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
|
||||
{
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
if (!(vport->fc_flag & FC_DISC_DELAYED)) {
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
return;
|
||||
}
|
||||
vport->fc_flag &= ~FC_DISC_DELAYED;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
lpfc_do_scr_ns_plogi(vport->phba, vport);
|
||||
}
|
||||
|
||||
void
|
||||
lpfc_fdmi_tmo(unsigned long ptr)
|
||||
{
|
||||
|
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,7 +1,7 @@
|
|||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2007 Emulex. All rights reserved. *
|
||||
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.emulex.com *
|
||||
* *
|
||||
|
@ -22,6 +22,44 @@
|
|||
#define _H_LPFC_DEBUG_FS
|
||||
|
||||
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
||||
|
||||
/* size of output line, for discovery_trace and slow_ring_trace */
|
||||
#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
|
||||
|
||||
/* nodelist output buffer size */
|
||||
#define LPFC_NODELIST_SIZE 8192
|
||||
#define LPFC_NODELIST_ENTRY_SIZE 120
|
||||
|
||||
/* dumpHBASlim output buffer size */
|
||||
#define LPFC_DUMPHBASLIM_SIZE 4096
|
||||
|
||||
/* dumpHostSlim output buffer size */
|
||||
#define LPFC_DUMPHOSTSLIM_SIZE 4096
|
||||
|
||||
/* hbqinfo output buffer size */
|
||||
#define LPFC_HBQINFO_SIZE 8192
|
||||
|
||||
/* rdPciConf output buffer size */
|
||||
#define LPFC_PCI_CFG_SIZE 4096
|
||||
#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
|
||||
#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
|
||||
|
||||
/* queue info output buffer size */
|
||||
#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
|
||||
|
||||
#define SIZE_U8 sizeof(uint8_t)
|
||||
#define SIZE_U16 sizeof(uint16_t)
|
||||
#define SIZE_U32 sizeof(uint32_t)
|
||||
|
||||
struct lpfc_debug {
|
||||
char *i_private;
|
||||
char op;
|
||||
#define LPFC_IDIAG_OP_RD 1
|
||||
#define LPFC_IDIAG_OP_WR 2
|
||||
char *buffer;
|
||||
int len;
|
||||
};
|
||||
|
||||
struct lpfc_debugfs_trc {
|
||||
char *fmt;
|
||||
uint32_t data1;
|
||||
|
@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
|
|||
uint32_t seq_cnt;
|
||||
unsigned long jif;
|
||||
};
|
||||
|
||||
struct lpfc_idiag_offset {
|
||||
uint32_t last_rd;
|
||||
};
|
||||
|
||||
#define LPFC_IDIAG_CMD_DATA_SIZE 4
|
||||
struct lpfc_idiag_cmd {
|
||||
uint32_t opcode;
|
||||
#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
|
||||
#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
|
||||
#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
|
||||
#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
|
||||
uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
|
||||
};
|
||||
|
||||
struct lpfc_idiag {
|
||||
uint32_t active;
|
||||
struct lpfc_idiag_cmd cmd;
|
||||
struct lpfc_idiag_offset offset;
|
||||
};
|
||||
#endif
|
||||
|
||||
/* Mask for discovery_trace */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
|
||||
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.emulex.com *
|
||||
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
|
||||
|
@ -484,6 +484,59 @@ fail:
|
|||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
* @sp: pointer to service parameter data structure.
|
||||
*
|
||||
* This routine is called from FLOGI/FDISC completion handler functions.
|
||||
* lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
|
||||
* node nodename is changed in the completion service parameter else return
|
||||
* 0. This function also set flag in the vport data structure to delay
|
||||
* NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
|
||||
* in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
|
||||
* node nodename is changed in the completion service parameter.
|
||||
*
|
||||
* Return code
|
||||
* 0 - FCID and Fabric Nodename and Fabric portname is not changed.
|
||||
* 1 - FCID or Fabric Nodename or Fabric portname is changed.
|
||||
*
|
||||
**/
|
||||
static uint8_t
|
||||
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
|
||||
struct serv_parm *sp)
|
||||
{
|
||||
uint8_t fabric_param_changed = 0;
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
if ((vport->fc_prevDID != vport->fc_myDID) ||
|
||||
memcmp(&vport->fabric_portname, &sp->portName,
|
||||
sizeof(struct lpfc_name)) ||
|
||||
memcmp(&vport->fabric_nodename, &sp->nodeName,
|
||||
sizeof(struct lpfc_name)))
|
||||
fabric_param_changed = 1;
|
||||
|
||||
/*
|
||||
* Word 1 Bit 31 in common service parameter is overloaded.
|
||||
* Word 1 Bit 31 in FLOGI request is multiple NPort request
|
||||
* Word 1 Bit 31 in FLOGI response is clean address bit
|
||||
*
|
||||
* If fabric parameter is changed and clean address bit is
|
||||
* cleared delay nport discovery if
|
||||
* - vport->fc_prevDID != 0 (not initial discovery) OR
|
||||
* - lpfc_delay_discovery module parameter is set.
|
||||
*/
|
||||
if (fabric_param_changed && !sp->cmn.clean_address_bit &&
|
||||
(vport->fc_prevDID || lpfc_delay_discovery)) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_DISC_DELAYED;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
}
|
||||
|
||||
return fabric_param_changed;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
|
||||
* @vport: pointer to a host virtual N_Port data structure.
|
||||
|
@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_nodelist *np;
|
||||
struct lpfc_nodelist *next_np;
|
||||
uint8_t fabric_param_changed;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_FABRIC;
|
||||
|
@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
ndlp->nlp_class_sup |= FC_COS_CLASS4;
|
||||
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
|
||||
sp->cmn.bbRcvSizeLsb;
|
||||
|
||||
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
|
||||
memcpy(&vport->fabric_portname, &sp->portName,
|
||||
sizeof(struct lpfc_name));
|
||||
memcpy(&vport->fabric_nodename, &sp->nodeName,
|
||||
sizeof(struct lpfc_name));
|
||||
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
|
||||
|
||||
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
|
||||
|
@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
}
|
||||
}
|
||||
|
||||
if ((vport->fc_prevDID != vport->fc_myDID) &&
|
||||
if (fabric_param_changed &&
|
||||
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
|
||||
|
||||
/* If our NportID changed, we need to ensure all
|
||||
|
@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
IOCB_t *irsp;
|
||||
struct lpfc_sli *psli;
|
||||
struct lpfcMboxq *mbox;
|
||||
|
||||
psli = &phba->sli;
|
||||
/* we pass cmdiocb to state machine which needs rspiocb as well */
|
||||
|
@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
NLP_EVT_CMPL_LOGO);
|
||||
out:
|
||||
lpfc_els_free_iocb(phba, cmdiocb);
|
||||
/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
|
||||
if ((vport->fc_flag & FC_PT2PT) &&
|
||||
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
|
||||
phba->pport->fc_myDID = 0;
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (mbox) {
|
||||
lpfc_config_link(phba, mbox);
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->vport = vport;
|
||||
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
|
||||
MBX_NOT_FINISHED) {
|
||||
mempool_free(mbox, phba->mbox_mem_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
|
|||
}
|
||||
break;
|
||||
case ELS_CMD_FDISC:
|
||||
lpfc_issue_els_fdisc(vport, ndlp, retry);
|
||||
if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
|
||||
lpfc_issue_els_fdisc(vport, ndlp, retry);
|
||||
break;
|
||||
}
|
||||
return;
|
||||
|
@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
|
||||
switch (irsp->ulpStatus) {
|
||||
case IOSTAT_FCP_RSP_ERROR:
|
||||
case IOSTAT_REMOTE_STOP:
|
||||
break;
|
||||
|
||||
case IOSTAT_REMOTE_STOP:
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
/* This IO was aborted by the target, we don't
|
||||
* know the rxid and because we did not send the
|
||||
* ABTS we cannot generate and RRQ.
|
||||
*/
|
||||
lpfc_set_rrq_active(phba, ndlp,
|
||||
cmdiocb->sli4_xritag, 0, 0);
|
||||
}
|
||||
break;
|
||||
case IOSTAT_LOCAL_REJECT:
|
||||
switch ((irsp->un.ulpWord[4] & 0xff)) {
|
||||
case IOERR_LOOP_OPEN_FAILURE:
|
||||
|
@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
|
|||
uint8_t *pcmd;
|
||||
struct RRQ *rrq;
|
||||
uint16_t rxid;
|
||||
uint16_t xri;
|
||||
struct lpfc_node_rrq *prrq;
|
||||
|
||||
|
||||
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
|
||||
pcmd += sizeof(uint32_t);
|
||||
rrq = (struct RRQ *)pcmd;
|
||||
rxid = bf_get(rrq_oxid, rrq);
|
||||
rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
|
||||
rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
|
||||
" x%x x%x\n",
|
||||
bf_get(rrq_did, rrq),
|
||||
bf_get(rrq_oxid, rrq),
|
||||
be32_to_cpu(bf_get(rrq_did, rrq)),
|
||||
be16_to_cpu(bf_get(rrq_oxid, rrq)),
|
||||
rxid,
|
||||
iocb->iotag, iocb->iocb.ulpContext);
|
||||
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
|
||||
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
|
||||
ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
|
||||
prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
|
||||
if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
|
||||
xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
|
||||
else
|
||||
xri = rxid;
|
||||
prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
|
||||
if (prrq)
|
||||
lpfc_clr_rrq_active(phba, rxid, prrq);
|
||||
lpfc_clr_rrq_active(phba, xri, prrq);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
if (vport->load_flag & FC_UNLOADING)
|
||||
goto dropit;
|
||||
|
||||
/* If NPort discovery is delayed drop incoming ELS */
|
||||
if ((vport->fc_flag & FC_DISC_DELAYED) &&
|
||||
(cmd != ELS_CMD_PLOGI))
|
||||
goto dropit;
|
||||
|
||||
ndlp = lpfc_findnode_did(vport, did);
|
||||
if (!ndlp) {
|
||||
/* Cannot find existing Fabric ndlp, so allocate a new one */
|
||||
|
@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
|
||||
|
||||
lpfc_send_els_event(vport, ndlp, payload);
|
||||
|
||||
/* If Nport discovery is delayed, reject PLOGIs */
|
||||
if (vport->fc_flag & FC_DISC_DELAYED) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
break;
|
||||
}
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
if (!(phba->pport->fc_flag & FC_PT2PT) ||
|
||||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
|
||||
|
@ -6596,6 +6698,21 @@ void
|
|||
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
|
||||
{
|
||||
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
/*
|
||||
* If lpfc_delay_discovery parameter is set and the clean address
|
||||
* bit is cleared and fc fabric parameters chenged, delay FC NPort
|
||||
* discovery.
|
||||
*/
|
||||
spin_lock_irq(shost->host_lock);
|
||||
if (vport->fc_flag & FC_DISC_DELAYED) {
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
mod_timer(&vport->delayed_disc_tmo,
|
||||
jiffies + HZ * phba->fc_ratov);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
ndlp = lpfc_findnode_did(vport, NameServer_DID);
|
||||
if (!ndlp) {
|
||||
|
@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
struct lpfc_nodelist *next_np;
|
||||
IOCB_t *irsp = &rspiocb->iocb;
|
||||
struct lpfc_iocbq *piocb;
|
||||
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
|
||||
struct serv_parm *sp;
|
||||
uint8_t fabric_param_changed;
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
|
||||
|
@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
|
||||
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
|
||||
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
|
||||
if ((vport->fc_prevDID != vport->fc_myDID) &&
|
||||
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
|
||||
sp = prsp->virt + sizeof(uint32_t);
|
||||
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
|
||||
memcpy(&vport->fabric_portname, &sp->portName,
|
||||
sizeof(struct lpfc_name));
|
||||
memcpy(&vport->fabric_nodename, &sp->nodeName,
|
||||
sizeof(struct lpfc_name));
|
||||
if (fabric_param_changed &&
|
||||
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
|
||||
/* If our NportID changed, we need to ensure all
|
||||
* remaining NPORTs get unreg_login'ed so we can
|
||||
|
@ -7581,6 +7708,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
|
|||
IOERR_SLI_ABORTED);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
|
||||
* @vport: pointer to lpfc vport data structure.
|
||||
*
|
||||
* This routine is invoked by the vport cleanup for deletions and the cleanup
|
||||
* for an ndlp on removal.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
|
||||
unsigned long iflag = 0;
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, iflag);
|
||||
spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
|
||||
list_for_each_entry_safe(sglq_entry, sglq_next,
|
||||
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
|
||||
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
|
||||
sglq_entry->ndlp = NULL;
|
||||
}
|
||||
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
|
|
@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
|
|||
lpfc_ramp_down_queue_handler(phba);
|
||||
if (work_port_events & WORKER_RAMP_UP_QUEUE)
|
||||
lpfc_ramp_up_queue_handler(phba);
|
||||
if (work_port_events & WORKER_DELAYED_DISC_TMO)
|
||||
lpfc_delayed_disc_timeout_handler(vport);
|
||||
}
|
||||
lpfc_destroy_vport_work_array(phba, vports);
|
||||
|
||||
|
@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
|
|||
|
||||
lpfc_port_link_failure(vport);
|
||||
|
||||
/* Stop delayed Nport discovery */
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag &= ~FC_DISC_DELAYED;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
del_timer_sync(&vport->delayed_disc_tmo);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
spin_unlock_irq(shost->host_lock);
|
||||
vport->unreg_vpi_cmpl = VPORT_OK;
|
||||
mempool_free(pmb, phba->mbox_mem_pool);
|
||||
lpfc_cleanup_vports_rrqs(vport);
|
||||
lpfc_cleanup_vports_rrqs(vport, NULL);
|
||||
/*
|
||||
* This shost reference might have been taken at the beginning of
|
||||
* lpfc_vport_delete()
|
||||
|
@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|||
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
|
||||
return;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
|
||||
if (vport->phba->sli_rev == LPFC_SLI_REV4)
|
||||
lpfc_cleanup_vports_rrqs(vport, ndlp);
|
||||
lpfc_nlp_put(ndlp);
|
||||
return;
|
||||
}
|
||||
|
@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
|||
|
||||
list_del_init(&ndlp->els_retry_evt.evt_listp);
|
||||
list_del_init(&ndlp->dev_loss_evt.evt_listp);
|
||||
|
||||
lpfc_cleanup_vports_rrqs(vport, ndlp);
|
||||
lpfc_unreg_rpi(vport, ndlp);
|
||||
|
||||
return 0;
|
||||
|
@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
|
|||
{
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
struct lpfc_nodelist *ndlp;
|
||||
unsigned long iflags;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
ndlp = __lpfc_findnode_did(vport, did);
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
return ndlp;
|
||||
}
|
||||
|
||||
|
|
|
@ -341,6 +341,12 @@ struct csp {
|
|||
uint8_t bbCreditMsb;
|
||||
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
|
||||
|
||||
/*
|
||||
* Word 1 Bit 31 in common service parameter is overloaded.
|
||||
* Word 1 Bit 31 in FLOGI request is multiple NPort request
|
||||
* Word 1 Bit 31 in FLOGI response is clean address bit
|
||||
*/
|
||||
#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
|
||||
#ifdef __BIG_ENDIAN_BITFIELD
|
||||
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
|
||||
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
|
||||
|
@ -3198,7 +3204,10 @@ typedef struct {
|
|||
#define IOERR_SLER_RRQ_RJT_ERR 0x4C
|
||||
#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
|
||||
#define IOERR_SLER_ABTS_ERR 0x4E
|
||||
|
||||
#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
|
||||
#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
|
||||
#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
|
||||
#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
|
||||
#define IOERR_DRVR_MASK 0x100
|
||||
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
|
||||
#define IOERR_SLI_BRESET 0x102
|
||||
|
|
|
@ -778,6 +778,7 @@ struct mbox_header {
|
|||
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
|
||||
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
|
||||
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
|
||||
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
|
||||
|
||||
/* FCoE Opcodes */
|
||||
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
|
||||
|
@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
|
|||
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
|
||||
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
|
||||
uint32_t word3;
|
||||
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
|
||||
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
|
||||
|
@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
|
|||
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
|
||||
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
|
||||
};
|
||||
|
||||
struct lpfc_mbx_supp_pages {
|
||||
|
@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
|
|||
#define LPFC_SLI4_PARAMETERS 2
|
||||
};
|
||||
|
||||
struct lpfc_mbx_sli4_params {
|
||||
struct lpfc_mbx_pc_sli4_params {
|
||||
uint32_t word1;
|
||||
#define qs_SHIFT 0
|
||||
#define qs_MASK 0x00000001
|
||||
|
@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
|
|||
uint32_t rsvd_13_63[51];
|
||||
};
|
||||
|
||||
struct lpfc_sli4_parameters {
|
||||
uint32_t word0;
|
||||
#define cfg_prot_type_SHIFT 0
|
||||
#define cfg_prot_type_MASK 0x000000FF
|
||||
#define cfg_prot_type_WORD word0
|
||||
uint32_t word1;
|
||||
#define cfg_ft_SHIFT 0
|
||||
#define cfg_ft_MASK 0x00000001
|
||||
#define cfg_ft_WORD word1
|
||||
#define cfg_sli_rev_SHIFT 4
|
||||
#define cfg_sli_rev_MASK 0x0000000f
|
||||
#define cfg_sli_rev_WORD word1
|
||||
#define cfg_sli_family_SHIFT 8
|
||||
#define cfg_sli_family_MASK 0x0000000f
|
||||
#define cfg_sli_family_WORD word1
|
||||
#define cfg_if_type_SHIFT 12
|
||||
#define cfg_if_type_MASK 0x0000000f
|
||||
#define cfg_if_type_WORD word1
|
||||
#define cfg_sli_hint_1_SHIFT 16
|
||||
#define cfg_sli_hint_1_MASK 0x000000ff
|
||||
#define cfg_sli_hint_1_WORD word1
|
||||
#define cfg_sli_hint_2_SHIFT 24
|
||||
#define cfg_sli_hint_2_MASK 0x0000001f
|
||||
#define cfg_sli_hint_2_WORD word1
|
||||
uint32_t word2;
|
||||
uint32_t word3;
|
||||
uint32_t word4;
|
||||
#define cfg_cqv_SHIFT 14
|
||||
#define cfg_cqv_MASK 0x00000003
|
||||
#define cfg_cqv_WORD word4
|
||||
uint32_t word5;
|
||||
uint32_t word6;
|
||||
#define cfg_mqv_SHIFT 14
|
||||
#define cfg_mqv_MASK 0x00000003
|
||||
#define cfg_mqv_WORD word6
|
||||
uint32_t word7;
|
||||
uint32_t word8;
|
||||
#define cfg_wqv_SHIFT 14
|
||||
#define cfg_wqv_MASK 0x00000003
|
||||
#define cfg_wqv_WORD word8
|
||||
uint32_t word9;
|
||||
uint32_t word10;
|
||||
#define cfg_rqv_SHIFT 14
|
||||
#define cfg_rqv_MASK 0x00000003
|
||||
#define cfg_rqv_WORD word10
|
||||
uint32_t word11;
|
||||
#define cfg_rq_db_window_SHIFT 28
|
||||
#define cfg_rq_db_window_MASK 0x0000000f
|
||||
#define cfg_rq_db_window_WORD word11
|
||||
uint32_t word12;
|
||||
#define cfg_fcoe_SHIFT 0
|
||||
#define cfg_fcoe_MASK 0x00000001
|
||||
#define cfg_fcoe_WORD word12
|
||||
#define cfg_phwq_SHIFT 15
|
||||
#define cfg_phwq_MASK 0x00000001
|
||||
#define cfg_phwq_WORD word12
|
||||
#define cfg_loopbk_scope_SHIFT 28
|
||||
#define cfg_loopbk_scope_MASK 0x0000000f
|
||||
#define cfg_loopbk_scope_WORD word12
|
||||
uint32_t sge_supp_len;
|
||||
uint32_t word14;
|
||||
#define cfg_sgl_page_cnt_SHIFT 0
|
||||
#define cfg_sgl_page_cnt_MASK 0x0000000f
|
||||
#define cfg_sgl_page_cnt_WORD word14
|
||||
#define cfg_sgl_page_size_SHIFT 8
|
||||
#define cfg_sgl_page_size_MASK 0x000000ff
|
||||
#define cfg_sgl_page_size_WORD word14
|
||||
#define cfg_sgl_pp_align_SHIFT 16
|
||||
#define cfg_sgl_pp_align_MASK 0x000000ff
|
||||
#define cfg_sgl_pp_align_WORD word14
|
||||
uint32_t word15;
|
||||
uint32_t word16;
|
||||
uint32_t word17;
|
||||
uint32_t word18;
|
||||
uint32_t word19;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_get_sli4_parameters {
|
||||
struct mbox_header header;
|
||||
struct lpfc_sli4_parameters sli4_parameters;
|
||||
};
|
||||
|
||||
/* Mailbox Completion Queue Error Messages */
|
||||
#define MB_CQE_STATUS_SUCCESS 0x0
|
||||
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
|
||||
|
@ -2103,7 +2192,8 @@ struct lpfc_mqe {
|
|||
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
|
||||
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
|
||||
struct lpfc_mbx_supp_pages supp_pages;
|
||||
struct lpfc_mbx_sli4_params sli4_params;
|
||||
struct lpfc_mbx_pc_sli4_params sli4_params;
|
||||
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
|
||||
struct lpfc_mbx_nop nop;
|
||||
} un;
|
||||
};
|
||||
|
@ -2381,6 +2471,10 @@ struct wqe_common {
|
|||
#define wqe_wqes_SHIFT 15
|
||||
#define wqe_wqes_MASK 0x00000001
|
||||
#define wqe_wqes_WORD word10
|
||||
/* Note that this field overlaps above fields */
|
||||
#define wqe_wqid_SHIFT 1
|
||||
#define wqe_wqid_MASK 0x0000007f
|
||||
#define wqe_wqid_WORD word10
|
||||
#define wqe_pri_SHIFT 16
|
||||
#define wqe_pri_MASK 0x00000007
|
||||
#define wqe_pri_WORD word10
|
||||
|
@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
|
|||
uint32_t total_xfer_len;
|
||||
uint32_t initial_xfer_len;
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
uint32_t rsvd_12_15[4]; /* word 12-15 */
|
||||
uint32_t rsrvd12;
|
||||
struct ulp_bde64 ph_bde; /* words 13-15 */
|
||||
};
|
||||
|
||||
struct fcp_iread64_wqe {
|
||||
|
@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
|
|||
uint32_t total_xfer_len; /* word 4 */
|
||||
uint32_t rsrvd5; /* word 5 */
|
||||
struct wqe_common wqe_com; /* words 6-11 */
|
||||
uint32_t rsvd_12_15[4]; /* word 12-15 */
|
||||
uint32_t rsrvd12;
|
||||
struct ulp_bde64 ph_bde; /* words 13-15 */
|
||||
};
|
||||
|
||||
struct fcp_icmnd64_wqe {
|
||||
|
|
|
@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|
|||
|| ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
|
||||
&& !(phba->lmt & LMT_16Gb))) {
|
||||
/* Reset link speed to auto */
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
|
||||
"1302 Invalid speed for this board: "
|
||||
"Reset link speed to auto: x%x\n",
|
||||
phba->cfg_link_speed);
|
||||
|
@ -945,17 +945,13 @@ static void
|
|||
lpfc_rrq_timeout(unsigned long ptr)
|
||||
{
|
||||
struct lpfc_hba *phba;
|
||||
uint32_t tmo_posted;
|
||||
unsigned long iflag;
|
||||
|
||||
phba = (struct lpfc_hba *)ptr;
|
||||
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
|
||||
tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
|
||||
if (!tmo_posted)
|
||||
phba->hba_flag |= HBA_RRQ_ACTIVE;
|
||||
phba->hba_flag |= HBA_RRQ_ACTIVE;
|
||||
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
|
||||
if (!tmo_posted)
|
||||
lpfc_worker_wake_up(phba);
|
||||
lpfc_worker_wake_up(phba);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
|
|||
/* Wait for any activity on ndlps to settle */
|
||||
msleep(10);
|
||||
}
|
||||
lpfc_cleanup_vports_rrqs(vport, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
|
|||
{
|
||||
del_timer_sync(&vport->els_tmofunc);
|
||||
del_timer_sync(&vport->fc_fdmitmo);
|
||||
del_timer_sync(&vport->delayed_disc_tmo);
|
||||
lpfc_can_disctmo(vport);
|
||||
return;
|
||||
}
|
||||
|
@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
|
|||
del_timer_sync(&phba->fabric_block_timer);
|
||||
del_timer_sync(&phba->eratt_poll);
|
||||
del_timer_sync(&phba->hb_tmofunc);
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
del_timer_sync(&phba->rrq_tmr);
|
||||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||||
}
|
||||
phba->hb_outstanding = 0;
|
||||
|
||||
switch (phba->pci_dev_grp) {
|
||||
|
@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
|
|||
init_timer(&vport->els_tmofunc);
|
||||
vport->els_tmofunc.function = lpfc_els_timeout;
|
||||
vport->els_tmofunc.data = (unsigned long)vport;
|
||||
|
||||
init_timer(&vport->delayed_disc_tmo);
|
||||
vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
|
||||
vport->delayed_disc_tmo.data = (unsigned long)vport;
|
||||
|
||||
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
|
||||
if (error)
|
||||
goto out_put_shost;
|
||||
|
@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
|||
goto out_free_bsmbx;
|
||||
}
|
||||
|
||||
/* Get the Supported Pages. It is always available. */
|
||||
/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
|
||||
lpfc_supported_pages(mboxq);
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||||
if (unlikely(rc)) {
|
||||
rc = -EIO;
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
goto out_free_bsmbx;
|
||||
}
|
||||
|
||||
mqe = &mboxq->u.mqe;
|
||||
memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
|
||||
LPFC_MAX_SUPPORTED_PAGES);
|
||||
for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
|
||||
switch (pn_page[i]) {
|
||||
case LPFC_SLI4_PARAMETERS:
|
||||
phba->sli4_hba.pc_sli4_params.supported = 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
if (!rc) {
|
||||
mqe = &mboxq->u.mqe;
|
||||
memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
|
||||
LPFC_MAX_SUPPORTED_PAGES);
|
||||
for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
|
||||
switch (pn_page[i]) {
|
||||
case LPFC_SLI4_PARAMETERS:
|
||||
phba->sli4_hba.pc_sli4_params.supported = 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Read the port's SLI4 Parameters capabilities if supported. */
|
||||
if (phba->sli4_hba.pc_sli4_params.supported)
|
||||
rc = lpfc_pc_sli4_params_get(phba, mboxq);
|
||||
if (rc) {
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
rc = -EIO;
|
||||
goto out_free_bsmbx;
|
||||
}
|
||||
}
|
||||
|
||||
/* Read the port's SLI4 Parameters capabilities if supported. */
|
||||
if (phba->sli4_hba.pc_sli4_params.supported)
|
||||
rc = lpfc_pc_sli4_params_get(phba, mboxq);
|
||||
/*
|
||||
* Get sli4 parameters that override parameters from Port capabilities.
|
||||
* If this call fails it is not a critical error so continue loading.
|
||||
*/
|
||||
lpfc_get_sli4_parameters(phba, mboxq);
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
if (rc) {
|
||||
rc = -EIO;
|
||||
goto out_free_bsmbx;
|
||||
}
|
||||
/* Create all the SLI4 queues */
|
||||
rc = lpfc_sli4_queue_create(phba);
|
||||
if (rc)
|
||||
|
@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|||
mqe = &mboxq->u.mqe;
|
||||
|
||||
/* Read the port's SLI4 Parameters port capabilities */
|
||||
lpfc_sli4_params(mboxq);
|
||||
lpfc_pc_sli4_params(mboxq);
|
||||
if (!phba->sli4_hba.intr_enable)
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||||
else {
|
||||
|
@ -7853,6 +7861,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @mboxq: Pointer to the mailboxq memory for the mailbox command response.
|
||||
*
|
||||
* This function is called in the SLI4 code path to read the port's
|
||||
* sli4 capabilities.
|
||||
*
|
||||
* This function may be be called from any context that can block-wait
|
||||
* for the completion. The expectation is that this routine is called
|
||||
* typically from probe_one or from the online routine.
|
||||
**/
|
||||
int
|
||||
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
{
|
||||
int rc;
|
||||
struct lpfc_mqe *mqe = &mboxq->u.mqe;
|
||||
struct lpfc_pc_sli4_params *sli4_params;
|
||||
int length;
|
||||
struct lpfc_sli4_parameters *mbx_sli4_parameters;
|
||||
|
||||
/* Read the port's SLI4 Config Parameters */
|
||||
length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
if (!phba->sli4_hba.intr_enable)
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||||
else
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
|
||||
lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
|
||||
if (unlikely(rc))
|
||||
return rc;
|
||||
sli4_params = &phba->sli4_hba.pc_sli4_params;
|
||||
mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
|
||||
sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
|
||||
sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
|
||||
sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
|
||||
sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
|
||||
mbx_sli4_parameters);
|
||||
sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
|
||||
mbx_sli4_parameters);
|
||||
if (bf_get(cfg_phwq, mbx_sli4_parameters))
|
||||
phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
|
||||
else
|
||||
phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
|
||||
sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
|
||||
sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
|
||||
sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
|
||||
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
|
||||
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
|
||||
sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
|
||||
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
|
||||
mbx_sli4_parameters);
|
||||
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
|
||||
mbx_sli4_parameters);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
|
||||
* @pdev: pointer to PCI device
|
||||
|
|
|
@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
|||
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
|
||||
if (phba->cfg_enable_bg)
|
||||
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
|
||||
mb->un.varCfgPort.cdss = 1; /* Configure Security */
|
||||
if (phba->cfg_enable_dss)
|
||||
mb->un.varCfgPort.cdss = 1; /* Configure Security */
|
||||
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
|
||||
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
|
||||
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
|
||||
|
@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
|
|||
* @mbox: pointer to lpfc mbox command.
|
||||
* @subsystem: The sli4 config sub mailbox subsystem.
|
||||
* @opcode: The sli4 config sub mailbox command opcode.
|
||||
* @length: Length of the sli4 config mailbox command.
|
||||
* @length: Length of the sli4 config mailbox command (including sub-header).
|
||||
*
|
||||
* This routine sets up the header fields of SLI4 specific mailbox command
|
||||
* for sending IOCTL command.
|
||||
|
@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
|
|||
if (emb) {
|
||||
/* Set up main header fields */
|
||||
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
|
||||
sli4_config->header.cfg_mhdr.payload_length =
|
||||
LPFC_MBX_CMD_HDR_LENGTH + length;
|
||||
sli4_config->header.cfg_mhdr.payload_length = length;
|
||||
/* Set up sub-header fields following main header */
|
||||
bf_set(lpfc_mbox_hdr_opcode,
|
||||
&sli4_config->header.cfg_shdr.request, opcode);
|
||||
bf_set(lpfc_mbox_hdr_subsystem,
|
||||
&sli4_config->header.cfg_shdr.request, subsystem);
|
||||
sli4_config->header.cfg_shdr.request.request_length = length;
|
||||
sli4_config->header.cfg_shdr.request.request_length =
|
||||
length - LPFC_MBX_CMD_HDR_LENGTH;
|
||||
return length;
|
||||
}
|
||||
|
||||
|
@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
|
|||
|
||||
/* Set up host requested features. */
|
||||
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
|
||||
bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
|
||||
|
||||
/* Enable DIF (block guard) only if configured to do so. */
|
||||
if (phba->cfg_enable_bg)
|
||||
|
@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
|
|||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
|
||||
* mailbox command.
|
||||
* lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
|
||||
* @mbox: pointer to lpfc mbox command to initialize.
|
||||
*
|
||||
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
|
||||
* retrieve the particular SLI4 features supported by the port.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_params(struct lpfcMboxq *mbox)
|
||||
lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
|
||||
{
|
||||
struct lpfc_mbx_sli4_params *sli4_params;
|
||||
struct lpfc_mbx_pc_sli4_params *sli4_params;
|
||||
|
||||
memset(mbox, 0, sizeof(*mbox));
|
||||
sli4_params = &mbox->u.mqe.un.sli4_params;
|
||||
|
|
|
@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
ndlp->nlp_maxframe =
|
||||
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
|
||||
|
||||
/* no need to reg_login if we are already in one of these states */
|
||||
/*
|
||||
* Need to unreg_login if we are already in one of these states and
|
||||
* change to NPR state. This will block the port until after the ACC
|
||||
* completes and the reg_login is issued and completed.
|
||||
*/
|
||||
switch (ndlp->nlp_state) {
|
||||
case NLP_STE_NPR_NODE:
|
||||
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
|
||||
|
@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
case NLP_STE_PRLI_ISSUE:
|
||||
case NLP_STE_UNMAPPED_NODE:
|
||||
case NLP_STE_MAPPED_NODE:
|
||||
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
|
||||
return 1;
|
||||
lpfc_unreg_rpi(vport, ndlp);
|
||||
ndlp->nlp_prev_state = ndlp->nlp_state;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
|
||||
}
|
||||
|
||||
if ((vport->fc_flag & FC_PT2PT) &&
|
||||
|
|
|
@ -608,6 +608,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
|
|||
return bcnt;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
|
||||
* @vport: pointer to lpfc vport data structure.
|
||||
*
|
||||
* This routine is invoked by the vport cleanup for deletions and the cleanup
|
||||
* for an ndlp on removal.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_scsi_buf *psb, *next_psb;
|
||||
unsigned long iflag = 0;
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, iflag);
|
||||
spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
|
||||
list_for_each_entry_safe(psb, next_psb,
|
||||
&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
|
||||
if (psb->rdata && psb->rdata->pnode
|
||||
&& psb->rdata->pnode->vport == vport)
|
||||
psb->rdata = NULL;
|
||||
}
|
||||
spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
|
|||
psb->status = IOSTAT_SUCCESS;
|
||||
spin_unlock(
|
||||
&phba->sli4_hba.abts_scsi_buf_list_lock);
|
||||
ndlp = psb->rdata->pnode;
|
||||
if (psb->rdata && psb->rdata->pnode)
|
||||
ndlp = psb->rdata->pnode;
|
||||
else
|
||||
ndlp = NULL;
|
||||
|
||||
rrq_empty = list_empty(&phba->active_rrq_list);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
if (ndlp)
|
||||
|
@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
|
|||
static struct lpfc_scsi_buf*
|
||||
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
struct lpfc_scsi_buf *lpfc_cmd = NULL;
|
||||
struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
|
||||
struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
|
||||
struct lpfc_scsi_buf *lpfc_cmd ;
|
||||
unsigned long iflag = 0;
|
||||
int found = 0;
|
||||
|
||||
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
|
||||
list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
|
||||
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
|
||||
while (!found && lpfc_cmd) {
|
||||
list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
|
||||
list) {
|
||||
if (lpfc_test_rrq_active(phba, ndlp,
|
||||
lpfc_cmd->cur_iocbq.sli4_xritag)) {
|
||||
lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
|
||||
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
|
||||
list_remove_head(scsi_buf_list, lpfc_cmd,
|
||||
struct lpfc_scsi_buf, list);
|
||||
spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
|
||||
iflag);
|
||||
if (lpfc_cmd == start_lpfc_cmd) {
|
||||
lpfc_cmd = NULL;
|
||||
break;
|
||||
} else
|
||||
continue;
|
||||
}
|
||||
lpfc_cmd->cur_iocbq.sli4_xritag))
|
||||
continue;
|
||||
list_del(&lpfc_cmd->list);
|
||||
found = 1;
|
||||
lpfc_cmd->seg_cnt = 0;
|
||||
lpfc_cmd->nonsg_phys = 0;
|
||||
lpfc_cmd->prot_seg_cnt = 0;
|
||||
break;
|
||||
}
|
||||
return lpfc_cmd;
|
||||
spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
|
||||
iflag);
|
||||
if (!found)
|
||||
return NULL;
|
||||
else
|
||||
return lpfc_cmd;
|
||||
}
|
||||
/**
|
||||
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
|
||||
|
@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
|||
struct scatterlist *sgel = NULL;
|
||||
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
|
||||
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
|
||||
struct sli4_sge *first_data_sgl;
|
||||
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
|
||||
dma_addr_t physaddr;
|
||||
uint32_t num_bde = 0;
|
||||
uint32_t dma_len;
|
||||
uint32_t dma_offset = 0;
|
||||
int nseg;
|
||||
struct ulp_bde64 *bde;
|
||||
|
||||
/*
|
||||
* There are three possibilities here - use scatter-gather segment, use
|
||||
|
@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
|||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl += 1;
|
||||
|
||||
first_data_sgl = sgl;
|
||||
lpfc_cmd->seg_cnt = nseg;
|
||||
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
|
||||
|
@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
|||
dma_offset += dma_len;
|
||||
sgl++;
|
||||
}
|
||||
/* setup the performance hint (first data BDE) if enabled */
|
||||
if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
|
||||
bde = (struct ulp_bde64 *)
|
||||
&(iocb_cmd->unsli3.sli3Words[5]);
|
||||
bde->addrLow = first_data_sgl->addr_lo;
|
||||
bde->addrHigh = first_data_sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
le32_to_cpu(first_data_sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
}
|
||||
} else {
|
||||
sgl += 1;
|
||||
/* clear the last flag in the fcp_rsp map entry */
|
||||
|
@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
|||
lpfc_worker_wake_up(phba);
|
||||
break;
|
||||
case IOSTAT_LOCAL_REJECT:
|
||||
case IOSTAT_REMOTE_STOP:
|
||||
if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
|
||||
lpfc_cmd->result ==
|
||||
IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
|
||||
lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
|
||||
lpfc_cmd->result ==
|
||||
IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
|
||||
cmd->result = ScsiResult(DID_NO_CONNECT, 0);
|
||||
break;
|
||||
}
|
||||
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
|
||||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
|
||||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
|
||||
|
@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
|||
cmd->result = ScsiResult(DID_REQUEUE, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
|
||||
lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
|
||||
pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
|
||||
|
@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
|||
"on unprotected cmd\n");
|
||||
}
|
||||
}
|
||||
|
||||
if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
|
||||
&& (phba->sli_rev == LPFC_SLI_REV4)
|
||||
&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
|
||||
/* This IO was aborted by the target, we don't
|
||||
* know the rxid and because we did not send the
|
||||
* ABTS we cannot generate and RRQ.
|
||||
*/
|
||||
lpfc_set_rrq_active(phba, pnode,
|
||||
lpfc_cmd->cur_iocbq.sli4_xritag,
|
||||
0, 0);
|
||||
}
|
||||
/* else: fall through */
|
||||
default:
|
||||
cmd->result = ScsiResult(DID_ERROR, 0);
|
||||
|
@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
|||
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
|
||||
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
|
||||
SAM_STAT_BUSY);
|
||||
} else {
|
||||
} else
|
||||
cmd->result = ScsiResult(DID_OK, 0);
|
||||
}
|
||||
|
||||
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
|
||||
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
|
||||
|
@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
|
|||
* transport is still transitioning.
|
||||
*/
|
||||
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
|
||||
cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
|
||||
cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
|
||||
goto out_fail_command;
|
||||
}
|
||||
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
|
||||
goto out_host_busy;
|
||||
goto out_tgt_busy;
|
||||
|
||||
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
|
||||
if (lpfc_cmd == NULL) {
|
||||
|
@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
|
|||
out_host_busy:
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
out_tgt_busy:
|
||||
return SCSI_MLQUEUE_TARGET_BUSY;
|
||||
|
||||
out_fail_command:
|
||||
done(cmnd);
|
||||
return 0;
|
||||
|
|
|
@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
|
|||
/* set consumption flag every once in a while */
|
||||
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
|
||||
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
|
||||
|
||||
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
|
||||
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
|
||||
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
|
||||
|
||||
/* Update the host index before invoking device */
|
||||
|
@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|||
uint16_t adj_xri;
|
||||
struct lpfc_node_rrq *rrq;
|
||||
int empty;
|
||||
uint32_t did = 0;
|
||||
|
||||
|
||||
if (!ndlp)
|
||||
return -EINVAL;
|
||||
|
||||
if (!phba->cfg_enable_rrq)
|
||||
return -EINVAL;
|
||||
|
||||
if (phba->pport->load_flag & FC_UNLOADING) {
|
||||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||||
goto out;
|
||||
}
|
||||
did = ndlp->nlp_DID;
|
||||
|
||||
/*
|
||||
* set the active bit even if there is no mem available.
|
||||
*/
|
||||
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
||||
if (!ndlp)
|
||||
return -EINVAL;
|
||||
|
||||
if (NLP_CHK_FREE_REQ(ndlp))
|
||||
goto out;
|
||||
|
||||
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
|
||||
goto out;
|
||||
|
||||
if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
|
||||
return -EINVAL;
|
||||
goto out;
|
||||
|
||||
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
|
||||
if (rrq) {
|
||||
rrq->send_rrq = send_rrq;
|
||||
|
@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|||
rrq->vport = ndlp->vport;
|
||||
rrq->rxid = rxid;
|
||||
empty = list_empty(&phba->active_rrq_list);
|
||||
if (phba->cfg_enable_rrq && send_rrq)
|
||||
/*
|
||||
* We need the xri before we can add this to the
|
||||
* phba active rrq list.
|
||||
*/
|
||||
rrq->send_rrq = send_rrq;
|
||||
else
|
||||
rrq->send_rrq = 0;
|
||||
rrq->send_rrq = send_rrq;
|
||||
list_add_tail(&rrq->list, &phba->active_rrq_list);
|
||||
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
|
||||
phba->hba_flag |= HBA_RRQ_ACTIVE;
|
||||
|
@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
return -ENOMEM;
|
||||
out:
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2921 Can't set rrq active xri:0x%x rxid:0x%x"
|
||||
" DID:0x%x Send:%d\n",
|
||||
xritag, rxid, did, send_rrq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
|
||||
* lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @xritag: xri used in this exchange.
|
||||
* @rrq: The RRQ to be cleared.
|
||||
*
|
||||
* This function is called with hbalock held. This function
|
||||
**/
|
||||
static void
|
||||
__lpfc_clr_rrq_active(struct lpfc_hba *phba,
|
||||
uint16_t xritag,
|
||||
struct lpfc_node_rrq *rrq)
|
||||
void
|
||||
lpfc_clr_rrq_active(struct lpfc_hba *phba,
|
||||
uint16_t xritag,
|
||||
struct lpfc_node_rrq *rrq)
|
||||
{
|
||||
uint16_t adj_xri;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
struct lpfc_nodelist *ndlp = NULL;
|
||||
|
||||
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
|
||||
if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
|
||||
ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
|
||||
|
||||
/* The target DID could have been swapped (cable swap)
|
||||
* we should use the ndlp from the findnode if it is
|
||||
* available.
|
||||
*/
|
||||
if (!ndlp)
|
||||
if ((!ndlp) && rrq->ndlp)
|
||||
ndlp = rrq->ndlp;
|
||||
|
||||
if (!ndlp)
|
||||
goto out;
|
||||
|
||||
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
|
||||
if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
|
||||
rrq->send_rrq = 0;
|
||||
rrq->xritag = 0;
|
||||
rrq->rrq_stop_time = 0;
|
||||
}
|
||||
out:
|
||||
mempool_free(rrq, phba->rrq_pool);
|
||||
}
|
||||
|
||||
|
@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
|
|||
struct lpfc_node_rrq *nextrrq;
|
||||
unsigned long next_time;
|
||||
unsigned long iflags;
|
||||
LIST_HEAD(send_rrq);
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||||
next_time = jiffies + HZ * (phba->fc_ratov + 1);
|
||||
list_for_each_entry_safe(rrq, nextrrq,
|
||||
&phba->active_rrq_list, list) {
|
||||
if (time_after(jiffies, rrq->rrq_stop_time)) {
|
||||
list_del(&rrq->list);
|
||||
if (!rrq->send_rrq)
|
||||
/* this call will free the rrq */
|
||||
__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
else {
|
||||
/* if we send the rrq then the completion handler
|
||||
* will clear the bit in the xribitmap.
|
||||
*/
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
if (lpfc_send_rrq(phba, rrq)) {
|
||||
lpfc_clr_rrq_active(phba, rrq->xritag,
|
||||
rrq);
|
||||
}
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
}
|
||||
} else if (time_before(rrq->rrq_stop_time, next_time))
|
||||
&phba->active_rrq_list, list) {
|
||||
if (time_after(jiffies, rrq->rrq_stop_time))
|
||||
list_move(&rrq->list, &send_rrq);
|
||||
else if (time_before(rrq->rrq_stop_time, next_time))
|
||||
next_time = rrq->rrq_stop_time;
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
if (!list_empty(&phba->active_rrq_list))
|
||||
mod_timer(&phba->rrq_tmr, next_time);
|
||||
list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
|
||||
list_del(&rrq->list);
|
||||
if (!rrq->send_rrq)
|
||||
/* this call will free the rrq */
|
||||
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
else if (lpfc_send_rrq(phba, rrq)) {
|
||||
/* if we send the rrq then the completion handler
|
||||
* will clear the bit in the xribitmap.
|
||||
*/
|
||||
lpfc_clr_rrq_active(phba, rrq->xritag,
|
||||
rrq);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
|
|||
/**
|
||||
* lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
|
||||
* @vport: Pointer to vport context object.
|
||||
*
|
||||
* Remove all active RRQs for this vport from the phba->active_rrq_list and
|
||||
* clear the rrq.
|
||||
* @ndlp: Pointer to the lpfc_node_list structure.
|
||||
* If ndlp is NULL Remove all active RRQs for this vport from the
|
||||
* phba->active_rrq_list and clear the rrq.
|
||||
* If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
|
||||
**/
|
||||
void
|
||||
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
|
||||
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_node_rrq *rrq;
|
||||
struct lpfc_node_rrq *nextrrq;
|
||||
unsigned long iflags;
|
||||
LIST_HEAD(rrq_list);
|
||||
|
||||
if (phba->sli_rev != LPFC_SLI_REV4)
|
||||
return;
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
|
||||
if (rrq->vport == vport) {
|
||||
list_del(&rrq->list);
|
||||
__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
}
|
||||
if (!ndlp) {
|
||||
lpfc_sli4_vport_delete_els_xri_aborted(vport);
|
||||
lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
|
||||
}
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
|
||||
if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
|
||||
list_move(&rrq->list, &rrq_list);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
|
||||
list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
|
||||
list_del(&rrq->list);
|
||||
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
|
|||
struct lpfc_node_rrq *nextrrq;
|
||||
unsigned long next_time;
|
||||
unsigned long iflags;
|
||||
LIST_HEAD(rrq_list);
|
||||
|
||||
if (phba->sli_rev != LPFC_SLI_REV4)
|
||||
return;
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
|
||||
next_time = jiffies + HZ * (phba->fc_ratov * 2);
|
||||
list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
|
||||
list_del(&rrq->list);
|
||||
__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
}
|
||||
list_splice_init(&phba->active_rrq_list, &rrq_list);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
|
||||
list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
|
||||
list_del(&rrq->list);
|
||||
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
|
||||
}
|
||||
if (!list_empty(&phba->active_rrq_list))
|
||||
mod_timer(&phba->rrq_tmr, next_time);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
|
||||
* lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @ndlp: Targets nodelist pointer for this exchange.
|
||||
* @xritag the xri in the bitmap to test.
|
||||
|
@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
|
|||
* returns 0 = rrq not active for this xri
|
||||
* 1 = rrq is valid for this xri.
|
||||
**/
|
||||
static int
|
||||
__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
int
|
||||
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
uint16_t xritag)
|
||||
{
|
||||
uint16_t adj_xri;
|
||||
|
@ -801,52 +835,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @xritag: xri used in this exchange.
|
||||
* @rrq: The RRQ to be cleared.
|
||||
*
|
||||
* This function is takes the hbalock.
|
||||
**/
|
||||
void
|
||||
lpfc_clr_rrq_active(struct lpfc_hba *phba,
|
||||
uint16_t xritag,
|
||||
struct lpfc_node_rrq *rrq)
|
||||
{
|
||||
unsigned long iflags;
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
__lpfc_clr_rrq_active(phba, xritag, rrq);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @ndlp: Targets nodelist pointer for this exchange.
|
||||
* @xritag the xri in the bitmap to test.
|
||||
*
|
||||
* This function takes the hbalock.
|
||||
* returns 0 = rrq not active for this xri
|
||||
* 1 = rrq is valid for this xri.
|
||||
**/
|
||||
int
|
||||
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
uint16_t xritag)
|
||||
{
|
||||
int ret;
|
||||
unsigned long iflags;
|
||||
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
|
||||
* @phba: Pointer to HBA context object.
|
||||
|
@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
|
|||
return NULL;
|
||||
adj_xri = sglq->sli4_xritag -
|
||||
phba->sli4_hba.max_cfg_param.xri_base;
|
||||
if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
|
||||
if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
|
||||
/* This xri has an rrq outstanding for this DID.
|
||||
* put it back in the list and get another xri.
|
||||
*/
|
||||
|
@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
|
|||
} else {
|
||||
sglq->state = SGL_FREED;
|
||||
sglq->ndlp = NULL;
|
||||
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
|
||||
list_add_tail(&sglq->list,
|
||||
&phba->sli4_hba.lpfc_sgl_list);
|
||||
|
||||
/* Check if TXQ queue needs to be serviced */
|
||||
if (pring->txq_cnt)
|
||||
|
@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|||
"0378 No support for fcpi mode.\n");
|
||||
ftr_rsp++;
|
||||
}
|
||||
|
||||
if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
|
||||
phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
|
||||
else
|
||||
phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
|
||||
/*
|
||||
* If the port cannot support the host's requested features
|
||||
* then turn off the global config parameters to disable the
|
||||
|
@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
|||
spin_lock_irq(&phba->hbalock);
|
||||
phba->link_state = LPFC_LINK_DOWN;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
|
||||
if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
|
||||
rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
|
||||
out_unset_queue:
|
||||
/* Unset all the queues set up in this routine when error out */
|
||||
if (rc)
|
||||
|
@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
|
|||
cq->type = type;
|
||||
cq->subtype = subtype;
|
||||
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
|
||||
cq->assoc_qid = eq->queue_id;
|
||||
cq->host_index = 0;
|
||||
cq->hba_index = 0;
|
||||
|
||||
|
@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
|
|||
goto out;
|
||||
}
|
||||
mq->type = LPFC_MQ;
|
||||
mq->assoc_qid = cq->queue_id;
|
||||
mq->subtype = subtype;
|
||||
mq->host_index = 0;
|
||||
mq->hba_index = 0;
|
||||
|
@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|||
goto out;
|
||||
}
|
||||
wq->type = LPFC_WQ;
|
||||
wq->assoc_qid = cq->queue_id;
|
||||
wq->subtype = subtype;
|
||||
wq->host_index = 0;
|
||||
wq->hba_index = 0;
|
||||
|
@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
goto out;
|
||||
}
|
||||
hrq->type = LPFC_HRQ;
|
||||
hrq->assoc_qid = cq->queue_id;
|
||||
hrq->subtype = subtype;
|
||||
hrq->host_index = 0;
|
||||
hrq->hba_index = 0;
|
||||
|
@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
goto out;
|
||||
}
|
||||
drq->type = LPFC_DRQ;
|
||||
drq->assoc_qid = cq->queue_id;
|
||||
drq->subtype = subtype;
|
||||
drq->host_index = 0;
|
||||
drq->hba_index = 0;
|
||||
|
@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
if (!mbox)
|
||||
return -ENOMEM;
|
||||
length = (sizeof(struct lpfc_mbx_rq_destroy) -
|
||||
sizeof(struct mbox_header));
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
|
||||
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
|
@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
|
|||
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
|
||||
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
|
||||
sizeof(struct lpfc_mbx_post_sgl_pages) -
|
||||
sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
|
||||
|
||||
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
|
||||
&mbox->u.mqe.un.post_sgl_pages;
|
||||
|
@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
|
|||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
|
||||
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
|
||||
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
|
||||
sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr),
|
||||
LPFC_SLI4_MBX_EMBED);
|
||||
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
|
||||
hdr_tmpl, rpi_page->page_count);
|
||||
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
|
||||
|
|
|
@ -125,9 +125,9 @@ struct lpfc_queue {
|
|||
uint32_t entry_count; /* Number of entries to support on the queue */
|
||||
uint32_t entry_size; /* Size of each queue entry. */
|
||||
uint32_t queue_id; /* Queue ID assigned by the hardware */
|
||||
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
|
||||
struct list_head page_list;
|
||||
uint32_t page_count; /* Number of pages allocated for this queue */
|
||||
|
||||
uint32_t host_index; /* The host's index for putting or getting */
|
||||
uint32_t hba_index; /* The last known hba index for get or put */
|
||||
union sli4_qe qe[1]; /* array to index entries (must be last) */
|
||||
|
@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
|
|||
uint32_t hdr_pp_align;
|
||||
uint32_t sgl_pages_max;
|
||||
uint32_t sgl_pp_align;
|
||||
uint8_t cqv;
|
||||
uint8_t mqv;
|
||||
uint8_t wqv;
|
||||
uint8_t rqv;
|
||||
};
|
||||
|
||||
/* SLI4 HBA data structure entries */
|
||||
|
@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
|
|||
struct sli4_wcqe_xri_aborted *);
|
||||
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
|
||||
struct sli4_wcqe_xri_aborted *);
|
||||
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
|
||||
void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
|
||||
int lpfc_sli4_brdreset(struct lpfc_hba *);
|
||||
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
|
||||
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*******************************************************************
|
||||
* This file is part of the Emulex Linux Device Driver for *
|
||||
* Fibre Channel Host Bus Adapters. *
|
||||
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
|
||||
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
|
||||
* EMULEX and SLI are trademarks of Emulex. *
|
||||
* www.emulex.com *
|
||||
* *
|
||||
|
@ -18,7 +18,7 @@
|
|||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "8.3.20"
|
||||
#define LPFC_DRIVER_VERSION "8.3.21"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
|
||||
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
|
||||
|
|
Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше
Загрузка…
Ссылка в новой задаче