Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (158 commits)
  [SCSI] Fix printing of failed 32-byte commands
  [SCSI] Fix printing of variable length commands
  [SCSI] libsrp: fix bug in ADDITIONAL CDB LENGTH interpretation
  [SCSI] scsi_dh_alua: Add IBM Power Virtual SCSI ALUA device to dev list
  [SCSI] scsi_dh_alua: add netapp to dev list
  [SCSI] qla2xxx: Update version number to 8.03.02-k1.
  [SCSI] qla2xxx: EEH: Restore PCI saved state during pci slot reset.
  [SCSI] qla2xxx: Add firmware ETS burst support.
  [SCSI] qla2xxx: Correct loop-resync issues during SNS scans.
  [SCSI] qla2xxx: Correct use-after-free issue in terminate_rport_io callback.
  [SCSI] qla2xxx: Correct EH bus-reset handling.
  [SCSI] qla2xxx: Proper clean-up of BSG requests when request times out.
  [SCSI] qla2xxx: Initialize payload receive length in failure path of vendor commands
  [SCSI] fix duplicate removal on error path in scsi_sysfs_add_sdev
  [SCSI] fix refcounting bug in scsi_get_host_dev
  [SCSI] fix memory leak in scsi_report_lun_scan
  [SCSI] lpfc: correct PPC build failure
  [SCSI] raid_class: add raid1e
  [SCSI] mpt2sas: Do not call sas_is_tlr_enabled for RAID volumes.
  [SCSI] zfcp: Introduce header file for qdio structs and inline functions
  ...
This commit is contained in:
Linus Torvalds 2010-02-26 16:55:27 -08:00
Parent 64d497f553 77c9cfc51b
Commit 654451748b
121 changed files with 10061 additions and 3328 deletions

View file

@ -1,3 +1,19 @@
1 Release Date : Thu. Oct 29, 2009 09:12:45 PST -
(email-id: megaraidlinux@lsi.com)
Bo Yang
2 Current Version : 00.00.04.17.1-rc1
3 Older Version : 00.00.04.12
1. Set the pad_0 field in the MFI frame structure to 0 to fix
the issue of context values larger than 32 bits.
2. Add the logical drive list to the driver. The driver keeps
the logical drive list internally after driver load.
3. Fix the device update issue after receiving PD delete/add
and LD add/delete AENs from the FW.
1 Release Date : Tue. July 28, 2009 10:12:45 PST -
(email-id: megaraidlinux@lsi.com)
Bo Yang

View file

@ -2141,6 +2141,17 @@ S: Supported
F: Documentation/fault-injection/
F: lib/fault-inject.c
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Robert Love <robert.w.love@intel.com>
L: devel@open-fcoe.org
W: www.Open-FCoE.org
S: Supported
F: drivers/scsi/libfc/
F: drivers/scsi/fcoe/
F: include/scsi/fc/
F: include/scsi/libfc.h
F: include/scsi/libfcoe.h
FILE LOCKING (flock() and fcntl()/lockf())
M: Matthew Wilcox <matthew@wil.cx>
L: linux-fsdevel@vger.kernel.org

View file

@ -126,8 +126,6 @@ static int mfcounter = 0;
* Public data...
*/
static struct proc_dir_entry *mpt_proc_root_dir;
#define WHOINIT_UNKNOWN 0xAA
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -146,6 +144,9 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *mpt_proc_root_dir;
#endif
/*
* Driver Callback Indexes

View file

@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.04.13"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.13"
#define MPT_LINUX_VERSION_COMMON "3.04.14"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \

View file

@ -360,8 +360,8 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
u16 iocstatus;
/* bus reset is only good for SCSI IO, RAID PASSTHRU */
if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
(function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
"TaskMgmt, not SCSI_IO!!\n", ioc->name));
return -EPERM;
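The hunk above fixes an operator-precedence bug: the old condition negated only the first comparison, so every function code other than RAID passthrough, including plain SCSI I/O, was rejected with -EPERM. A standalone sketch of the two predicates (the constants are hypothetical stand-ins, not the real MPI_FUNCTION_* codes):

#include <stdio.h>

/* Hypothetical stand-in values for illustration only. */
enum { RAID_PASSTHROUGH = 1, SCSI_IO_REQUEST = 2 };

/* Old predicate: the ! binds to the first comparison only, so this is
 * true for every f != RAID_PASSTHROUGH, SCSI I/O included. */
static int old_rejects(int f)
{
	return !(f == RAID_PASSTHROUGH) || (f == SCSI_IO_REQUEST);
}

/* New predicate: true only when f is neither of the two. */
static int new_rejects(int f)
{
	return !(f == RAID_PASSTHROUGH || f == SCSI_IO_REQUEST);
}

int main(void)
{
	/* Prints "old=1 new=0": SCSI I/O is no longer bounced with -EPERM. */
	printf("old=%d new=%d\n",
	       old_rejects(SCSI_IO_REQUEST), new_rejects(SCSI_IO_REQUEST));
	return 0;
}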

View file

@ -195,29 +195,34 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
unsigned long flags;
int ready;
MPT_ADAPTER *ioc;
int loops = 40; /* seconds */
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
spin_lock_irqsave(shost->host_lock, flags);
while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) {
while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
|| (loops > 0 && ioc->active == 0)) {
spin_unlock_irqrestore(shost->host_lock, flags);
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_block_error_handler.%d: %d:%d, port status is "
"DID_IMM_RETRY, deferring %s recovery.\n",
"%x, active flag %d, deferring %s recovery.\n",
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun, caller));
SCpnt->device->id, SCpnt->device->lun,
ready, ioc->active, caller));
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
loops --;
}
spin_unlock_irqrestore(shost->host_lock, flags);
if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) {
if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
|| ioc->active == 0) {
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"%s.%d: %d:%d, failing recovery, "
"port state %d, vdevice %p.\n", caller,
"port state %x, active %d, vdevice %p.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun, ready,
SCpnt->device->hostdata));
ioc->active, SCpnt->device->hostdata));
return FAILED;
}
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT

View file

@ -1075,6 +1075,19 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
return 0;
}
static void
mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
{
scsi_device_set_state(sdev, SDEV_BLOCK);
}
static void
mptsas_block_io_starget(struct scsi_target *starget)
{
if (starget)
starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
}
/**
* mptsas_target_reset_queue
*
@ -1098,10 +1111,11 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
if (!(vtarget = mptsas_find_vtarget(ioc, channel, id)))
return;
vtarget->deleted = 1; /* block IO */
vtarget = mptsas_find_vtarget(ioc, channel, id);
if (vtarget) {
mptsas_block_io_starget(vtarget->starget);
vtarget->deleted = 1; /* block IO */
}
target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
GFP_ATOMIC);
@ -1868,7 +1882,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (ioc->sas_discovery_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
// scsi_print_command(SCpnt);
if (ioc->debug_level & MPT_DEBUG_SCSI)
scsi_print_command(SCpnt);
return mptscsih_qcmd(SCpnt,done);
}
@ -2686,6 +2701,187 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
return error;
}
struct rep_manu_request{
u8 smp_frame_type;
u8 function;
u8 reserved;
u8 request_length;
};
struct rep_manu_reply{
u8 smp_frame_type; /* 0x41 */
u8 function; /* 0x01 */
u8 function_result;
u8 response_length;
u16 expander_change_count;
u8 reserved0[2];
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
u16 component_id;
u8 component_revision_id;
u8 reserved3;
u8 vendor_specific[8];
};
/**
* mptsas_exp_repmanufacture_info - fill expander manufacturer info
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
* Fills in the sas_expander_device object when SMP port is created.
*
* Returns 0 for success, non-zero for failure.
*/
static int
mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
u64 sas_address, struct sas_expander_device *edev)
{
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
SmpPassthroughReply_t *smprep;
struct rep_manu_reply *manufacture_reply;
struct rep_manu_request *manufacture_request;
int ret;
int flagsLength;
unsigned long timeleft;
char *psge;
unsigned long flags;
void *data_out = NULL;
dma_addr_t data_out_dma = 0;
u32 sz;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
if (!data_out) {
printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
ret = -ENOMEM;
goto put_mf;
}
manufacture_request = data_out;
manufacture_request->smp_frame_type = 0x40;
manufacture_request->function = 1;
manufacture_request->reserved = 0;
manufacture_request->request_length = 0;
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
smpreq->PhysicalPort = 0xFF;
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
smpreq->RequestDataLength = sizeof(struct rep_manu_request);
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_request);
ioc->add_sge(psge, flagsLength, data_out_dma);
psge += ioc->SGE_size;
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_IOC_TO_HOST |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_reply);
ioc->add_sge(psge, flagsLength, data_out_dma +
sizeof(struct rep_manu_request));
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
mpt_free_msg_frame(ioc, mf);
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
mpt_HardResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
u8 *tmp;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
if (le16_to_cpu(smprep->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
goto out_free;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
strncpy(edev->vendor_id, manufacture_reply->vendor_id,
SAS_EXPANDER_VENDOR_ID_LEN);
strncpy(edev->product_id, manufacture_reply->product_id,
SAS_EXPANDER_PRODUCT_ID_LEN);
strncpy(edev->product_rev, manufacture_reply->product_rev,
SAS_EXPANDER_PRODUCT_REV_LEN);
edev->level = manufacture_reply->sas_format;
if (manufacture_reply->sas_format) {
strncpy(edev->component_vendor_id,
manufacture_reply->component_vendor_id,
SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
manufacture_reply->component_revision_id;
}
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
out_free:
if (data_out_dma)
pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
}
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
@ -2967,6 +3163,11 @@ static int mptsas_probe_one_phy(struct device *dev,
goto out;
}
mptsas_set_rphy(ioc, phy_info, rphy);
if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
mptsas_exp_repmanufacture_info(ioc,
identify.sas_address,
rphy_to_expander_device(rphy));
}
out:

View file

@ -1438,9 +1438,14 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
&& (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
} else {
if (SCpnt->request && SCpnt->request->ioprio) {
if (((SCpnt->request->ioprio & 0x7) == 1) ||
!(SCpnt->request->ioprio & 0x7))
scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
}
} else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
}
/* Use the above information to set up the message frame
*/
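In isolation, the priority test added above promotes the two highest request priority levels to the head of the adapter queue. A minimal sketch, assuming the low three bits of ioprio carry the priority level with 0 as the highest, and ignoring the outer non-zero gate:

#include <stdbool.h>

/* Mirrors the check added in mptscsih_qcmd: priority levels 0 and 1
 * map to MPI_SCSIIO_CONTROL_HEADOFQ; anything else stays untagged.
 * Assumes level = ioprio & 0x7, with 0 as the highest priority. */
static bool wants_head_of_queue(unsigned int ioprio)
{
	unsigned int level = ioprio & 0x7;

	return level == 0 || level == 1;
}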

View file

@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
/*
@ -32,6 +32,7 @@
#include <linux/seq_file.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
@ -49,36 +50,6 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
{
int idx;
adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
GFP_KERNEL);
if (!adapter->req_list)
return -ENOMEM;
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
INIT_LIST_HEAD(&adapter->req_list[idx]);
return 0;
}
/**
* zfcp_reqlist_isempty - is the request list empty
* @adapter: pointer to struct zfcp_adapter
*
* Returns: true if list is empty, false otherwise
*/
int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
{
unsigned int idx;
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
if (!list_empty(&adapter->req_list[idx]))
return 0;
return 1;
}
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct ccw_device *cdev;
@ -110,7 +81,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
flush_work(&unit->scsi_work);
out_unit:
put_device(&port->sysfs_device);
put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
@ -255,7 +226,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
read_lock_irqsave(&port->unit_list_lock, flags);
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
if (!get_device(&unit->sysfs_device))
if (!get_device(&unit->dev))
unit = NULL;
read_unlock_irqrestore(&port->unit_list_lock, flags);
return unit;
@ -280,7 +251,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
if (!get_device(&port->sysfs_device))
if (!get_device(&port->dev))
port = NULL;
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
@ -298,10 +269,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
*/
static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
sysfs_device);
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
put_device(&unit->port->sysfs_device);
put_device(&unit->port->dev);
kfree(unit);
}
@ -318,11 +288,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
struct zfcp_unit *unit;
int retval = -ENOMEM;
get_device(&port->sysfs_device);
get_device(&port->dev);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit) {
put_device(&unit->sysfs_device);
put_device(&unit->dev);
retval = -EEXIST;
goto err_out;
}
@ -333,10 +303,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
unit->sysfs_device.parent = &port->sysfs_device;
unit->sysfs_device.release = zfcp_unit_release;
unit->dev.parent = &port->dev;
unit->dev.release = zfcp_unit_release;
if (dev_set_name(&unit->sysfs_device, "0x%016llx",
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
goto err_out;
@ -353,13 +323,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
if (device_register(&unit->sysfs_device)) {
put_device(&unit->sysfs_device);
if (device_register(&unit->dev)) {
put_device(&unit->dev);
goto err_out;
}
if (sysfs_create_group(&unit->sysfs_device.kobj,
&zfcp_sysfs_unit_attrs))
if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
goto err_out_put;
write_lock_irq(&port->unit_list_lock);
@ -371,9 +340,9 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
return unit;
err_out_put:
device_unregister(&unit->sysfs_device);
device_unregister(&unit->dev);
err_out:
put_device(&port->sysfs_device);
put_device(&port->dev);
return ERR_PTR(retval);
}
@ -539,7 +508,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed;
if (zfcp_reqlist_alloc(adapter))
adapter->req_list = zfcp_reqlist_alloc();
if (!adapter->req_list)
goto failed;
if (zfcp_dbf_adapter_register(adapter))
@ -560,8 +530,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
spin_lock_init(&adapter->req_list_lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@ -640,8 +608,7 @@ void zfcp_device_unregister(struct device *dev,
static void zfcp_port_release(struct device *dev)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port,
sysfs_device);
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
zfcp_ccw_adapter_put(port->adapter);
kfree(port);
@ -669,7 +636,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
put_device(&port->sysfs_device);
put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
@ -689,22 +656,21 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
port->sysfs_device.parent = &adapter->ccw_device->dev;
port->sysfs_device.release = zfcp_port_release;
port->dev.parent = &adapter->ccw_device->dev;
port->dev.release = zfcp_port_release;
if (dev_set_name(&port->sysfs_device, "0x%016llx",
(unsigned long long)wwpn)) {
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
if (device_register(&port->sysfs_device)) {
put_device(&port->sysfs_device);
if (device_register(&port->dev)) {
put_device(&port->dev);
goto err_out;
}
if (sysfs_create_group(&port->sysfs_device.kobj,
if (sysfs_create_group(&port->dev.kobj,
&zfcp_sysfs_port_attrs))
goto err_out_put;
@ -717,7 +683,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
err_out_put:
device_unregister(&port->sysfs_device);
device_unregister(&port->dev);
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);

View file

@ -3,13 +3,14 @@
*
* Registration and callback for the s390 common I/O layer.
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
#define ZFCP_MODEL_PRIV 0x4
@ -122,12 +123,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
zfcp_device_unregister(&unit->sysfs_device,
&zfcp_sysfs_unit_attrs);
zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
list_for_each_entry_safe(port, p, &port_remove_lh, list)
zfcp_device_unregister(&port->sysfs_device,
&zfcp_sysfs_port_attrs);
zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
zfcp_adapter_unregister(adapter);
}
@ -162,7 +161,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
}
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,

View file

@ -140,9 +140,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
memcpy(response->fsf_status_qual,
fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
response->fsf_req_status = fsf_req->status;
response->sbal_first = fsf_req->queue_req.sbal_first;
response->sbal_last = fsf_req->queue_req.sbal_last;
response->sbal_response = fsf_req->queue_req.sbal_response;
response->sbal_first = fsf_req->qdio_req.sbal_first;
response->sbal_last = fsf_req->qdio_req.sbal_last;
response->sbal_response = fsf_req->qdio_req.sbal_response;
response->pool = fsf_req->pool != NULL;
response->erp_action = (unsigned long)fsf_req->erp_action;
@ -576,7 +576,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
struct zfcp_adapter *adapter = dbf->adapter;
zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
&adapter->erp_counter, 0, 0, 0);
&adapter->erp_counter, 0, 0,
ZFCP_DBF_INVALID_LUN);
}
/**
@ -590,8 +591,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
struct zfcp_dbf *dbf = port->adapter->dbf;
zfcp_dbf_rec_target(id, ref, dbf, &port->status,
&port->erp_counter, port->wwpn, port->d_id,
0);
&port->erp_counter, port->wwpn, port->d_id,
ZFCP_DBF_INVALID_LUN);
}
/**
@ -642,10 +643,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
r->u.trigger.ps = atomic_read(&port->status);
r->u.trigger.wwpn = port->wwpn;
}
if (unit) {
if (unit)
r->u.trigger.us = atomic_read(&unit->status);
r->u.trigger.fcp_lun = unit->fcp_lun;
}
r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
@ -668,7 +668,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
r->u.action.action = (unsigned long)erp_action;
r->u.action.status = erp_action->status;
r->u.action.step = erp_action->step;
r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
r->u.action.fsf_req = erp_action->fsf_req_id;
debug_event(dbf->rec, 5, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

View file

@ -30,6 +30,8 @@
#define ZFCP_DBF_TAG_SIZE 4
#define ZFCP_DBF_ID_SIZE 7
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
struct zfcp_dbf_dump {
u8 tag[ZFCP_DBF_TAG_SIZE];
u32 total_size; /* size of total dump data */
@ -192,10 +194,10 @@ struct zfcp_dbf_san_record {
struct zfcp_dbf_san_record_ct_response ct_resp;
struct zfcp_dbf_san_record_els els;
} u;
#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
u8 payload[32];
} __attribute__ ((packed));
#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
struct zfcp_dbf_scsi_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u8 tag2[ZFCP_DBF_TAG_SIZE];
@ -301,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
/**
* zfcp_dbf_scsi_result - trace event for SCSI command completion
* @tag: tag indicating success or failure of SCSI command
* @level: trace level applicable for this event
* @adapter: adapter that has been used to issue the SCSI command
* @dbf: adapter dbf trace
* @scmd: SCSI command pointer
* @fsf_req: request used to issue SCSI command (might be NULL)
* @req: FSF request used to issue SCSI command
*/
static inline
void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
struct zfcp_fsf_req *req)
{
zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
if (scmd->result != 0)
zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
else if (scmd->retries > 0)
zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
else
zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
}
/**
* zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
* @dbf: adapter dbf trace
* @scmd: SCSI command pointer
*/
static inline
void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
{
zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
}
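Folding the result classification into this helper lets the caller in zfcp_fsf.c (its hunk appears later in this view) collapse from a three-way branch into one call. Both versions, taken verbatim from this diff:

/* Before: the caller picked the trace tag and level itself. */
if (scpnt->result != 0)
	zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
else if (scpnt->retries > 0)
	zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
else
	zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);

/* After: the inline helper above classifies internally. */
zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);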
/**

View file

@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_DEF_H
@ -33,15 +33,13 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_fsf.h"
#include "zfcp_qdio.h"
/********************* GENERAL DEFINES *********************************/
#define REQUEST_LIST_SIZE 128
struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
@ -129,12 +127,6 @@ struct zfcp_adapter_mempool {
mempool_t *qtcb_pool;
};
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first; /* index of next free bfr in queue */
atomic_t count; /* number of free buffers in queue */
};
struct zfcp_erp_action {
struct list_head list;
int action; /* requested action code */
@ -143,8 +135,7 @@ struct zfcp_erp_action {
struct zfcp_unit *unit;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
for this action */
unsigned long fsf_req_id;
struct timer_list timer;
};
@ -167,29 +158,6 @@ struct zfcp_latencies {
spinlock_t lock;
};
/** struct zfcp_qdio - basic QDIO data structure
* @resp_q: response queue
* @req_q: request queue
* @stat_lock: lock to protect req_q_util and req_q_time
* @req_q_lock: lock to serialize access to request queue
* @req_q_time: time of last fill level change
* @req_q_util: used for accounting
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this QDIO structure
*/
struct zfcp_qdio {
struct zfcp_qdio_queue resp_q;
struct zfcp_qdio_queue req_q;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
u64 req_q_util;
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct zfcp_adapter *adapter;
};
struct zfcp_adapter {
struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
@ -207,8 +175,7 @@ struct zfcp_adapter {
struct list_head port_list; /* remote port list */
rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
@ -241,7 +208,7 @@ struct zfcp_adapter {
};
struct zfcp_port {
struct device sysfs_device; /* sysfs device */
struct device dev;
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
struct zfcp_adapter *adapter; /* adapter used to access port */
@ -263,7 +230,7 @@ struct zfcp_port {
};
struct zfcp_unit {
struct device sysfs_device; /* sysfs device */
struct device dev;
struct list_head list; /* list of logical units */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
@ -276,34 +243,12 @@ struct zfcp_unit {
struct work_struct scsi_work;
};
/**
* struct zfcp_queue_req - queue related values for a request
* @sbal_number: number of free SBALs
* @sbal_first: first SBAL for this request
* @sbal_last: last SBAL for this request
* @sbal_limit: last possible SBAL for this request
* @sbale_curr: current SBALE at creation of this request
* @sbal_response: SBAL used in interrupt
* @qdio_outb_usage: usage of outbound queue
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_queue_req {
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
u8 sbal_limit;
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
u16 qdio_inb_usage;
};
/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
* @req_id: unique request ID
* @adapter: adapter this request belongs to
* @queue_req: queue related values
* @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
* @fsf_command: FSF command issued
@ -321,7 +266,7 @@ struct zfcp_fsf_req {
struct list_head list;
unsigned long req_id;
struct zfcp_adapter *adapter;
struct zfcp_queue_req queue_req;
struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
u32 fsf_command;
@ -352,45 +297,4 @@ struct zfcp_data {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
/*
* Helper functions for request ID management.
*/
static inline int zfcp_reqlist_hash(unsigned long req_id)
{
return req_id % REQUEST_LIST_SIZE;
}
static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req)
{
list_del(&fsf_req->list);
}
static inline struct zfcp_fsf_req *
zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
{
struct zfcp_fsf_req *request;
unsigned int idx;
idx = zfcp_reqlist_hash(req_id);
list_for_each_entry(request, &adapter->req_list[idx], list)
if (request->req_id == req_id)
return request;
return NULL;
}
static inline struct zfcp_fsf_req *
zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
{
struct zfcp_fsf_req *request;
unsigned int idx;
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
list_for_each_entry(request, &adapter->req_list[idx], list)
if (request == req)
return request;
}
return NULL;
}
#endif /* ZFCP_DEF_H */
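The open-coded request-hash helpers deleted above move behind the new zfcp_reqlist abstraction in zfcp_reqlist.h, which this view does not include. A minimal sketch, reconstructed only from the call sites visible in this merge (zfcp_reqlist_alloc, zfcp_reqlist_isempty, zfcp_reqlist_add, zfcp_reqlist_find_rm, _zfcp_reqlist_find, zfcp_reqlist_move, and the ->lock taken in zfcp_erp.c); the bucket count and internals are assumptions, not the actual header:

/* Sketch only: reconstructed from call sites, not the real header. */
#include <linux/list.h>
#include <linux/spinlock.h>

#define ZFCP_REQ_LIST_BUCKETS 128	/* assumed bucket count */

struct zfcp_reqlist {
	spinlock_t lock;
	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};

static inline unsigned int zfcp_reqlist_hash(unsigned long req_id)
{
	return req_id % ZFCP_REQ_LIST_BUCKETS;
}

/* Add a request under the list's own lock; relies on the list and
 * req_id members of struct zfcp_fsf_req shown above. */
static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
				    struct zfcp_fsf_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&rl->lock, flags);
	list_add_tail(&req->list, &rl->buckets[zfcp_reqlist_hash(req->req_id)]);
	spin_unlock_irqrestore(&rl->lock, flags);
}

/* Combined find-and-remove, as used by zfcp_fsf_reqid_check in this merge. */
static inline struct zfcp_fsf_req *
zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
{
	unsigned long flags;
	struct zfcp_fsf_req *req = NULL, *tmp;

	spin_lock_irqsave(&rl->lock, flags);
	list_for_each_entry(tmp, &rl->buckets[zfcp_reqlist_hash(req_id)], list)
		if (tmp->req_id == req_id) {
			list_del(&tmp->list);
			req = tmp;
			break;
		}
	spin_unlock_irqrestore(&rl->lock, flags);
	return req;
}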

View file

@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
#define ZFCP_MAX_ERPS 3
@ -174,7 +175,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if (!get_device(&unit->sysfs_device))
if (!get_device(&unit->dev))
return NULL;
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
erp_action = &unit->erp_action;
@ -184,7 +185,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
if (!get_device(&port->sysfs_device))
if (!get_device(&port->dev))
return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
@ -478,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_fsf_req *req;
if (!act->fsf_req)
if (!act->fsf_req_id)
return;
spin_lock(&adapter->req_list_lock);
if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
act->fsf_req->erp_action == act) {
spin_lock(&adapter->req_list->lock);
req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
if (req && req->erp_action == act) {
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_action("erscf_1", act);
act->fsf_req->erp_action = NULL;
req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_action("erscf_2", act);
if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
act->fsf_req = NULL;
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
act->fsf_req_id = 0;
} else
act->fsf_req = NULL;
spin_unlock(&adapter->req_list_lock);
act->fsf_req_id = 0;
spin_unlock(&adapter->req_list->lock);
}
/**
@ -1179,19 +1181,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
get_device(&unit->sysfs_device);
get_device(&unit->dev);
if (scsi_queue_work(unit->port->adapter->scsi_host,
&unit->scsi_work) <= 0)
put_device(&unit->sysfs_device);
put_device(&unit->dev);
}
put_device(&unit->sysfs_device);
put_device(&unit->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
put_device(&port->sysfs_device);
put_device(&port->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:

View file

@ -21,7 +21,6 @@ extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_device_unregister(struct device *,
@ -144,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
extern struct qdio_buffer_element
*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
extern struct qdio_buffer_element
*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
struct zfcp_queue_req *, unsigned long,
struct zfcp_qdio_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);

View file

@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corporation 2008, 2009
* Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -316,7 +316,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
put_device(&port->sysfs_device);
put_device(&port->dev);
}
/**
@ -325,9 +325,9 @@ out:
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
get_device(&port->sysfs_device);
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
put_device(&port->sysfs_device);
put_device(&port->dev);
}
/**
@ -389,7 +389,7 @@ static void zfcp_fc_adisc_handler(void *data)
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->sysfs_device);
put_device(&port->dev);
kmem_cache_free(zfcp_data.adisc_cache, adisc);
}
@ -436,7 +436,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
get_device(&port->sysfs_device);
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@ -455,7 +455,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
out:
put_device(&port->sysfs_device);
put_device(&port->dev);
}
/**
@ -468,9 +468,9 @@ out:
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
get_device(&port->sysfs_device);
get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
put_device(&port->sysfs_device);
put_device(&port->dev);
}
static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
@ -617,8 +617,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
list_for_each_entry_safe(port, tmp, &remove_lh, list) {
zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
zfcp_device_unregister(&port->sysfs_device,
&zfcp_sysfs_port_attrs);
zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
}
return ret;
@ -731,7 +730,7 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
return -EINVAL;
d_id = port->d_id;
put_device(&port->sysfs_device);
put_device(&port->dev);
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);

View file

@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -14,6 +14,8 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
@ -393,7 +395,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
case FSF_PROT_LINK_DOWN:
zfcp_fsf_link_down_info_eval(req, "fspse_5",
&psq->link_down_info);
/* FIXME: reopening adapter now? better wait for link up */
/* go through reopen to flush pending requests */
zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
break;
case FSF_PROT_REEST_QUEUE:
@ -457,15 +459,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *req, *tmp;
unsigned long flags;
LIST_HEAD(remove_queue);
unsigned int i;
BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
spin_lock_irqsave(&adapter->req_list_lock, flags);
for (i = 0; i < REQUEST_LIST_SIZE; i++)
list_splice_init(&adapter->req_list[i], &remove_queue);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
zfcp_reqlist_move(adapter->req_list, &remove_queue);
list_for_each_entry_safe(req, tmp, &remove_queue, list) {
list_del(&req->list);
@ -495,8 +492,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) = bottom->fc_link_speed;
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
fc_host_active_fc4s(shost)[2] = 1; /* FCP */
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
@ -619,6 +614,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) = bottom->supported_speed;
memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
FC_FC4_LIST_SIZE);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@ -725,12 +724,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
req->queue_req.sbal_number = 1;
req->queue_req.sbal_first = req_q->first;
req->queue_req.sbal_last = req_q->first;
req->queue_req.sbale_curr = 1;
req->qdio_req.sbal_number = 1;
req->qdio_req.sbal_first = req_q->first;
req->qdio_req.sbal_last = req_q->first;
req->qdio_req.sbale_curr = 1;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].addr = (void *) req->req_id;
sbale[0].flags |= SBAL_FLAGS0_COMMAND;
@ -745,6 +744,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
return ERR_PTR(-ENOMEM);
}
req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
@ -752,8 +752,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
sbale[1].addr = (void *) req->qtcb;
sbale[1].length = sizeof(struct fsf_qtcb);
}
@ -770,25 +768,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
unsigned long flags;
int idx;
int with_qtcb = (req->qtcb != NULL);
int with_qtcb = (req->qtcb != NULL);
int req_id = req->req_id;
/* put allocated FSF request into hash table */
spin_lock_irqsave(&adapter->req_list_lock, flags);
idx = zfcp_reqlist_hash(req->req_id);
list_add_tail(&req->list, &adapter->req_list[idx]);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
zfcp_reqlist_add(adapter->req_list, req);
req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
req->issued = get_clock();
if (zfcp_qdio_send(qdio, &req->queue_req)) {
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
spin_lock_irqsave(&adapter->req_list_lock, flags);
/* lookup request again, list might have changed */
if (zfcp_reqlist_find_safe(adapter, req))
zfcp_reqlist_remove(adapter, req);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
return -EIO;
}
@ -826,9 +816,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
req->queue_req.sbale_curr = 2;
req->qdio_req.sbale_curr = 2;
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
@ -837,7 +827,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
sbale->addr = (void *) sr_buf;
sbale->length = sizeof(*sr_buf);
@ -934,7 +924,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1029,7 +1019,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
{
struct zfcp_adapter *adapter = req->adapter;
struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
&req->queue_req);
&req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
@ -1047,15 +1037,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
@ -1251,7 +1241,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1262,13 +1252,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
FSF_FEATURE_UPDATE_ALERT;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -1293,7 +1283,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
goto out_unlock;
}
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_config_data_handler;
@ -1349,19 +1339,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -1398,7 +1388,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1484,7 +1474,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
}
out:
put_device(&port->sysfs_device);
put_device(&port->dev);
}
/**
@ -1513,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1521,15 +1511,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
hton24(req->qtcb->bottom.support.d_id, port->d_id);
req->data = port;
req->erp_action = erp_action;
erp_action->fsf_req = req;
get_device(&port->sysfs_device);
erp_action->fsf_req_id = req->req_id;
get_device(&port->dev);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
put_device(&port->sysfs_device);
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -1583,7 +1573,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1591,13 +1581,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
req->data = erp_action->port;
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -1660,7 +1650,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1715,7 +1705,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1809,7 +1799,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1817,13 +1807,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
req->qtcb->header.port_handle = erp_action->port->handle;
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -1982,7 +1972,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -1991,7 +1981,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_open_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@ -2000,7 +1990,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -2068,7 +2058,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -2077,13 +2067,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_close_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
erp_action->fsf_req = req;
erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
erp_action->fsf_req = NULL;
erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@ -2111,8 +2101,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
blktrc.inb_usage = req->queue_req.qdio_inb_usage;
blktrc.outb_usage = req->queue_req.qdio_outb_usage;
blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
blktrc.flags |= ZFCP_BLK_LAT_VALID;
@ -2169,12 +2159,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
zfcp_fsf_req_trace(req, scpnt);
skip_fsfstatus:
if (scpnt->result != 0)
zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
else if (scpnt->retries > 0)
zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
else
zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
scpnt->host_scribble = NULL;
(scpnt->scsi_done) (scpnt);
@ -2274,7 +2259,7 @@ skip_fsfstatus:
else {
zfcp_fsf_send_fcp_command_task_handler(req);
req->unit = NULL;
put_device(&unit->sysfs_device);
put_device(&unit->dev);
}
}
@ -2312,7 +2297,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
get_device(&unit->sysfs_device);
get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@ -2346,11 +2331,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
scsi_sglist(scsi_cmnd),
FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@ -2369,7 +2354,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
failed_scsi_cmnd:
put_device(&unit->sysfs_device);
put_device(&unit->dev);
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
@ -2415,7 +2400,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@ -2478,14 +2463,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= direction;
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
direction, fsf_cfdc->sg,
FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
@ -2516,15 +2501,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
unsigned long flags, req_id;
unsigned long req_id;
int idx;
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
req_id = (unsigned long) sbale->addr;
spin_lock_irqsave(&adapter->req_list_lock, flags);
fsf_req = zfcp_reqlist_find(adapter, req_id);
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req)
/*
@ -2534,11 +2518,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
panic("error: unknown req_id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
list_del(&fsf_req->list);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
fsf_req->queue_req.sbal_response = sbal_idx;
fsf_req->queue_req.qdio_inb_usage =
fsf_req->qdio_req.sbal_response = sbal_idx;
fsf_req->qdio_req.qdio_inb_usage =
atomic_read(&qdio->resp_q.count);
zfcp_fsf_req_complete(fsf_req);

View file

@ -10,6 +10,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
@ -28,12 +29,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
return &q->sbal[sbal_idx]->element[sbale_idx];
}
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
struct zfcp_adapter *adapter = qdio->adapter;
@ -106,7 +101,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
if (unlikely(retval)) {
atomic_set(&queue->count, count);
/* FIXME: Recover this with an adapter reopen? */
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
} else {
queue->first += count;
queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@ -145,32 +140,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
zfcp_qdio_resp_put_back(qdio, count);
}
/**
* zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
}
/**
* zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
* @q_req: pointer to struct zfcp_queue_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
q_req->sbale_curr);
}
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req, int max_sbals)
struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = atomic_read(&qdio->req_q.count);
count = min(count, max_sbals);
@ -179,7 +150,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype)
{
struct qdio_buffer_element *sbale;
@ -214,7 +185,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned int sbtype)
{
if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@ -224,7 +195,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req)
struct zfcp_qdio_req *q_req)
{
struct qdio_buffer **sbal = qdio->req_q.sbal;
int first = q_req->sbal_first;
@ -235,7 +206,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
}
static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req,
struct zfcp_qdio_req *q_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
@ -271,8 +242,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
* @max_sbals: upper bound for number of SBALs to be used
* Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
struct zfcp_queue_req *q_req,
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype, struct scatterlist *sg,
int max_sbals)
{
@ -304,10 +274,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_queue_req
* @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct zfcp_qdio_queue *req_q = &qdio->req_q;
int first = q_req->sbal_first;

View file

@ -0,0 +1,109 @@
/*
* zfcp device driver
*
* Header file for zfcp qdio interface
*
* Copyright IBM Corporation 2010
*/
#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H
#include <asm/qdio.h>
/**
* struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
* @sbal: qdio buffers
* @first: index of next free buffer in queue
* @count: number of free buffers in queue
*/
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first;
atomic_t count;
};
/**
* struct zfcp_qdio - basic qdio data structure
* @resp_q: response queue
* @req_q: request queue
* @stat_lock: lock to protect req_q_util and req_q_time
* @req_q_lock: lock to serialize access to request queue
* @req_q_time: time of last fill level change
* @req_q_util: used for accounting
* @req_q_full: queue full incidents
* @req_q_wq: used to wait for SBAL availability
* @adapter: adapter used in conjunction with this qdio structure
*/
struct zfcp_qdio {
struct zfcp_qdio_queue resp_q;
struct zfcp_qdio_queue req_q;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
u64 req_q_util;
atomic_t req_q_full;
wait_queue_head_t req_q_wq;
struct zfcp_adapter *adapter;
};
/**
* struct zfcp_qdio_req - qdio queue related values for a request
* @sbal_number: number of free sbals
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
* @sbal_limit: last possible sbal for this request
* @sbale_curr: current sbale at creation of this request
* @sbal_response: sbal used in interrupt
* @qdio_outb_usage: usage of outbound queue
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
u8 sbal_limit;
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
u16 qdio_inb_usage;
};
/**
* zfcp_qdio_sbale - return pointer to sbale in qdio queue
* @q: queue where to find sbal
* @sbal_idx: sbal index in queue
* @sbale_idx: sbale index in sbal
*/
static inline struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
return &q->sbal[sbal_idx]->element[sbale_idx];
}
/**
* zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
}
/**
* zfcp_qdio_sbale_curr - return current sbale on req_q for a request
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: pointer to qdio_buffer_element (sbale) structure
*/
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
q_req->sbale_curr);
}
#endif /* ZFCP_QDIO_H */
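
As an editorial sketch (not from this commit; the function name is hypothetical), the three inline helpers above compose as plain array indexing:

static void example_req_sbales(struct zfcp_qdio *qdio,
			       struct zfcp_qdio_req *q_req)
{
	/* first SBALE of the request's last SBAL ... */
	struct qdio_buffer_element *first = zfcp_qdio_sbale_req(qdio, q_req);
	/* ... and the SBALE the request is currently filling ... */
	struct qdio_buffer_element *curr = zfcp_qdio_sbale_curr(qdio, q_req);

	/* both are shorthand for direct indexing into the request queue */
	WARN_ON(curr != zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
					q_req->sbale_curr));
	(void)first;
}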

View file

@ -0,0 +1,183 @@
/*
* zfcp device driver
*
* Data structure and helper functions for tracking pending FSF
* requests.
*
* Copyright IBM Corporation 2009
*/
#ifndef ZFCP_REQLIST_H
#define ZFCP_REQLIST_H
/* number of hash buckets */
#define ZFCP_REQ_LIST_BUCKETS 128
/**
* struct zfcp_reqlist - Container for request list (reqlist)
* @lock: Spinlock for protecting the hash list
* @list: Array of hashbuckets, each is a list of requests in this bucket
*/
struct zfcp_reqlist {
spinlock_t lock;
struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
};
static inline int zfcp_reqlist_hash(unsigned long req_id)
{
return req_id % ZFCP_REQ_LIST_BUCKETS;
}
/**
* zfcp_reqlist_alloc - Allocate and initialize reqlist
*
* Returns pointer to allocated reqlist on success, or NULL on
* allocation failure.
*/
static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
{
unsigned int i;
struct zfcp_reqlist *rl;
rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
if (!rl)
return NULL;
spin_lock_init(&rl->lock);
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
INIT_LIST_HEAD(&rl->buckets[i]);
return rl;
}
/**
* zfcp_reqlist_isempty - Check whether the request list is empty
* @rl: pointer to reqlist
*
* Returns: 1 if list is empty, 0 if not
*/
static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
{
unsigned int i;
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
if (!list_empty(&rl->buckets[i]))
return 0;
return 1;
}
/**
* zfcp_reqlist_free - Free allocated memory for reqlist
* @rl: The reqlist where to free memory
*/
static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
{
/* sanity check */
BUG_ON(!zfcp_reqlist_isempty(rl));
kfree(rl);
}
static inline struct zfcp_fsf_req *
_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
struct zfcp_fsf_req *req;
unsigned int i;
i = zfcp_reqlist_hash(req_id);
list_for_each_entry(req, &rl->buckets[i], list)
if (req->req_id == req_id)
return req;
return NULL;
}
/**
* zfcp_reqlist_find - Lookup FSF request by its request id
* @rl: The reqlist where to lookup the FSF request
* @req_id: The request id to look for
*
* Returns a pointer to the FSF request with the specified request id
* or NULL if there is no known FSF request with this id.
*/
static inline struct zfcp_fsf_req *
zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
spin_lock_irqsave(&rl->lock, flags);
req = _zfcp_reqlist_find(rl, req_id);
spin_unlock_irqrestore(&rl->lock, flags);
return req;
}
/**
* zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
* @rl: reqlist where to search and remove entry
* @req_id: The request id of the request to look for
*
* This function tries to find the FSF request with the specified
* id and then removes it from the reqlist. The reqlist lock is held
* during both steps of the operation.
*
* Returns: Pointer to the FSF request if the request has been found,
* NULL if it has not been found.
*/
static inline struct zfcp_fsf_req *
zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
{
unsigned long flags;
struct zfcp_fsf_req *req;
spin_lock_irqsave(&rl->lock, flags);
req = _zfcp_reqlist_find(rl, req_id);
if (req)
list_del(&req->list);
spin_unlock_irqrestore(&rl->lock, flags);
return req;
}
/**
* zfcp_reqlist_add - Add entry to reqlist
* @rl: reqlist where to add the entry
* @req: The entry to add
*
* The request id always increases. As an optimization new requests
* are added here with list_add_tail at the end of the bucket lists
* while old requests are looked up starting at the beginning of the
* lists.
*/
static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
struct zfcp_fsf_req *req)
{
unsigned int i;
unsigned long flags;
i = zfcp_reqlist_hash(req->req_id);
spin_lock_irqsave(&rl->lock, flags);
list_add_tail(&req->list, &rl->buckets[i]);
spin_unlock_irqrestore(&rl->lock, flags);
}
/**
* zfcp_reqlist_move - Move all entries from reqlist to simple list
* @rl: The zfcp_reqlist where to remove all entries
* @list: The list where to move all entries
*/
static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
struct list_head *list)
{
unsigned int i;
unsigned long flags;
spin_lock_irqsave(&rl->lock, flags);
for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
list_splice_init(&rl->buckets[i], list);
spin_unlock_irqrestore(&rl->lock, flags);
}
#endif /* ZFCP_REQLIST_H */
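
A minimal round-trip sketch under the definitions above (editorial, not code from this commit; the function name is hypothetical):

static int example_reqlist_roundtrip(struct zfcp_fsf_req *req)
{
	struct zfcp_reqlist *rl;
	struct zfcp_fsf_req *found;

	rl = zfcp_reqlist_alloc();	/* GFP_KERNEL, may sleep */
	if (!rl)
		return -ENOMEM;

	zfcp_reqlist_add(rl, req);	/* hashes on req->req_id */

	/* lookup and removal happen under a single lock acquisition */
	found = zfcp_reqlist_find_rm(rl, req->req_id);

	zfcp_reqlist_free(rl);		/* BUG_ON()s if the list is not empty */
	return found == req ? 0 : -ENOENT;
}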

View file

@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corporation 2002, 2009
* Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -15,6 +15,7 @@
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
@ -43,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
unit->device = NULL;
put_device(&unit->sysfs_device);
put_device(&unit->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@ -59,10 +60,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
set_host_byte(scpnt, result);
if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
/* return directly */
zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
}
@ -86,18 +86,10 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
unit = scpnt->device->hostdata;
BUG_ON(!adapter || (adapter != unit->port->adapter));
BUG_ON(!scpnt->scsi_done);
if (unlikely(!unit)) {
zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
return 0;
}
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
@ -189,9 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
spin_lock(&adapter->req_list_lock);
old_req = zfcp_reqlist_find(adapter, old_reqid);
spin_unlock(&adapter->req_list_lock);
old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@ -521,7 +511,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
put_device(&port->sysfs_device);
put_device(&port->dev);
}
}
@ -563,23 +553,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
get_device(&port->sysfs_device);
get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
put_device(&port->sysfs_device);
put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
get_device(&port->sysfs_device);
get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
put_device(&port->sysfs_device);
put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@ -608,7 +598,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
}
put_device(&port->sysfs_device);
put_device(&port->dev);
}
@ -626,7 +616,7 @@ void zfcp_scsi_scan(struct work_struct *work)
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
put_device(&unit->sysfs_device);
put_device(&unit->dev);
}
struct fc_function_template zfcp_transport_functions = {

View file

@ -3,7 +3,7 @@
*
* sysfs attributes.
*
* Copyright IBM Corporation 2008, 2009
* Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@ -19,8 +19,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, \
sysfs_device); \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
@ -87,8 +86,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, \
sysfs_device); \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
@ -99,12 +97,11 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
struct _feat_def *_feat = container_of(dev, struct _feat_def, \
sysfs_device); \
struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
unsigned long val; \
int retval = 0; \
\
if (!(_feat && get_device(&_feat->sysfs_device))) \
if (!(_feat && get_device(&_feat->dev))) \
return -EBUSY; \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
@ -118,7 +115,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
put_device(&_feat->sysfs_device); \
put_device(&_feat->dev); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@ -224,10 +221,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
put_device(&port->sysfs_device);
put_device(&port->dev);
zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
@ -258,13 +255,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port,
sysfs_device);
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
if (!(port && get_device(&port->sysfs_device)))
if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@ -280,7 +276,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
zfcp_erp_wait(unit->port->adapter);
flush_work(&unit->scsi_work);
out:
put_device(&port->sysfs_device);
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@ -289,13 +285,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct zfcp_port *port = container_of(dev, struct zfcp_port,
sysfs_device);
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
if (!(port && get_device(&port->sysfs_device)))
if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@ -314,12 +309,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
put_device(&unit->sysfs_device);
put_device(&unit->dev);
zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
out:
put_device(&port->sysfs_device);
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);

View file

@ -3924,7 +3924,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
{
struct sccb_mgr_tar_info *currTar_Info;
if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) {
if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
return;
}
currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -24,6 +24,10 @@
#define FW_VER_LEN 32
#define MCC_Q_LEN 128
#define MCC_CQ_LEN 256
#define MAX_MCC_CMD 16
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
struct be_dma_mem {
void *va;
@ -57,6 +61,11 @@ static inline void *queue_head_node(struct be_queue_info *q)
return q->dma_mem.va + q->head * q->entry_size;
}
static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
{
return q->dma_mem.va + wrb_num * q->entry_size;
}
static inline void *queue_tail_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->tail * q->entry_size;
@ -104,15 +113,19 @@ struct be_ctrl_info {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
/* MCC Async callback */
void (*async_cb) (void *adapter, bool link_up);
void *adapter_ctxt;
wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
unsigned int mcc_tag[MAX_MCC_CMD];
unsigned int mcc_numtag[MAX_MCC_CMD + 1];
unsigned short mcc_alloc_index;
unsigned short mcc_free_index;
unsigned int mcc_tag_available;
};
#include "be_cmds.h"
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 5s timeout */
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -19,7 +19,7 @@
#include "be_mgmt.h"
#include "be_main.h"
static void be_mcc_notify(struct beiscsi_hba *phba)
void be_mcc_notify(struct beiscsi_hba *phba)
{
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
u32 val = 0;
@ -29,6 +29,52 @@ static void be_mcc_notify(struct beiscsi_hba *phba)
iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
unsigned int tag = 0;
unsigned int num = 0;
mcc_tag_rdy:
if (phba->ctrl.mcc_tag_available) {
tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
phba->ctrl.mcc_numtag[tag] = 0;
} else {
udelay(100);
num++;
if (num < mcc_timeout)
goto mcc_tag_rdy;
}
if (tag) {
phba->ctrl.mcc_tag_available--;
if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
phba->ctrl.mcc_alloc_index = 0;
else
phba->ctrl.mcc_alloc_index++;
}
return tag;
}
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
spin_lock(&ctrl->mbox_lock);
tag = tag & 0x000000FF;
ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
ctrl->mcc_free_index = 0;
else
ctrl->mcc_free_index++;
ctrl->mcc_tag_available++;
spin_unlock(&ctrl->mbox_lock);
}
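/*
 * Editor's aside, not part of the commit: alloc_mcc_tag() and
 * free_mcc_tag() manage a small ring of non-zero tags (0 means "none
 * free").  A minimal issue path, mirroring the mgmt_* conversions later
 * in this diff, would look like this hypothetical helper:
 */
static unsigned int example_issue_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb;
	unsigned int tag;

	spin_lock(&phba->ctrl.mbox_lock);
	tag = alloc_mcc_tag(phba);	/* 0: pool exhausted after retries */
	if (!tag) {
		spin_unlock(&phba->ctrl.mbox_lock);
		return 0;
	}
	wrb = wrb_from_mccq(phba);
	wrb->tag0 |= tag;		/* the ISR routes completion by tag0 */
	/* ... build the command payload in wrb here ... */
	be_mcc_notify(phba);
	spin_unlock(&phba->ctrl.mbox_lock);
	return tag;	/* caller waits on ctrl.mcc_wait[tag], then frees */
}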
bool is_link_state_evt(u32 trailer)
{
return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
ASYNC_TRAILER_EVENT_CODE_MASK) ==
ASYNC_EVENT_CODE_LINK_STATE);
}
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
if (compl->flags != 0) {
@ -64,12 +110,30 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
return 0;
}
static inline bool is_link_state_evt(u32 trailer)
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
ASYNC_TRAILER_EVENT_CODE_MASK) ==
ASYNC_EVENT_CODE_LINK_STATE);
u16 compl_status, extd_status;
unsigned short tag;
be_dws_le_to_cpu(compl, 4);
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
CQE_STATUS_COMPL_MASK;
/* The ctrl.mcc_numtag[tag] is filled with
* [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
* [7:0] = compl_status
*/
tag = (compl->tag0 & 0x000000FF);
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
ctrl->mcc_numtag[tag] = 0x80000000;
ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
wake_up_interruptible(&ctrl->mcc_wait[tag]);
return 0;
}
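/*
 * Editor's note, illustration only: waiters decode mcc_numtag[tag] with
 * the inverse of the packing above, as beiscsi_get_host_param() does
 * later in this commit:
 *
 *	wrb_num     = (ctrl->mcc_numtag[tag] & 0x00FF0000) >> 16;
 *	extd_status = (ctrl->mcc_numtag[tag] & 0x0000FF00) >> 8;
 *	status      =  ctrl->mcc_numtag[tag] & 0x000000FF;
 */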
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
@ -89,7 +153,7 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
struct be_async_event_link_state *evt)
{
switch (evt->port_link_status) {
@ -97,13 +161,13 @@ static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
evt->physical_port);
phba->state |= BE_ADAPTER_LINK_DOWN;
iscsi_host_for_each_session(phba->shost,
be2iscsi_fail_session);
break;
case ASYNC_EVENT_LINK_UP:
phba->state = BE_ADAPTER_UP;
SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
evt->physical_port);
iscsi_host_for_each_session(phba->shost,
be2iscsi_fail_session);
break;
default:
SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
@ -162,7 +226,6 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
#define mcc_timeout 120000 /* 5s timeout */
int i, status;
for (i = 0; i < mcc_timeout; i++) {
status = beiscsi_process_mcc(phba);
@ -372,9 +435,10 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
BUG_ON(atomic_read(&mccq->used) >= mccq->len);
wrb = queue_head_node(mccq);
memset(wrb, 0, sizeof(*wrb));
wrb->tag0 = (mccq->head & 0x000000FF) << 16;
queue_head_inc(mccq);
atomic_inc(&mccq->used);
memset(wrb, 0, sizeof(*wrb));
return wrb;
}

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -425,14 +425,20 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
int be_poll_mcc(struct be_ctrl_info *ctrl);
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr);
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
/* ISCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
int be_mcc_notify_wait(struct beiscsi_hba *phba);
void be_mcc_notify(struct beiscsi_hba *phba);
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
struct be_async_event_link_state *evt);
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl);
int be_mbox_notify(struct be_ctrl_info *ctrl);
@ -448,6 +454,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq);
bool is_link_state_evt(u32 trailer);
struct be_default_pdu_context {
u32 dw[4];
} __packed;

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -101,6 +101,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
struct iscsi_session *sess = cls_session->dd_data;
struct beiscsi_session *beiscsi_sess = sess->dd_data;
SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
pci_pool_destroy(beiscsi_sess->bhs_pool);
iscsi_session_teardown(cls_session);
}
@ -224,6 +225,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_1,
@ -254,6 +256,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
struct iscsi_session *session = conn->session;
int ret;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
ret = iscsi_set_param(cls_conn, param, buf, buflen);
if (ret)
return ret;
@ -271,8 +274,8 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
conn->max_recv_dlength = 65536;
break;
case ISCSI_PARAM_MAX_BURST:
if (session->first_burst > 262144)
session->first_burst = 262144;
if (session->max_burst > 262144)
session->max_burst = 262144;
break;
default:
return 0;
@ -293,12 +296,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
struct be_cmd_resp_get_mac_addr *resp;
struct be_mcc_wrb *wrb;
unsigned int tag, wrb_num;
int len = 0;
unsigned short status, extd_status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
be_cmd_get_mac_addr(phba, phba->mac_address);
len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
tag = be_cmd_get_mac_addr(phba);
if (!tag) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
return -1;
} else
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
" status = %d extd_status = %d \n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -1;
} else {
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
resp = embedded_payload(wrb);
memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
len = sysfs_format_mac(buf, phba->mac_address,
ETH_ALEN);
}
break;
default:
return iscsi_host_get_param(shost, param, buf);
@ -378,6 +410,7 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep)
@ -422,8 +455,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_out *ptcpcnct_out;
unsigned short status, extd_status;
unsigned int tag, wrb_num;
int ret = -1;
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
if (beiscsi_ep->ep_cid == 0xFFFF) {
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
@ -431,15 +470,44 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
}
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
beiscsi_ep->ep_cid);
phba->ep_array[beiscsi_ep->ep_cid] = ep;
if (beiscsi_ep->ep_cid >
(phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
phba->ep_array[beiscsi_ep->ep_cid -
phba->fw_config.iscsi_cid_start] = ep;
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
phba->params.cxns_per_ctrl * 2)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
return ret;
}
beiscsi_ep->cid_vld = 0;
return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d \n",
beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
}
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
if (status || extd_status) {
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
" status = %d extd_status = %d \n",
status, extd_status);
free_mcc_tag(&phba->ctrl, tag);
return -1;
} else {
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
ptcpcnct_out = embedded_payload(wrb);
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
beiscsi_ep->cid_vld = 1;
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
}
return 0;
}
/**
@ -459,14 +527,12 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
* beiscsi_free_ep - free endpoint
* @ep: pointer to iscsi endpoint structure
*/
static void beiscsi_free_ep(struct iscsi_endpoint *ep)
static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
iscsi_destroy_endpoint(ep);
}
/**
@ -495,9 +561,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
if (phba->state) {
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n");
SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
return ERR_PTR(ret);
}
@ -509,9 +575,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
beiscsi_ep = ep->dd_data;
beiscsi_ep->phba = phba;
beiscsi_ep->openiscsi_ep = ep;
if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
ret = -ENOMEM;
goto free_ep;
}
@ -519,7 +585,7 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ep;
free_ep:
beiscsi_free_ep(ep);
beiscsi_free_ep(beiscsi_ep);
return ERR_PTR(ret);
}
@ -546,20 +612,22 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* @ep: The iscsi endpoint
* @flag: The type of connection closure
*/
static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
{
int ret = 0;
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
unsigned int tag;
struct beiscsi_hba *phba = beiscsi_ep->phba;
if (MGMT_STATUS_SUCCESS !=
mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
CONNECTION_UPLOAD_GRACEFUL)) {
tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
if (!tag) {
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
beiscsi_ep->ep_cid);
ret = -1;
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
free_mcc_tag(&phba->ctrl, tag);
}
return ret;
}
@ -574,19 +642,17 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_hba *phba;
int flag = 0;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
beiscsi_ep->ep_cid);
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
beiscsi_close_conn(ep, flag);
}
beiscsi_free_ep(ep);
}
/**
@ -619,23 +685,31 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
struct iscsi_session *session = conn->session;
struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
unsigned int status;
unsigned int tag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
return;
}
status = mgmt_invalidate_connection(phba, beiscsi_ep,
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n",
beiscsi_ep->ep_cid);
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid, 1,
savecfg_flag);
if (status != MGMT_STATUS_SUCCESS) {
if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d \n",
beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
free_mcc_tag(&phba->ctrl, tag);
}
beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
beiscsi_free_ep(beiscsi_ep);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_conn_stop(cls_conn, flag);
}

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or

File diff suppressed because it is too large. Load diff

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -40,31 +40,29 @@
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
/* DEVICE ID's for BE2 */
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
#define OC_DEVICE_ID2 0x703
#define OC_DEVICE_ID3 0x712
#define OC_DEVICE_ID4 0x222
#define BE2_MAX_SESSIONS 64
/* DEVICE ID's for BE3 */
#define BE_DEVICE_ID2 0x222
#define OC_DEVICE_ID3 0x712
#define BE2_IO_DEPTH 1024
#define BE2_MAX_SESSIONS 256
#define BE2_CMDS_PER_CXN 128
#define BE2_LOGOUTS BE2_MAX_SESSIONS
#define BE2_TMFS 16
#define BE2_NOPOUT_REQ 16
#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
#define BE2_MAX_ICDS 2048
#define BE2_SGE 32
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
#define BE2_IO_DEPTH \
(BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
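/* Editor's note: with the values above this evaluates to
 * 2048 / 2 - (256 + 16 + 16) = 736 outstanding IOs. */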
#define MAX_CPUS 31
#define BEISCSI_SGLIST_ELEMENTS BE2_SGE
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@ -330,6 +328,7 @@ struct beiscsi_hba {
struct workqueue_struct *wq; /* The actual work queue */
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
};
struct beiscsi_session {
@ -656,11 +655,12 @@ struct amap_iscsi_wrb {
} __packed;
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
int index);
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
void beiscsi_process_all_cqs(struct work_struct *work);
struct pdu_nop_out {
u32 dw[12];
};
@ -802,7 +802,6 @@ struct hwi_controller {
struct be_ring default_pdu_hdr;
struct be_ring default_pdu_data;
struct hwi_context_memory *phwi_ctxt;
unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
};
enum hwh_type_enum {

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -48,6 +48,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
pfw_cfg->ulp[0].sq_base;
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
SE_DEBUG(DBG_LVL_8,
"FW reported MAX CXNS as %d \t"
"Max Supported = %d.\n",
phba->fw_config.iscsi_cid_count,
BE2_MAX_SESSIONS);
phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
}
} else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_fw_config \n");
@ -77,6 +85,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
memset(req, 0, sizeof(*req));
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
@ -140,10 +149,17 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
{
struct be_dma_mem nonemb_cmd;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct be_sge *sge = nonembedded_sgl(wrb);
struct be_mcc_wrb *wrb;
struct be_sge *sge;
struct invalidate_commands_params_in *req;
int status = 0;
unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct invalidate_commands_params_in),
@ -156,8 +172,10 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
req = nonemb_cmd.va;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
memset(req, 0, sizeof(*req));
wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@ -172,14 +190,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
status = be_mcc_notify_wait(phba);
if (status)
SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
return tag;
}
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
@ -189,13 +205,19 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short savecfg_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct iscsi_invalidate_connection_params_in *req =
embedded_payload(wrb);
int status = 0;
struct be_mcc_wrb *wrb;
struct iscsi_invalidate_connection_params_in *req;
unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
wrb->tag0 |= tag;
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
@ -208,35 +230,37 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
status = be_mcc_notify_wait(phba);
if (status)
SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return status;
return tag;
}
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid, unsigned int upload_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct tcp_upload_params_in *req = embedded_payload(wrb);
int status = 0;
struct be_mcc_wrb *wrb;
struct tcp_upload_params_in *req;
unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
status = be_mcc_notify_wait(phba);
if (status)
SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return status;
return tag;
}
int mgmt_open_connection(struct beiscsi_hba *phba,
@ -248,13 +272,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_in *req;
unsigned short def_hdr_id;
unsigned short def_data_id;
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
int status = 0;
unsigned int tag = 0;
unsigned int i;
unsigned short cid = beiscsi_ep->ep_cid;
@ -266,7 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@ -311,46 +342,36 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
req->do_offload = 1;
req->dataout_template_pa.lo = ptemplate_address->lo;
req->dataout_template_pa.hi = ptemplate_address->hi;
status = be_mcc_notify_wait(phba);
if (!status) {
struct iscsi_endpoint *ep;
struct tcp_connect_and_offload_out *ptcpcnct_out =
embedded_payload(wrb);
ep = phba->ep_array[ptcpcnct_out->cid];
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
beiscsi_ep->cid_vld = 1;
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
} else
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return status;
return tag;
}
int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr)
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
int status;
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_addr *req;
unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
tag = alloc_mcc_tag(phba);
if (!tag) {
spin_unlock(&ctrl->mbox_lock);
return tag;
}
wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
sizeof(*req));
status = be_mcc_notify_wait(phba);
if (!status) {
struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
memcpy(mac_addr, resp->mac_address, ETH_ALEN);
}
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
return status;
return tag;
}

View file

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2009 ServerEngines
* Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -231,6 +231,7 @@ struct beiscsi_endpoint {
struct beiscsi_hba *phba;
struct beiscsi_sess *sess;
struct beiscsi_conn *conn;
struct iscsi_endpoint *openiscsi_ep;
unsigned short ip_type;
char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
unsigned long dst_addr;
@ -249,7 +250,4 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short issue_reset,
unsigned short savecfg_flag);
unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
char *buf, unsigned int len);
#endif

View file

@ -1426,8 +1426,8 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_CONN_ADDRESS:
if (bnx2i_conn->ep)
len = sprintf(buf, NIPQUAD_FMT "\n",
NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
len = sprintf(buf, "%pI4\n",
&bnx2i_conn->ep->cm_sk->dst_ip);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
@ -1990,6 +1990,7 @@ static struct scsi_host_template bnx2i_host_template = {
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_target_reset,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
.cmd_per_lun = 32,

View file

@ -219,18 +219,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
break;
}
sa = (cdbp[8] << 8) + cdbp[9];
name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
if (name) {
name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa);
if (name)
printk("%s", name);
if ((cdb_len > 0) && (len != cdb_len))
printk(", in_cdb_len=%d, ext_len=%d",
len, cdb_len);
} else {
else
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
if ((cdb_len > 0) && (len != cdb_len))
printk(", in_cdb_len=%d, ext_len=%d",
len, cdb_len);
}
if ((cdb_len > 0) && (len != cdb_len))
printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
break;
case MAINTENANCE_IN:
sa = cdbp[1] & 0x1f;
@ -349,6 +346,9 @@ void scsi_print_command(struct scsi_cmnd *cmd)
{
int k;
if (cmd->cmnd == NULL)
return;
scmd_printk(KERN_INFO, cmd, "CDB: ");
print_opcode_name(cmd->cmnd, cmd->cmd_len);

View file

@ -591,8 +591,7 @@ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
cxgb3i_conn_max_recv_dlength(conn);
spin_lock_bh(&conn->session->lock);
sprintf(conn->portal_address, NIPQUAD_FMT,
NIPQUAD(c3cn->daddr.sin_addr.s_addr));
sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
conn->portal_port = ntohs(c3cn->daddr.sin_port);
spin_unlock_bh(&conn->session->lock);
@ -709,6 +708,12 @@ static int cxgb3i_host_set_param(struct Scsi_Host *shost,
{
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
if (!hba->ndev) {
shost_printk(KERN_ERR, shost, "Could not set host param. "
"Netdev for host not set.\n");
return -ENODEV;
}
cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
switch (param) {
@ -739,6 +744,12 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
int len = 0;
if (!hba->ndev) {
shost_printk(KERN_ERR, shost, "Could not get host param. "
"Netdev for host not set.\n");
return -ENODEV;
}
cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
switch (param) {
@ -753,7 +764,7 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
__be32 addr;
addr = cxgb3i_get_private_ipv4addr(hba->ndev);
len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
len = sprintf(buf, "%pI4", &addr);
break;
}
default:

View file

@ -1675,10 +1675,11 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
} else
c3cn->saddr.sin_addr.s_addr = sipv4;
c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
c3cn,
&c3cn->saddr.sin_addr.s_addr,
ntohs(c3cn->saddr.sin_port),
NIPQUAD(c3cn->daddr.sin_addr.s_addr),
&c3cn->daddr.sin_addr.s_addr,
ntohs(c3cn->daddr.sin_port));
c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
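
As an editorial illustration (not part of the diff; the function name is hypothetical): the %pI4 extension used in the conversions above takes a pointer to a big-endian 32-bit address, so the old NIPQUAD_FMT/NIPQUAD() pair collapses into a single specifier:

static void example_print_peer(void)
{
	__be32 addr = cpu_to_be32(0xc0a80001);	/* 192.168.0.1 */

	printk(KERN_INFO "peer %pI4\n", &addr);	/* prints "peer 192.168.0.1" */
}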

View file

@ -461,10 +461,8 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
skb = skb_peek(&c3cn->receive_queue);
}
read_unlock(&c3cn->callback_lock);
if (c3cn) {
c3cn->copied_seq += read;
cxgb3i_c3cn_rx_credits(c3cn, read);
}
c3cn->copied_seq += read;
cxgb3i_c3cn_rx_credits(c3cn, read);
conn->rxdata_octets += read;
if (err) {

View file

@ -717,6 +717,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"IBM", "2145" },
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
{"NETAPP", "LUN"},
{"AIX", "NVDISK"},
{NULL, NULL}
};

View file

@ -1509,7 +1509,7 @@ static int option_setup(char *str)
char *cur = str;
int i = 1;
while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL)

View file

@ -1449,9 +1449,6 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
if (offset > 15)
goto do_reject;
if (esp->flags & ESP_FLAG_DISABLE_SYNC)
offset = 0;
if (offset) {
int one_clock;
@ -2405,12 +2402,6 @@ static int esp_slave_configure(struct scsi_device *dev)
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
/* Bypass async domain validation */
dev->ppr = 0;
dev->sdtr = 0;
}
goal_tags = 0;
if (dev->tagged_supported) {
@ -2660,7 +2651,10 @@ static void esp_set_offset(struct scsi_target *target, int offset)
struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_offset = offset;
if (esp->flags & ESP_FLAG_DISABLE_SYNC)
tp->nego_goal_offset = 0;
else
tp->nego_goal_offset = offset;
tp->flags |= ESP_TGT_CHECK_NEGO;
}

View file

@ -36,7 +36,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.0.0.1121"
#define DRV_VERSION "1.4.0.98"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "

View file

@ -620,6 +620,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
shost_printk(KERN_INFO, fnic->lport->host,
"firmware supports FIP\n");
/* enable directed and multicast */
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
} else {
@ -698,6 +700,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
goto err_out_remove_scsi_host;
}
fc_lport_init_stats(lp);
fc_lport_config(lp);
if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +

View file

@ -94,7 +94,7 @@ enum vnic_devcmd_cmd {
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),

File diff suppressed because it is too large. Load diff

File diff suppressed because it is too large. Load diff

View file

@ -32,109 +32,101 @@
#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
#endif
/* typedefs */
#ifdef __KERNEL__
typedef u32 ulong32;
typedef u64 ulong64;
#endif
#define PACKED __attribute__((packed))
/* scatter/gather element */
typedef struct {
ulong32 sg_ptr; /* address */
ulong32 sg_len; /* length */
} PACKED gdth_sg_str;
u32 sg_ptr; /* address */
u32 sg_len; /* length */
} __attribute__((packed)) gdth_sg_str;
/* scatter/gather element - 64bit addresses */
typedef struct {
ulong64 sg_ptr; /* address */
ulong32 sg_len; /* length */
} PACKED gdth_sg64_str;
u64 sg_ptr; /* address */
u32 sg_len; /* length */
} __attribute__((packed)) gdth_sg64_str;
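/*
 * Editor's summary, not part of the diff: the remainder of this file
 * repeats one mechanical substitution:
 *
 *	unchar  -> u8		ushort  -> u16
 *	ulong32 -> u32		ulong64 -> u64
 *	PACKED  -> __attribute__((packed))
 */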
/* command structure */
typedef struct {
ulong32 BoardNode; /* board node (always 0) */
ulong32 CommandIndex; /* command number */
ushort OpCode; /* the command (READ,..) */
u32 BoardNode; /* board node (always 0) */
u32 CommandIndex; /* command number */
u16 OpCode; /* the command (READ,..) */
union {
struct {
ushort DeviceNo; /* number of cache drive */
ulong32 BlockNo; /* block number */
ulong32 BlockCnt; /* block count */
ulong32 DestAddr; /* dest. addr. (if s/g: -1) */
ulong32 sg_canz; /* s/g element count */
u16 DeviceNo; /* number of cache drive */
u32 BlockNo; /* block number */
u32 BlockCnt; /* block count */
u32 DestAddr; /* dest. addr. (if s/g: -1) */
u32 sg_canz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED cache; /* cache service cmd. str. */
} __attribute__((packed)) cache; /* cache service cmd. str. */
struct {
ushort DeviceNo; /* number of cache drive */
ulong64 BlockNo; /* block number */
ulong32 BlockCnt; /* block count */
ulong64 DestAddr; /* dest. addr. (if s/g: -1) */
ulong32 sg_canz; /* s/g element count */
u16 DeviceNo; /* number of cache drive */
u64 BlockNo; /* block number */
u32 BlockCnt; /* block count */
u64 DestAddr; /* dest. addr. (if s/g: -1) */
u32 sg_canz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED cache64; /* cache service cmd. str. */
} __attribute__((packed)) cache64; /* cache service cmd. str. */
struct {
ushort param_size; /* size of p_param buffer */
ulong32 subfunc; /* IOCTL function */
ulong32 channel; /* device */
ulong64 p_param; /* buffer */
} PACKED ioctl; /* IOCTL command structure */
u16 param_size; /* size of p_param buffer */
u32 subfunc; /* IOCTL function */
u32 channel; /* device */
u64 p_param; /* buffer */
} __attribute__((packed)) ioctl; /* IOCTL command structure */
struct {
ushort reserved;
u16 reserved;
union {
struct {
ulong32 msg_handle; /* message handle */
ulong64 msg_addr; /* message buffer address */
} PACKED msg;
unchar data[12]; /* buffer for rtc data, ... */
u32 msg_handle; /* message handle */
u64 msg_addr; /* message buffer address */
} __attribute__((packed)) msg;
u8 data[12]; /* buffer for rtc data, ... */
} su;
} PACKED screen; /* screen service cmd. str. */
} __attribute__((packed)) screen; /* screen service cmd. str. */
struct {
ushort reserved;
ulong32 direction; /* data direction */
ulong32 mdisc_time; /* disc. time (0: no timeout)*/
ulong32 mcon_time; /* connect time(0: no to.) */
ulong32 sdata; /* dest. addr. (if s/g: -1) */
ulong32 sdlen; /* data length (bytes) */
ulong32 clen; /* SCSI cmd. length(6,10,12) */
unchar cmd[12]; /* SCSI command */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar bus; /* SCSI bus number */
unchar priority; /* only 0 used */
ulong32 sense_len; /* sense data length */
ulong32 sense_data; /* sense data addr. */
ulong32 link_p; /* linked cmds (not supp.) */
ulong32 sg_ranz; /* s/g element count */
u16 reserved;
u32 direction; /* data direction */
u32 mdisc_time; /* disc. time (0: no timeout)*/
u32 mcon_time; /* connect time(0: no to.) */
u32 sdata; /* dest. addr. (if s/g: -1) */
u32 sdlen; /* data length (bytes) */
u32 clen; /* SCSI cmd. length(6,10,12) */
u8 cmd[12]; /* SCSI command */
u8 target; /* target ID */
u8 lun; /* LUN */
u8 bus; /* SCSI bus number */
u8 priority; /* only 0 used */
u32 sense_len; /* sense data length */
u32 sense_data; /* sense data addr. */
u32 link_p; /* linked cmds (not supp.) */
u32 sg_ranz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED raw; /* raw service cmd. struct. */
} __attribute__((packed)) raw; /* raw service cmd. struct. */
struct {
ushort reserved;
ulong32 direction; /* data direction */
ulong32 mdisc_time; /* disc. time (0: no timeout)*/
ulong32 mcon_time; /* connect time(0: no to.) */
ulong64 sdata; /* dest. addr. (if s/g: -1) */
ulong32 sdlen; /* data length (bytes) */
ulong32 clen; /* SCSI cmd. length(6,..,16) */
unchar cmd[16]; /* SCSI command */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar bus; /* SCSI bus number */
unchar priority; /* only 0 used */
ulong32 sense_len; /* sense data length */
ulong64 sense_data; /* sense data addr. */
ulong32 sg_ranz; /* s/g element count */
u16 reserved;
u32 direction; /* data direction */
u32 mdisc_time; /* disc. time (0: no timeout)*/
u32 mcon_time; /* connect time(0: no to.) */
u64 sdata; /* dest. addr. (if s/g: -1) */
u32 sdlen; /* data length (bytes) */
u32 clen; /* SCSI cmd. length(6,..,16) */
u8 cmd[16]; /* SCSI command */
u8 target; /* target ID */
u8 lun; /* LUN */
u8 bus; /* SCSI bus number */
u8 priority; /* only 0 used */
u32 sense_len; /* sense data length */
u64 sense_data; /* sense data addr. */
u32 sg_ranz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
} PACKED raw64; /* raw service cmd. struct. */
} __attribute__((packed)) raw64; /* raw service cmd. struct. */
} u;
/* additional variables */
unchar Service; /* controller service */
unchar reserved;
ushort Status; /* command result */
ulong32 Info; /* additional information */
u8 Service; /* controller service */
u8 reserved;
u16 Status; /* command result */
u32 Info; /* additional information */
void *RequestBuffer; /* request buffer */
} PACKED gdth_cmd_str;
} __attribute__((packed)) gdth_cmd_str;
/* controller event structure */
#define ES_ASYNC 1
@ -142,129 +134,129 @@ typedef struct {
#define ES_TEST 3
#define ES_SYNC 4
typedef struct {
ushort size; /* size of structure */
u16 size; /* size of structure */
union {
char stream[16];
struct {
ushort ionode;
ushort service;
ulong32 index;
} PACKED driver;
u16 ionode;
u16 service;
u32 index;
} __attribute__((packed)) driver;
struct {
ushort ionode;
ushort service;
ushort status;
ulong32 info;
unchar scsi_coord[3];
} PACKED async;
u16 ionode;
u16 service;
u16 status;
u32 info;
u8 scsi_coord[3];
} __attribute__((packed)) async;
struct {
ushort ionode;
ushort service;
ushort status;
ulong32 info;
ushort hostdrive;
unchar scsi_coord[3];
unchar sense_key;
} PACKED sync;
u16 ionode;
u16 service;
u16 status;
u32 info;
u16 hostdrive;
u8 scsi_coord[3];
u8 sense_key;
} __attribute__((packed)) sync;
struct {
ulong32 l1, l2, l3, l4;
} PACKED test;
u32 l1, l2, l3, l4;
} __attribute__((packed)) test;
} eu;
ulong32 severity;
unchar event_string[256];
} PACKED gdth_evt_data;
u32 severity;
u8 event_string[256];
} __attribute__((packed)) gdth_evt_data;
typedef struct {
ulong32 first_stamp;
ulong32 last_stamp;
ushort same_count;
ushort event_source;
ushort event_idx;
unchar application;
unchar reserved;
u32 first_stamp;
u32 last_stamp;
u16 same_count;
u16 event_source;
u16 event_idx;
u8 application;
u8 reserved;
gdth_evt_data event_data;
} PACKED gdth_evt_str;
} __attribute__((packed)) gdth_evt_str;
#ifdef GDTH_IOCTL_PROC
/* IOCTL structure (write) */
typedef struct {
ulong32 magic; /* IOCTL magic */
ushort ioctl; /* IOCTL */
ushort ionode; /* controller number */
ushort service; /* controller service */
ushort timeout; /* timeout */
u32 magic; /* IOCTL magic */
u16 ioctl; /* IOCTL */
u16 ionode; /* controller number */
u16 service; /* controller service */
u16 timeout; /* timeout */
union {
struct {
unchar command[512]; /* controller command */
unchar data[1]; /* add. data */
u8 command[512]; /* controller command */
u8 data[1]; /* add. data */
} general;
struct {
unchar lock; /* lock/unlock */
unchar drive_cnt; /* drive count */
ushort drives[MAX_HDRIVES];/* drives */
u8 lock; /* lock/unlock */
u8 drive_cnt; /* drive count */
u16 drives[MAX_HDRIVES];/* drives */
} lockdrv;
struct {
unchar lock; /* lock/unlock */
unchar channel; /* channel */
u8 lock; /* lock/unlock */
u8 channel; /* channel */
} lockchn;
struct {
int erase; /* erase event ? */
int handle;
unchar evt[EVENT_SIZE]; /* event structure */
u8 evt[EVENT_SIZE]; /* event structure */
} event;
struct {
unchar bus; /* SCSI bus */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar cmd_len; /* command length */
unchar cmd[12]; /* SCSI command */
u8 bus; /* SCSI bus */
u8 target; /* target ID */
u8 lun; /* LUN */
u8 cmd_len; /* command length */
u8 cmd[12]; /* SCSI command */
} scsi;
struct {
ushort hdr_no; /* host drive number */
unchar flag; /* old meth./add/remove */
u16 hdr_no; /* host drive number */
u8 flag; /* old meth./add/remove */
} rescan;
} iu;
} gdth_iowr_str;
/* IOCTL structure (read) */
typedef struct {
ulong32 size; /* buffer size */
ulong32 status; /* IOCTL error code */
u32 size; /* buffer size */
u32 status; /* IOCTL error code */
union {
struct {
unchar data[1]; /* data */
u8 data[1]; /* data */
} general;
struct {
ushort version; /* driver version */
u16 version; /* driver version */
} drvers;
struct {
unchar type; /* controller type */
ushort info; /* slot etc. */
ushort oem_id; /* OEM ID */
ushort bios_ver; /* not used */
ushort access; /* not used */
ushort ext_type; /* extended type */
ushort device_id; /* device ID */
ushort sub_device_id; /* sub device ID */
u8 type; /* controller type */
u16 info; /* slot etc. */
u16 oem_id; /* OEM ID */
u16 bios_ver; /* not used */
u16 access; /* not used */
u16 ext_type; /* extended type */
u16 device_id; /* device ID */
u16 sub_device_id; /* sub device ID */
} ctrtype;
struct {
unchar version; /* OS version */
unchar subversion; /* OS subversion */
ushort revision; /* revision */
u8 version; /* OS version */
u8 subversion; /* OS subversion */
u16 revision; /* revision */
} osvers;
struct {
ushort count; /* controller count */
u16 count; /* controller count */
} ctrcnt;
struct {
int handle;
unchar evt[EVENT_SIZE]; /* event structure */
u8 evt[EVENT_SIZE]; /* event structure */
} event;
struct {
unchar bus; /* SCSI bus, 0xff: invalid */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar cluster_type; /* cluster properties */
u8 bus; /* SCSI bus, 0xff: invalid */
u8 target; /* target ID */
u8 lun; /* LUN */
u8 cluster_type; /* cluster properties */
} hdr_list[MAX_HDRIVES]; /* index is host drive number */
} iu;
} gdth_iord_str;
@ -272,53 +264,53 @@ typedef struct {
/* GDTIOCTL_GENERAL */
typedef struct {
ushort ionode; /* controller number */
ushort timeout; /* timeout */
ulong32 info; /* error info */
ushort status; /* status */
ulong data_len; /* data buffer size */
ulong sense_len; /* sense buffer size */
u16 ionode; /* controller number */
u16 timeout; /* timeout */
u32 info; /* error info */
u16 status; /* status */
unsigned long data_len; /* data buffer size */
unsigned long sense_len; /* sense buffer size */
gdth_cmd_str command; /* command */
} gdth_ioctl_general;
/* GDTIOCTL_LOCKDRV */
typedef struct {
ushort ionode; /* controller number */
unchar lock; /* lock/unlock */
unchar drive_cnt; /* drive count */
ushort drives[MAX_HDRIVES]; /* drives */
u16 ionode; /* controller number */
u8 lock; /* lock/unlock */
u8 drive_cnt; /* drive count */
u16 drives[MAX_HDRIVES]; /* drives */
} gdth_ioctl_lockdrv;
/* GDTIOCTL_LOCKCHN */
typedef struct {
ushort ionode; /* controller number */
unchar lock; /* lock/unlock */
unchar channel; /* channel */
u16 ionode; /* controller number */
u8 lock; /* lock/unlock */
u8 channel; /* channel */
} gdth_ioctl_lockchn;
/* GDTIOCTL_OSVERS */
typedef struct {
unchar version; /* OS version */
unchar subversion; /* OS subversion */
ushort revision; /* revision */
u8 version; /* OS version */
u8 subversion; /* OS subversion */
u16 revision; /* revision */
} gdth_ioctl_osvers;
/* GDTIOCTL_CTRTYPE */
typedef struct {
ushort ionode; /* controller number */
unchar type; /* controller type */
ushort info; /* slot etc. */
ushort oem_id; /* OEM ID */
ushort bios_ver; /* not used */
ushort access; /* not used */
ushort ext_type; /* extended type */
ushort device_id; /* device ID */
ushort sub_device_id; /* sub device ID */
u16 ionode; /* controller number */
u8 type; /* controller type */
u16 info; /* slot etc. */
u16 oem_id; /* OEM ID */
u16 bios_ver; /* not used */
u16 access; /* not used */
u16 ext_type; /* extended type */
u16 device_id; /* device ID */
u16 sub_device_id; /* sub device ID */
} gdth_ioctl_ctrtype;
/* GDTIOCTL_EVENT */
typedef struct {
ushort ionode;
u16 ionode;
int erase; /* erase event? */
int handle; /* event handle */
gdth_evt_str event;
@ -326,22 +318,22 @@ typedef struct {
/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
typedef struct {
ushort ionode; /* controller number */
unchar flag; /* add/remove */
ushort hdr_no; /* drive no. */
u16 ionode; /* controller number */
u8 flag; /* add/remove */
u16 hdr_no; /* drive no. */
struct {
unchar bus; /* SCSI bus */
unchar target; /* target ID */
unchar lun; /* LUN */
unchar cluster_type; /* cluster properties */
u8 bus; /* SCSI bus */
u8 target; /* target ID */
u8 lun; /* LUN */
u8 cluster_type; /* cluster properties */
} hdr_list[MAX_HDRIVES]; /* index is host drive number */
} gdth_ioctl_rescan;
/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
typedef struct {
ushort ionode; /* controller number */
ushort number; /* bus/host drive number */
ushort status; /* status */
u16 ionode; /* controller number */
u16 number; /* bus/host drive number */
u16 status; /* status */
} gdth_ioctl_reset;
#endif

View file

@ -43,7 +43,7 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int i, found;
gdth_cmd_str gdtcmd;
gdth_cpar_str *pcpar;
ulong64 paddr;
u64 paddr;
char cmnd[MAX_COMMAND_SIZE];
memset(cmnd, 0xff, 12);
@ -156,8 +156,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
off_t begin = 0,pos = 0;
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
ulong32 cnt;
ulong64 paddr;
u32 cnt;
u64 paddr;
int rc = -ENOMEM;
gdth_cmd_str *gdtcmd;
@ -220,14 +220,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
if (ha->more_proc)
sprintf(hrec, "%d.%02d.%02d-%c%03X",
(unchar)(ha->binfo.upd_fw_ver>>24),
(unchar)(ha->binfo.upd_fw_ver>>16),
(unchar)(ha->binfo.upd_fw_ver),
(u8)(ha->binfo.upd_fw_ver>>24),
(u8)(ha->binfo.upd_fw_ver>>16),
(u8)(ha->binfo.upd_fw_ver),
ha->bfeat.raid ? 'R':'N',
ha->binfo.upd_revision);
else
sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8),
(unchar)(ha->cpar.version));
sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
(u8)(ha->cpar.version));
size = sprintf(buffer+len,
" Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
@ -281,7 +281,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
pds->bid = ha->raw[i].local_no;
pds->first = 0;
pds->entries = ha->raw[i].pdev_cnt;
cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) /
cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
sizeof(pds->list[0]);
if (pds->entries > cnt)
pds->entries = cnt;
@ -604,7 +604,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
size = sprintf(buffer+len,
" Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
(ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
(u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
len += size; pos = begin + len;
if (pos < offset) {
len = 0;
@ -664,9 +664,9 @@ free_fail:
}
static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr)
u64 *paddr)
{
ulong flags;
unsigned long flags;
char *ret_val;
if (size == 0)
@ -691,9 +691,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
return ret_val;
}
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
{
ulong flags;
unsigned long flags;
if (buf == ha->pscratch) {
spin_lock_irqsave(&ha->smp_lock, flags);
@ -705,16 +705,16 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
}
#ifdef GDTH_IOCTL_PROC
static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
{
ulong flags;
unsigned long flags;
int ret_val;
spin_lock_irqsave(&ha->smp_lock, flags);
ret_val = FALSE;
if (ha->scratch_busy) {
if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size)
if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
ret_val = TRUE;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
@ -724,11 +724,11 @@ static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
ulong flags;
unsigned long flags;
int i;
Scsi_Cmnd *scp;
struct gdth_cmndinfo *cmndinfo;
unchar b, t;
u8 b, t;
spin_lock_irqsave(&ha->smp_lock, flags);
@ -738,8 +738,8 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
b = scp->device->channel;
t = scp->device->id;
if (!SPECIAL_SCP(scp) && t == (unchar)id &&
b == (unchar)busnum) {
if (!SPECIAL_SCP(scp) && t == (u8)id &&
b == (u8)busnum) {
cmndinfo->wait_for_completion = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
while (!cmndinfo->wait_for_completion)

View file

@ -17,8 +17,8 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int length, gdth_ha_str *ha);
static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
u64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
#endif

Diff not shown because of its large size.

View file

@ -33,7 +33,7 @@ struct access_method {
struct CommandList *c);
void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
unsigned long (*fifo_full)(struct ctlr_info *h);
unsigned long (*intr_pending)(struct ctlr_info *h);
bool (*intr_pending)(struct ctlr_info *h);
unsigned long (*command_completed)(struct ctlr_info *h);
};
@ -55,19 +55,20 @@ struct ctlr_info {
char *product_name;
char firm_ver[4]; /* Firmware version */
struct pci_dev *pdev;
__u32 board_id;
u32 board_id;
void __iomem *vaddr;
unsigned long paddr;
int nr_cmds; /* Number of commands allowed on this controller */
struct CfgTable __iomem *cfgtable;
int max_sg_entries;
int interrupts_enabled;
int major;
int max_commands;
int commands_outstanding;
int max_outstanding; /* Debug */
int usage_count; /* number of opens all all minor devices */
# define DOORBELL_INT 0
# define PERF_MODE_INT 1
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
unsigned int intr[4];
@ -93,6 +94,9 @@ struct ctlr_info {
int nr_frees;
int busy_initializing;
int busy_scanning;
int scan_finished;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
struct mutex busy_shutting_down;
struct list_head scan_list;
struct completion scan_wait;
@ -102,6 +106,24 @@ struct ctlr_info {
int ndevices; /* number of used elements in .dev[] array. */
#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
/*
* Performant mode tables.
*/
u32 trans_support;
u32 trans_offset;
struct TransTable_struct *transtable;
unsigned long transMethod;
/*
* Performant mode completion buffer
*/
u64 *reply_pool;
dma_addr_t reply_pool_dhandle;
u64 *reply_pool_head;
size_t reply_pool_size;
unsigned char reply_pool_wraparound;
u32 *blockFetchTable;
unsigned char *hba_inquiry_data;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
@ -164,9 +186,16 @@ struct ctlr_info {
#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
#define HPSA_ERROR_BIT 0x02
#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
/* Performant mode flags */
#define SA5_PERF_INTR_PENDING 0x04
#define SA5_PERF_INTR_OFF 0x05
#define SA5_OUTDB_STATUS_PERF_BIT 0x01
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
#define SA5_OUTDB_CLEAR 0xA0
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
#define SA5_OUTDB_STATUS 0x9C
#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0
@ -176,10 +205,8 @@ struct ctlr_info {
static void SA5_submit_command(struct ctlr_info *h,
struct CommandList *c)
{
#ifdef HPSA_DEBUG
printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
c->busaddr);
#endif /* HPSA_DEBUG */
dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
h->commands_outstanding++;
if (h->commands_outstanding > h->max_outstanding)
@ -202,6 +229,52 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
}
}
static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
if (val) { /* turn on interrupts */
h->interrupts_enabled = 1;
writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
} else {
h->interrupts_enabled = 0;
writel(SA5_PERF_INTR_OFF,
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
}
}
static unsigned long SA5_performant_completed(struct ctlr_info *h)
{
unsigned long register_value = FIFO_EMPTY;
/* flush the controller write of the reply queue by reading
* outbound doorbell status register.
*/
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
/* msi auto clears the interrupt pending bit. */
if (!(h->msi_vector || h->msix_vector)) {
writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
/* Do a read in order to flush the write to the controller
* (as per spec.)
*/
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
}
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
register_value = *(h->reply_pool_head);
(h->reply_pool_head)++;
h->commands_outstanding--;
} else {
register_value = FIFO_EMPTY;
}
/* Check for wraparound */
if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
h->reply_pool_head = h->reply_pool;
h->reply_pool_wraparound ^= 1;
}
return register_value;
}
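SA5_performant_completed() consumes completions from a reply ring in host memory: bit 0 of each 64-bit entry is a parity ("valid") bit the controller flips on every lap, so the consumer can tell a fresh slot from a stale one without zeroing the ring. A stand-alone sketch of that consume/wraparound logic, with invented names (ring_pop, RING_SIZE) in place of the driver's h->reply_pool state:

#include <stdint.h>
#include <stddef.h>

#define RING_SIZE  8			/* stands in for h->max_commands */
#define RING_EMPTY ((uint64_t)-1)	/* stands in for FIFO_EMPTY */

struct reply_ring {
	uint64_t slots[RING_SIZE];	/* written by the controller */
	size_t   head;			/* next slot to consume */
	unsigned wrap;			/* parity expected this lap */
};

/* Pop one completion, or RING_EMPTY if the producer has not caught up.
 * An entry left over from the previous lap carries the opposite parity
 * in bit 0, so it is never consumed twice. */
static uint64_t ring_pop(struct reply_ring *r)
{
	uint64_t v = RING_EMPTY;

	if ((r->slots[r->head] & 1) == r->wrap) {
		v = r->slots[r->head++];
		if (r->head == RING_SIZE) {	/* wrapped: expect flipped bit */
			r->head = 0;
			r->wrap ^= 1;
		}
	}
	return v;
}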
/*
* Returns true if fifo is full.
*
@ -228,10 +301,10 @@ static unsigned long SA5_completed(struct ctlr_info *h)
#ifdef HPSA_DEBUG
if (register_value != FIFO_EMPTY)
printk(KERN_INFO "hpsa: Read %lx back from board\n",
dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
register_value);
else
printk(KERN_INFO "hpsa: FIFO Empty read\n");
dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
#endif
return register_value;
@ -239,18 +312,28 @@ static unsigned long SA5_completed(struct ctlr_info *h)
/*
* Returns true if an interrupt is pending..
*/
static unsigned long SA5_intr_pending(struct ctlr_info *h)
static bool SA5_intr_pending(struct ctlr_info *h)
{
unsigned long register_value =
readl(h->vaddr + SA5_INTR_STATUS);
#ifdef HPSA_DEBUG
printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
#endif /* HPSA_DEBUG */
if (register_value & SA5_INTR_PENDING)
return 1;
return 0 ;
dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
return register_value & SA5_INTR_PENDING;
}
static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
if (!register_value)
return false;
if (h->msi_vector || h->msix_vector)
return true;
/* Read outbound doorbell to flush */
register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
static struct access_method SA5_access = {
SA5_submit_command,
@ -260,14 +343,19 @@ static struct access_method SA5_access = {
SA5_completed,
};
static struct access_method SA5_performant_access = {
SA5_submit_command,
SA5_performant_intr_mask,
SA5_fifo_full,
SA5_performant_intr_pending,
SA5_performant_completed,
};
struct board_type {
__u32 board_id;
u32 board_id;
char *product_name;
struct access_method *access;
};
/* end of old hpsa_scsi.h file */
#endif /* HPSA_H */

View file

@ -101,19 +101,20 @@
#define CFGTBL_AccCmds 0x00000001l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
#define CFGTBL_BusType_Fibre1G 0x00000100l
#define CFGTBL_BusType_Fibre2G 0x00000200l
struct vals32 {
__u32 lower;
__u32 upper;
u32 lower;
u32 upper;
};
union u64bit {
struct vals32 val32;
__u64 val;
u64 val;
};
/* FIXME this is a per controller value (barf!) */
@ -126,34 +127,34 @@ union u64bit {
#define HPSA_INQUIRY 0x12
struct InquiryData {
__u8 data_byte[36];
u8 data_byte[36];
};
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
struct ReportLUNdata {
__u8 LUNListLength[4];
__u32 reserved;
__u8 LUN[HPSA_MAX_LUN][8];
u8 LUNListLength[4];
u32 reserved;
u8 LUN[HPSA_MAX_LUN][8];
};
struct ReportExtendedLUNdata {
__u8 LUNListLength[4];
__u8 extended_response_flag;
__u8 reserved[3];
__u8 LUN[HPSA_MAX_LUN][24];
u8 LUNListLength[4];
u8 extended_response_flag;
u8 reserved[3];
u8 LUN[HPSA_MAX_LUN][24];
};
struct SenseSubsystem_info {
__u8 reserved[36];
__u8 portname[8];
__u8 reserved1[1108];
u8 reserved[36];
u8 portname[8];
u8 reserved1[1108];
};
#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
struct ReadCapdata {
__u8 total_size[4]; /* Total size in blocks */
__u8 block_size[4]; /* Size of blocks in bytes */
u8 total_size[4]; /* Total size in blocks */
u8 block_size[4]; /* Size of blocks in bytes */
};
#if 0
@ -174,112 +175,131 @@ struct ReadCapdata {
/* Command List Structure */
union SCSI3Addr {
struct {
__u8 Dev;
__u8 Bus:6;
__u8 Mode:2; /* b00 */
u8 Dev;
u8 Bus:6;
u8 Mode:2; /* b00 */
} PeripDev;
struct {
__u8 DevLSB;
__u8 DevMSB:6;
__u8 Mode:2; /* b01 */
u8 DevLSB;
u8 DevMSB:6;
u8 Mode:2; /* b01 */
} LogDev;
struct {
__u8 Dev:5;
__u8 Bus:3;
__u8 Targ:6;
__u8 Mode:2; /* b10 */
u8 Dev:5;
u8 Bus:3;
u8 Targ:6;
u8 Mode:2; /* b10 */
} LogUnit;
};
struct PhysDevAddr {
__u32 TargetId:24;
__u32 Bus:6;
__u32 Mode:2;
u32 TargetId:24;
u32 Bus:6;
u32 Mode:2;
/* 2 level target device addr */
union SCSI3Addr Target[2];
};
struct LogDevAddr {
__u32 VolId:30;
__u32 Mode:2;
__u8 reserved[4];
u32 VolId:30;
u32 Mode:2;
u8 reserved[4];
};
union LUNAddr {
__u8 LunAddrBytes[8];
u8 LunAddrBytes[8];
union SCSI3Addr SCSI3Lun[4];
struct PhysDevAddr PhysDev;
struct LogDevAddr LogDev;
};
struct CommandListHeader {
__u8 ReplyQueue;
__u8 SGList;
__u16 SGTotal;
u8 ReplyQueue;
u8 SGList;
u16 SGTotal;
struct vals32 Tag;
union LUNAddr LUN;
};
struct RequestBlock {
__u8 CDBLen;
u8 CDBLen;
struct {
__u8 Type:3;
__u8 Attribute:3;
__u8 Direction:2;
u8 Type:3;
u8 Attribute:3;
u8 Direction:2;
} Type;
__u16 Timeout;
__u8 CDB[16];
u16 Timeout;
u8 CDB[16];
};
struct ErrDescriptor {
struct vals32 Addr;
__u32 Len;
u32 Len;
};
struct SGDescriptor {
struct vals32 Addr;
__u32 Len;
__u32 Ext;
u32 Len;
u32 Ext;
};
union MoreErrInfo {
struct {
__u8 Reserved[3];
__u8 Type;
__u32 ErrorInfo;
u8 Reserved[3];
u8 Type;
u32 ErrorInfo;
} Common_Info;
struct {
__u8 Reserved[2];
__u8 offense_size; /* size of offending entry */
__u8 offense_num; /* byte # of offense 0-base */
__u32 offense_value;
u8 Reserved[2];
u8 offense_size; /* size of offending entry */
u8 offense_num; /* byte # of offense 0-base */
u32 offense_value;
} Invalid_Cmd;
};
struct ErrorInfo {
__u8 ScsiStatus;
__u8 SenseLen;
__u16 CommandStatus;
__u32 ResidualCnt;
u8 ScsiStatus;
u8 SenseLen;
u16 CommandStatus;
u32 ResidualCnt;
union MoreErrInfo MoreErrInfo;
__u8 SenseInfo[SENSEINFOBYTES];
u8 SenseInfo[SENSEINFOBYTES];
};
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
struct ctlr_info; /* defined in hpsa.h */
/* The size of this structure needs to be divisible by 8
 * on all architectures, because the controller uses 2
* lower bits of the address, and the driver uses 1 lower
* bit (3 bits total.)
/* This structure needs to be divisible by 32 for new
* indexing method and performant mode.
*/
#define PAD32 32
#define PAD64DIFF 0
#define USEEXTRA ((sizeof(void *) - 4)/4)
#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
/* The size of this structure needs to be divisible by 32
* on all architectures because low 5 bits of the addresses
* are used as follows:
*
* bit 0: to device, used to indicate "performant mode" command
 * from device, indicates error status.
* bit 1-3: to device, indicates block fetch table entry for
* reducing DMA in fetching commands from host memory.
* bit 4: used to indicate whether tag is "direct lookup" (index),
* or a bus address.
*/
struct CommandList {
struct CommandListHeader Header;
struct RequestBlock Request;
struct ErrDescriptor ErrDesc;
struct SGDescriptor SG[MAXSGENTRIES];
/* information associated with the command */
__u32 busaddr; /* physical addr of this record */
u32 busaddr; /* physical addr of this record */
struct ErrorInfo *err_info; /* pointer to the allocated mem */
struct ctlr_info *h;
int cmd_type;
@ -291,35 +311,63 @@ struct CommandList {
struct completion *waiting;
int retry_count;
void *scsi_cmd;
/* on 64 bit architectures, to get this to be 32-byte-aligned
* it so happens we need no padding, on 32 bit systems,
* we need 8 bytes of padding. This does that.
*/
#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
u8 pad[COMMANDLIST_PAD];
};
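Per the comment above, the low five bits of a command's bus address double as flags, which is why sizeof(struct CommandList) must be a multiple of 32; COMMANDLIST_PAD works out to 8 bytes when sizeof(long) is 4 and 0 when it is 8. A sketch of how a direct-lookup tag could be packed and unpacked with the constants defined earlier in this header — illustrative helpers, not driver functions:

#include <stdint.h>

#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT   0x10

/* The command index lives above the five flag bits; bit 4 marks the
 * tag as an index ("direct lookup") rather than a raw bus address. */
static inline uint32_t tag_from_index(uint32_t index)
{
	return (index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}

static inline uint32_t index_from_tag(uint32_t tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}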
/* Configuration Table Structure */
struct HostWrite {
__u32 TransportRequest;
__u32 Reserved;
__u32 CoalIntDelay;
__u32 CoalIntCount;
u32 TransportRequest;
u32 Reserved;
u32 CoalIntDelay;
u32 CoalIntCount;
};
#define SIMPLE_MODE 0x02
#define PERFORMANT_MODE 0x04
#define MEMQ_MODE 0x08
struct CfgTable {
__u8 Signature[4];
__u32 SpecValence;
__u32 TransportSupport;
__u32 TransportActive;
struct HostWrite HostWrite;
__u32 CmdsOutMax;
__u32 BusTypes;
__u32 Reserved;
__u8 ServerName[16];
__u32 HeartBeat;
__u32 SCSI_Prefetch;
u8 Signature[4];
u32 SpecValence;
u32 TransportSupport;
u32 TransportActive;
struct HostWrite HostWrite;
u32 CmdsOutMax;
u32 BusTypes;
u32 TransMethodOffset;
u8 ServerName[16];
u32 HeartBeat;
u32 SCSI_Prefetch;
u32 MaxScatterGatherElements;
u32 MaxLogicalUnits;
u32 MaxPhysicalDevices;
u32 MaxPhysicalDrivesPerLogicalUnit;
u32 MaxPerformantModeCommands;
};
#define NUM_BLOCKFETCH_ENTRIES 8
struct TransTable_struct {
u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
u32 RepQSize;
u32 RepQCount;
u32 RepQCtrAddrLow32;
u32 RepQCtrAddrHigh32;
u32 RepQAddr0Low32;
u32 RepQAddr0High32;
};
struct hpsa_pci_info {
unsigned char bus;
unsigned char dev_fn;
unsigned short domain;
__u32 board_id;
u32 board_id;
};
#pragma pack()

View file

@ -2336,7 +2336,7 @@ static int option_setup(char *str)
char *cur = str;
int i = 1;
while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) {
while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL)
cur++;
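The one-character change above closes an off-by-one: with i starting at 1 and ints[] apparently sized IM_MAX_HOSTS (its declaration is outside this hunk, so that sizing is an inference from the fix), the old bound i <= IM_MAX_HOSTS let the final iteration store to ints[IM_MAX_HOSTS], one element past the array. A tiny stand-alone illustration:

#include <stdio.h>

#define IM_MAX_HOSTS 8	/* illustrative value, not the driver's */

int main(void)
{
	int ints[IM_MAX_HOSTS];	/* valid indices: 0 .. IM_MAX_HOSTS-1 */
	int i = 1;

	/* Old bound (i <= IM_MAX_HOSTS): last store hits ints[IM_MAX_HOSTS],
	 * one past the end. The new bound stops at the last valid slot. */
	while (i < IM_MAX_HOSTS)
		ints[i++] = 0;

	printf("highest index written: %d\n", i - 1);	/* IM_MAX_HOSTS-1 */
	return 0;
}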

View file

@ -40,7 +40,7 @@
* (CRQ), which is just a buffer of 16 byte entries in the receiver's
* Senders cannot access the buffer directly, but send messages by
* making a hypervisor call and passing in the 16 bytes. The hypervisor
* puts the message in the next 16 byte space in round-robbin fashion,
* puts the message in the next 16 byte space in round-robin fashion,
* turns on the high order bit of the message (the valid bit), and
* generates an interrupt to the receiver (if interrupts are turned on.)
* The receiver just turns off the valid bit when they have copied out

View file

@ -584,9 +584,10 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
/* userspace may have goofed up and not bound us */
if (!tcp_sw_conn->sock)
if (!sock)
return;
/*
* Make sure our recv side is stopped.
@ -597,6 +598,11 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
sock->sk->sk_err = EIO;
wake_up_interruptible(sock->sk->sk_sleep);
}
iscsi_conn_stop(cls_conn, flag);
iscsi_sw_tcp_release_conn(conn);
}

View file

@ -1919,10 +1919,11 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
struct iscsi_task *task = NULL;
struct iscsi_task *task = NULL, *running_task;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
int i;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
@ -1947,8 +1948,15 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
}
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task)
if (!task) {
/*
* Raced with completion. Just reset timer, and let it
* complete normally
*/
rc = BLK_EH_RESET_TIMER;
goto done;
}
/*
* If we have sent (at least queued to the network layer) a pdu or
* recvd one for the task since the last timeout ask for
@ -1956,10 +1964,10 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* we can check if it is the task or connection when we send the
* nop as a ping.
*/
if (time_after_eq(task->last_xfer, task->last_timeout)) {
if (time_after(task->last_xfer, task->last_timeout)) {
ISCSI_DBG_EH(session, "Command making progress. Asking "
"scsi-ml for more time to complete. "
"Last data recv at %lu. Last timeout was at "
"Last data xfer at %lu. Last timeout was at "
"%lu\n.", task->last_xfer, task->last_timeout);
task->have_checked_conn = false;
rc = BLK_EH_RESET_TIMER;
@ -1977,6 +1985,43 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
goto done;
}
for (i = 0; i < conn->session->cmds_max; i++) {
running_task = conn->session->cmds[i];
if (!running_task->sc || running_task == task ||
running_task->state != ISCSI_TASK_RUNNING)
continue;
/*
* Only check if cmds started before this one have made
* progress, or this could never fail
*/
if (time_after(running_task->sc->jiffies_at_alloc,
task->sc->jiffies_at_alloc))
continue;
if (time_after(running_task->last_xfer, task->last_timeout)) {
/*
* This task has not made progress, but a task
* started before us has transferred data since
* we started/last-checked. We could be queueing
* too many tasks or the LU is bad.
*
* If the device is bad the cmds ahead of us on
* other devs will complete, and this loop will
* eventually fail starting the scsi eh.
*/
ISCSI_DBG_EH(session, "Command has not made progress "
"but commands ahead of it have. "
"Asking scsi-ml for more time to "
"complete. Our last xfer vs running task "
"last xfer %lu/%lu. Last check %lu.\n",
task->last_xfer, running_task->last_xfer,
task->last_timeout);
rc = BLK_EH_RESET_TIMER;
goto done;
}
}
/* Assumes nop timeout is shorter than scsi cmd timeout */
if (task->have_checked_conn)
goto done;
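The switch from time_after_eq() to time_after() above matters in exactly one case: task->last_xfer == task->last_timeout, i.e. no data has moved since the timeout was recorded, which must not count as progress. Both macros are wrap-safe jiffies comparisons; the definitions below mirror those in <linux/jiffies.h> (simplified to plain unsigned long) so the distinction can be checked in isolation:

#include <stdio.h>

#define time_after(a, b)    ((long)((b) - (a)) < 0)	/* strictly later */
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)	/* later or equal */

int main(void)
{
	unsigned long last_timeout = 1000, last_xfer = 1000;

	/* Equal stamps: the _eq form would wrongly report progress. */
	printf("time_after:    %d\n", time_after(last_xfer, last_timeout));    /* 0 */
	printf("time_after_eq: %d\n", time_after_eq(last_xfer, last_timeout)); /* 1 */
	return 0;
}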

View file

@ -1,5 +1,5 @@
/*
* SCSI RDAM Protocol lib functions
* SCSI RDMA Protocol lib functions
*
* Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
*
@ -328,7 +328,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
int offset, err = 0;
u8 format;
offset = cmd->add_cdb_len * 4;
offset = cmd->add_cdb_len & ~3;
dir = srp_cmd_direction(cmd);
if (dir == DMA_FROM_DEVICE)
@ -366,7 +366,7 @@ static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
int len = 0, offset = cmd->add_cdb_len * 4;
int len = 0, offset = cmd->add_cdb_len & ~3;
u8 fmt;
if (dir == DMA_TO_DEVICE)
@ -440,6 +440,6 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
}
EXPORT_SYMBOL_GPL(srp_cmd_queue);
MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -315,6 +315,9 @@ struct lpfc_vport {
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@ -448,6 +451,8 @@ struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
uint32_t oxid;
uint32_t flags;
#define UNSOL_VALID 0x00000001
};
struct lpfc_hba {
@ -499,6 +504,10 @@ struct lpfc_hba {
(struct lpfc_hba *);
void (*lpfc_stop_port)
(struct lpfc_hba *);
int (*lpfc_hba_init_link)
(struct lpfc_hba *);
int (*lpfc_hba_down_link)
(struct lpfc_hba *);
/* SLI4 specific HBA data structure */
@ -613,6 +622,7 @@ struct lpfc_hba {
uint32_t cfg_enable_bg;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_suppress_link_up;
lpfc_vpd_t vpd; /* vital product data */
@ -790,7 +800,7 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[64];
uint32_t ctx_idx;

View file

@ -481,6 +481,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
return len;
}
/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: one or more lpfc_polling_flags values.
* @count: not used.
*
* Returns:
* -EINVAL if the buffer is not "up" or "down"
* return from link state change function if non-zero
* length of the buf on success
**/
static ssize_t
lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int status = -EINVAL;
if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
(phba->link_state == LPFC_LINK_DOWN))
status = phba->lpfc_hba_init_link(phba);
else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
(phba->link_state >= LPFC_LINK_UP))
status = phba->lpfc_hba_down_link(phba);
if (status == 0)
return strlen(buf);
else
return status;
}
/**
* lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
* @dev: class device that is converted into a Scsi_host.
@ -1219,7 +1254,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
int val = 0;\
uint val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
@ -1247,7 +1282,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
int val = 0;\
uint val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
phba->cfg_##attr);\
@ -1274,7 +1309,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
**/
#define lpfc_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
phba->cfg_##attr = val;\
@ -1309,7 +1344,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
**/
#define lpfc_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
phba->cfg_##attr = val;\
@ -1350,7 +1385,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
int val=0;\
uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
@ -1382,7 +1417,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val = 0;\
uint val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
@ -1409,7 +1444,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val = 0;\
uint val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
@ -1434,7 +1469,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
**/
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
@ -1466,7 +1501,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
**/
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
@ -1502,7 +1537,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val=0;\
uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
@ -1515,22 +1550,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@ -1540,16 +1575,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@ -1559,22 +1594,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@ -1584,16 +1619,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@ -1614,7 +1649,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
lpfc_link_state_store);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@ -1896,6 +1932,15 @@ lpfc_param_init(enable_npiv, 0, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
lpfc_enable_npiv_show, NULL);
/*
# lpfc_suppress_link_up: Bring link up at initialization
# 0x0 = bring link up (issue MBX_INIT_LINK)
# 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
# 0x2 = never bring up link
# Default value is 0.
*/
LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@ -3114,12 +3159,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled (default)
# 0 = MSI disabled
# 1 = MSI enabled
# 2 = MSI-X enabled
# Value range is [0,2]. Default value is 0.
# 2 = MSI-X enabled (default)
# Value range is [0,2]. Default value is 2.
*/
LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
@ -3278,6 +3323,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_suppress_link_up,
NULL,
};
@ -4456,7 +4502,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
return;
}

Diff not shown because of its large size.

View file

@ -0,0 +1,98 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
/* bsg definitions
* No pointers to user data are allowed; all application buffers and sizes will
* be derived through the bsg interface.
*
* These are the vendor unique structures passed in using the bsg
* FC_BSG_HST_VENDOR message code type.
*/
#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
#define LPFC_BSG_VENDOR_DIAG_MODE 4
#define LPFC_BSG_VENDOR_DIAG_TEST 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
struct set_ct_event {
uint32_t command;
uint32_t type_mask;
uint32_t ev_req_id;
uint32_t ev_reg_id;
};
struct get_ct_event {
uint32_t command;
uint32_t ev_reg_id;
uint32_t ev_req_id;
};
struct get_ct_event_reply {
uint32_t immed_data;
uint32_t type;
};
struct send_mgmt_resp {
uint32_t command;
uint32_t tag;
};
#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
struct diag_mode_set {
uint32_t command;
uint32_t type;
uint32_t timeout;
};
struct diag_mode_test {
uint32_t command;
};
#define LPFC_WWNN_TYPE 0
#define LPFC_WWPN_TYPE 1
struct get_mgmt_rev {
uint32_t command;
};
#define MANAGEMENT_MAJOR_REV 1
#define MANAGEMENT_MINOR_REV 0
/* the MgmtRevInfo structure */
struct MgmtRevInfo {
uint32_t a_Major;
uint32_t a_Minor;
};
struct get_mgmt_rev_reply {
struct MgmtRevInfo info;
};
struct dfc_mbox_req {
uint32_t command;
uint32_t inExtWLen;
uint32_t outExtWLen;
uint8_t mbOffset;
};
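All of the structures in this new header travel as the vendor-unique payload of an FC_BSG_HST_VENDOR request, as the top-of-file comment says. A minimal sketch of preparing one such payload; the ID values are invented for illustration, and nothing here issues the actual bsg request:

#include <stdint.h>
#include <string.h>

/* Fill the payload that registers a CT event listener. */
static void fill_set_ct_event(struct set_ct_event *ev)
{
	memset(ev, 0, sizeof(*ev));
	ev->command   = LPFC_BSG_VENDOR_SET_CT_EVENT;
	ev->type_mask = 0xffffffff;	/* listen for all CT event types */
	ev->ev_req_id = 1;		/* caller-chosen request id */
	ev->ev_reg_id = 1;		/* caller-chosen registration id */
}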

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2008 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -44,18 +44,26 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
struct lpfc_nodelist *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
void lpfc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
void lpfc_cleanup_rpis(struct lpfc_vport *, int);
void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_linkdown_port(struct lpfc_vport *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -73,6 +81,7 @@ void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_unreg_all_rpis(struct lpfc_vport *);
void lpfc_unreg_hba_rpis(struct lpfc_hba *);
void lpfc_unreg_default_rpis(struct lpfc_vport *);
void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
@ -99,7 +108,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
struct serv_parm *, uint32_t, int);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_more_plogi(struct lpfc_vport *);
void lpfc_more_adisc(struct lpfc_vport *);
@ -197,6 +206,7 @@ void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
void lpfc_issue_init_vpi(struct lpfc_vport *);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
@ -206,7 +216,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
void lpfc_unregister_fcf(struct lpfc_hba *);
void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@ -365,6 +379,8 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
void lpfc_create_static_vport(struct lpfc_hba *);
void lpfc_stop_hba_timers(struct lpfc_hba *);
void lpfc_stop_port(struct lpfc_hba *);
void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
@ -378,5 +394,5 @@ struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
/* functions to support SGIOv4/bsg interface */
int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);

View file

@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -97,7 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head head;
struct lpfc_dmabuf *bdeBuf;
lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
return;
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@ -181,7 +182,8 @@ lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
uint32_t size;
/* Forward abort event to any process registered to receive ct event */
lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
return;
/* If there is no BDE associated with IOCB, there is nothing to do */
if (icmd->ulpBdeCount == 0)
@ -1843,12 +1845,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
c = (rev & 0x0000ff00) >> 8;
b4 = (rev & 0x000000ff);
if (flag)
sprintf(fwrevision, "%d.%d%d%c%d ", b1,
b2, b3, c, b4);
else
sprintf(fwrevision, "%d.%d%d%c%d ", b1,
b2, b3, c, b4);
sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
}
return;
}

View file

@ -50,9 +50,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
static void lpfc_register_new_vport(struct lpfc_hba *phba,
struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
static int lpfc_max_els_tries = 3;
@ -592,6 +589,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
/*
* If the VPI is unregistered, the driver needs to issue INIT_VPI
* before re-registering
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
}
if (phba->sli_rev < LPFC_SLI_REV4) {
@ -604,10 +610,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
} else {
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->vpi_state & LPFC_VPI_REGISTERED) {
if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
(vport->vpi_state & LPFC_VPI_REGISTERED)) {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
} else
} else if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_issue_init_vpi(vport);
else
lpfc_issue_reg_vfi(vport);
}
return 0;
@ -804,6 +813,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpTimeout);
goto flogifail;
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
spin_unlock_irq(shost->host_lock);
/*
* The FLogI succeeded. Sync the data for the CPU before
@ -2720,7 +2732,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
if ((cmd == ELS_CMD_FLOGI) &&
if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
(phba->fc_topology != TOPOLOGY_LOOP) &&
!lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
@ -4385,7 +4397,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
did = Fabric_DID;
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
/* For a FLOGI we accept, then if our portname is greater
* then the remote portname we initiate Nport login.
*/
@ -5915,6 +5927,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@ -5936,6 +5949,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
break;
/* If reg_vpi fails with an invalid VPI status, re-init the VPI */
case 0x20:
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
lpfc_init_vpi(phba, pmb, vport->vpi);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb,
MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_vlog(vport,
KERN_ERR, LOG_MBOX,
"2732 Failed to issue INIT_VPI"
" mailbox command\n");
} else {
lpfc_nlp_put(ndlp);
return;
}
default:
/* Try to recover from this error */
lpfc_mbx_unreg_vpi(vport);
@ -5949,13 +5982,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
break;
}
} else {
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
if (vport == phba->pport)
spin_unlock_irq(shost->host_lock);
if (vport == phba->pport) {
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
else
lpfc_issue_reg_vfi(vport);
else
else {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
} else
lpfc_do_scr_ns_plogi(phba, vport);
}
@ -5977,7 +6014,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* This routine registers the @vport as a new virtual port with a HBA.
* It is done through a registering vpi mailbox command.
**/
static void
void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
@ -6017,6 +6054,78 @@ mbox_err_exit:
return;
}
/**
* lpfc_retry_pport_discovery - Start timer to retry FLOGI.
* @phba: pointer to lpfc hba data structure.
*
* This routine aborts all pending discovery commands and
* starts a timer to retry FLOGI for the physical port
* discovery.
**/
void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
int i;
uint32_t link_state;
/* Treat this failure as linkdown for all vports */
link_state = phba->link_state;
lpfc_linkdown(phba);
phba->link_state = link_state;
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
lpfc_els_flush_cmd(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
}
/* If the fabric requires FLOGI, then re-instantiate the physical login */
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (!ndlp)
return;
shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
return;
}
/**
* lpfc_fabric_login_reqd - Check if FLOGI required.
* @phba: pointer to lpfc hba data structure.
* @cmdiocb: pointer to FDISC command iocb.
* @rspiocb: pointer to FDISC response iocb.
*
* This routine checks if a FLOGI is required for FDISC
* to succeed.
**/
static int
lpfc_fabric_login_reqd(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
return 0;
else
return 1;
}
/**
* lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
* @phba: pointer to lpfc hba data structure.
@ -6066,6 +6175,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
if (irsp->ulpStatus) {
if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
lpfc_retry_pport_discovery(phba);
goto out;
}
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
@ -6076,6 +6191,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto fdisc_failed;
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
@ -6103,10 +6219,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
lpfc_issue_init_vpi(vport);
else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);

The diff for this file is not shown because of its large size.


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -1346,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */
#define MBX_HEARTBEAT 0x31
#define MBX_WRITE_VPARMS 0x32
#define MBX_ASYNCEVT_ENABLE 0x33
#define MBX_READ_EVENT_LOG_STATUS 0x37
#define MBX_READ_EVENT_LOG 0x38
#define MBX_WRITE_EVENT_LOG 0x39
#define MBX_PORT_CAPABILITIES 0x3B
#define MBX_PORT_IOV_CONTROL 0x3C
@ -1465,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */
#define CMD_IOCB_LOGENTRY_CN 0x94
#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
/* Unhandled Data Security SLI Commands */
#define DSSCMD_IWRITE64_CR 0xD8
#define DSSCMD_IWRITE64_CX 0xD9
#define DSSCMD_IREAD64_CR 0xDA
#define DSSCMD_IREAD64_CX 0xDB
#define DSSCMD_INVALIDATE_DEK 0xDC
#define DSSCMD_SET_KEK 0xDD
#define DSSCMD_GET_KEK_ID 0xDE
#define DSSCMD_GEN_XFER 0xDF
/* Data Security SLI Commands */
#define DSSCMD_IWRITE64_CR 0xF8
#define DSSCMD_IWRITE64_CX 0xF9
#define DSSCMD_IREAD64_CR 0xFA
#define DSSCMD_IREAD64_CX 0xFB
#define CMD_MAX_IOCB_CMD 0xE6
#define CMD_MAX_IOCB_CMD 0xFB
#define CMD_IOCB_MASK 0xff
#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG


@ -52,35 +52,37 @@ struct dma_address {
uint32_t addr_hi;
};
#define LPFC_SLIREV_CONF_WORD 0x58
struct lpfc_sli_intf {
uint32_t word0;
#define lpfc_sli_intf_iftype_MASK 0x00000007
#define lpfc_sli_intf_iftype_SHIFT 0
#define lpfc_sli_intf_iftype_WORD word0
#define lpfc_sli_intf_rev_MASK 0x0000000f
#define lpfc_sli_intf_rev_SHIFT 4
#define lpfc_sli_intf_rev_WORD word0
#define LPFC_SLIREV_CONF_SLI4 4
#define lpfc_sli_intf_family_MASK 0x000000ff
#define lpfc_sli_intf_family_SHIFT 8
#define lpfc_sli_intf_family_WORD word0
#define lpfc_sli_intf_feat1_MASK 0x000000ff
#define lpfc_sli_intf_feat1_SHIFT 16
#define lpfc_sli_intf_feat1_WORD word0
#define lpfc_sli_intf_feat2_MASK 0x0000001f
#define lpfc_sli_intf_feat2_SHIFT 24
#define lpfc_sli_intf_feat2_WORD word0
#define lpfc_sli_intf_valid_MASK 0x00000007
#define lpfc_sli_intf_valid_SHIFT 29
#define lpfc_sli_intf_valid_WORD word0
#define lpfc_sli_intf_valid_SHIFT 29
#define lpfc_sli_intf_valid_MASK 0x00000007
#define lpfc_sli_intf_valid_WORD word0
#define LPFC_SLI_INTF_VALID 6
#define lpfc_sli_intf_featurelevel2_SHIFT 24
#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F
#define lpfc_sli_intf_featurelevel2_WORD word0
#define lpfc_sli_intf_featurelevel1_SHIFT 16
#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF
#define lpfc_sli_intf_featurelevel1_WORD word0
#define LPFC_SLI_INTF_FEATURELEVEL1_1 1
#define LPFC_SLI_INTF_FEATURELEVEL1_2 2
#define lpfc_sli_intf_sli_family_SHIFT 8
#define lpfc_sli_intf_sli_family_MASK 0x000000FF
#define lpfc_sli_intf_sli_family_WORD word0
#define LPFC_SLI_INTF_FAMILY_BE2 0
#define LPFC_SLI_INTF_FAMILY_BE3 1
#define lpfc_sli_intf_slirev_SHIFT 4
#define lpfc_sli_intf_slirev_MASK 0x0000000F
#define lpfc_sli_intf_slirev_WORD word0
#define LPFC_SLI_INTF_REV_SLI3 3
#define LPFC_SLI_INTF_REV_SLI4 4
#define lpfc_sli_intf_if_type_SHIFT 0
#define lpfc_sli_intf_if_type_MASK 0x00000007
#define lpfc_sli_intf_if_type_WORD word0
#define LPFC_SLI_INTF_IF_TYPE_0 0
#define LPFC_SLI_INTF_IF_TYPE_1 1
};
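For orientation, each _SHIFT/_MASK/_WORD triplet above feeds the driver's generic bf_get()/bf_set() accessors. A minimal sketch of the convention, assuming accessor macros of the usual form (illustrative, not copied verbatim from the driver):
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
/* Example: the PCI probe test later in this series reduces to */
static int sli_intf_is_sli4(uint32_t word0)
{
	struct lpfc_sli_intf intf;
	intf.word0 = word0;
	return (bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	       (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4);
}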
#define LPFC_SLI4_BAR0 1
#define LPFC_SLI4_BAR1 2
#define LPFC_SLI4_BAR2 4
#define LPFC_SLI4_MBX_EMBED true
#define LPFC_SLI4_MBX_NEMBED false
@ -161,6 +163,9 @@ struct lpfc_sli_intf {
#define LPFC_FP_DEF_IMAX 10000
#define LPFC_SP_DEF_IMAX 10000
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
@ -516,7 +521,7 @@ struct lpfc_register {
#define LPFC_UERR_STATUS_LO 0x00A0
#define LPFC_UE_MASK_HI 0x00AC
#define LPFC_UE_MASK_LO 0x00A8
#define LPFC_SCRATCHPAD 0x0058
#define LPFC_SLI_INTF 0x0058
/* BAR0 Registers */
#define LPFC_HST_STATE 0x00AC
@ -576,19 +581,6 @@ struct lpfc_register {
#define LPFC_POST_STAGE_ARMFW_READY 0xC000
#define LPFC_POST_STAGE_ARMFW_UE 0xF000
#define lpfc_scratchpad_slirev_SHIFT 4
#define lpfc_scratchpad_slirev_MASK 0xF
#define lpfc_scratchpad_slirev_WORD word0
#define lpfc_scratchpad_chiptype_SHIFT 8
#define lpfc_scratchpad_chiptype_MASK 0xFF
#define lpfc_scratchpad_chiptype_WORD word0
#define lpfc_scratchpad_featurelevel1_SHIFT 16
#define lpfc_scratchpad_featurelevel1_MASK 0xFF
#define lpfc_scratchpad_featurelevel1_WORD word0
#define lpfc_scratchpad_featurelevel2_SHIFT 24
#define lpfc_scratchpad_featurelevel2_MASK 0xFF
#define lpfc_scratchpad_featurelevel2_WORD word0
/* BAR1 Registers */
#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
@ -801,6 +793,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
/* Mailbox command structures */
struct eq_context {
@ -1149,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */
this flag !! */
#define lpfc_sli4_sge_last_MASK 0x00000001
#define lpfc_sli4_sge_last_WORD word2
uint32_t word3;
#define lpfc_sli4_sge_len_SHIFT 0
#define lpfc_sli4_sge_len_MASK 0x0001FFFF
#define lpfc_sli4_sge_len_WORD word3
uint32_t sge_len;
};
struct fcf_record {
@ -1301,6 +1291,19 @@ struct lpfc_mbx_del_fcf_tbl_entry {
#define lpfc_mbx_del_fcf_tbl_index_WORD word10
};
struct lpfc_mbx_redisc_fcf_tbl {
struct mbox_header header;
uint32_t word10;
#define lpfc_mbx_redisc_fcf_count_SHIFT 0
#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
#define lpfc_mbx_redisc_fcf_count_WORD word10
uint32_t resvd;
uint32_t word12;
#define lpfc_mbx_redisc_fcf_index_SHIFT 0
#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
#define lpfc_mbx_redisc_fcf_index_WORD word12
};
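A hedged sketch of how this request might be populated; the real builder (lpfc_sli4_redisc_fcf_table(), called from the CVL handling later in this series) is not part of this hunk, and the length computation and count semantics here are assumptions:
struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
		 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
		 sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
/* a count of 0 is assumed to mean "rediscover the whole table" */
bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);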
struct lpfc_mbx_query_fw_cfg {
struct mbox_header header;
uint32_t config_number;
@ -1834,6 +1837,177 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
};
struct lpfc_mbx_supp_pages {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
#define qs_WORD word1
#define wr_SHIFT 1
#define wr_MASK 0x00000001
#define wr_WORD word1
#define pf_SHIFT 8
#define pf_MASK 0x000000ff
#define pf_WORD word1
#define cpn_SHIFT 16
#define cpn_MASK 0x000000ff
#define cpn_WORD word1
uint32_t word2;
#define list_offset_SHIFT 0
#define list_offset_MASK 0x000000ff
#define list_offset_WORD word2
#define next_offset_SHIFT 8
#define next_offset_MASK 0x000000ff
#define next_offset_WORD word2
#define elem_cnt_SHIFT 16
#define elem_cnt_MASK 0x000000ff
#define elem_cnt_WORD word2
uint32_t word3;
#define pn_0_SHIFT 24
#define pn_0_MASK 0x000000ff
#define pn_0_WORD word3
#define pn_1_SHIFT 16
#define pn_1_MASK 0x000000ff
#define pn_1_WORD word3
#define pn_2_SHIFT 8
#define pn_2_MASK 0x000000ff
#define pn_2_WORD word3
#define pn_3_SHIFT 0
#define pn_3_MASK 0x000000ff
#define pn_3_WORD word3
uint32_t word4;
#define pn_4_SHIFT 24
#define pn_4_MASK 0x000000ff
#define pn_4_WORD word4
#define pn_5_SHIFT 16
#define pn_5_MASK 0x000000ff
#define pn_5_WORD word4
#define pn_6_SHIFT 8
#define pn_6_MASK 0x000000ff
#define pn_6_WORD word4
#define pn_7_SHIFT 0
#define pn_7_MASK 0x000000ff
#define pn_7_WORD word4
uint32_t rsvd[27];
#define LPFC_SUPP_PAGES 0
#define LPFC_BLOCK_GUARD_PROFILES 1
#define LPFC_SLI4_PARAMETERS 2
};
struct lpfc_mbx_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
#define qs_WORD word1
#define wr_SHIFT 1
#define wr_MASK 0x00000001
#define wr_WORD word1
#define pf_SHIFT 8
#define pf_MASK 0x000000ff
#define pf_WORD word1
#define cpn_SHIFT 16
#define cpn_MASK 0x000000ff
#define cpn_WORD word1
uint32_t word2;
#define if_type_SHIFT 0
#define if_type_MASK 0x00000007
#define if_type_WORD word2
#define sli_rev_SHIFT 4
#define sli_rev_MASK 0x0000000f
#define sli_rev_WORD word2
#define sli_family_SHIFT 8
#define sli_family_MASK 0x000000ff
#define sli_family_WORD word2
#define featurelevel_1_SHIFT 16
#define featurelevel_1_MASK 0x000000ff
#define featurelevel_1_WORD word2
#define featurelevel_2_SHIFT 24
#define featurelevel_2_MASK 0x0000001f
#define featurelevel_2_WORD word2
uint32_t word3;
#define fcoe_SHIFT 0
#define fcoe_MASK 0x00000001
#define fcoe_WORD word3
#define fc_SHIFT 1
#define fc_MASK 0x00000001
#define fc_WORD word3
#define nic_SHIFT 2
#define nic_MASK 0x00000001
#define nic_WORD word3
#define iscsi_SHIFT 3
#define iscsi_MASK 0x00000001
#define iscsi_WORD word3
#define rdma_SHIFT 4
#define rdma_MASK 0x00000001
#define rdma_WORD word3
uint32_t sge_supp_len;
uint32_t word5;
#define if_page_sz_SHIFT 0
#define if_page_sz_MASK 0x0000ffff
#define if_page_sz_WORD word5
#define loopbk_scope_SHIFT 24
#define loopbk_scope_MASK 0x0000000f
#define loopbk_scope_WORD word5
#define rq_db_window_SHIFT 28
#define rq_db_window_MASK 0x0000000f
#define rq_db_window_WORD word5
uint32_t word6;
#define eq_pages_SHIFT 0
#define eq_pages_MASK 0x0000000f
#define eq_pages_WORD word6
#define eqe_size_SHIFT 8
#define eqe_size_MASK 0x000000ff
#define eqe_size_WORD word6
uint32_t word7;
#define cq_pages_SHIFT 0
#define cq_pages_MASK 0x0000000f
#define cq_pages_WORD word7
#define cqe_size_SHIFT 8
#define cqe_size_MASK 0x000000ff
#define cqe_size_WORD word7
uint32_t word8;
#define mq_pages_SHIFT 0
#define mq_pages_MASK 0x0000000f
#define mq_pages_WORD word8
#define mqe_size_SHIFT 8
#define mqe_size_MASK 0x000000ff
#define mqe_size_WORD word8
#define mq_elem_cnt_SHIFT 16
#define mq_elem_cnt_MASK 0x000000ff
#define mq_elem_cnt_WORD word8
uint32_t word9;
#define wq_pages_SHIFT 0
#define wq_pages_MASK 0x0000ffff
#define wq_pages_WORD word9
#define wqe_size_SHIFT 8
#define wqe_size_MASK 0x000000ff
#define wqe_size_WORD word9
uint32_t word10;
#define rq_pages_SHIFT 0
#define rq_pages_MASK 0x0000ffff
#define rq_pages_WORD word10
#define rqe_size_SHIFT 8
#define rqe_size_MASK 0x000000ff
#define rqe_size_WORD word10
uint32_t word11;
#define hdr_pages_SHIFT 0
#define hdr_pages_MASK 0x0000000f
#define hdr_pages_WORD word11
#define hdr_size_SHIFT 8
#define hdr_size_MASK 0x0000000f
#define hdr_size_WORD word11
#define hdr_pp_align_SHIFT 16
#define hdr_pp_align_MASK 0x0000ffff
#define hdr_pp_align_WORD word11
uint32_t word12;
#define sgl_pages_SHIFT 0
#define sgl_pages_MASK 0x0000000f
#define sgl_pages_WORD word12
#define sgl_pp_align_SHIFT 16
#define sgl_pp_align_MASK 0x0000ffff
#define sgl_pp_align_WORD word12
uint32_t rsvd_13_63[51];
};
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@ -1863,6 +2037,7 @@ struct lpfc_mqe {
struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
@ -1883,6 +2058,8 @@ struct lpfc_mqe {
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_sli4_params sli4_params;
struct lpfc_mbx_nop nop;
} un;
};
@ -1959,6 +2136,9 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
#define lpfc_acqe_qos_link_speed_SHIFT 16
#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF
#define lpfc_acqe_qos_link_speed_WORD word1
uint32_t event_tag;
uint32_t trailer;
};
@ -1976,6 +2156,7 @@ struct lpfc_acqe_fcoe {
#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
uint32_t event_tag;
uint32_t trailer;
};


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@ -544,7 +544,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
} else {
} else if (phba->cfg_suppress_link_up == 0) {
lpfc_init_link(phba, pmb, phba->cfg_topology,
phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@ -602,6 +602,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return 0;
}
/**
* lpfc_hba_init_link - Initialize the FC link
* @phba: pointer to lpfc hba data structure.
*
* This routine will issue the INIT_LINK mailbox command call.
* It is available to other drivers through the lpfc_hba data
* structure for use as a delayed link up mechanism with the
* module parameter lpfc_suppress_link_up.
*
* Return code
* 0 - success
* Any other value - error
**/
int
lpfc_hba_init_link(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
mb = &pmb->u.mb;
pmb->vport = vport;
lpfc_init_link(phba, pmb, phba->cfg_topology,
phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0498 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Clear all pending interrupts */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
phba->cfg_suppress_link_up = 0;
return 0;
}
/**
* lpfc_hba_down_link - Bring down the FC link
* @phba: pointer to lpfc hba data structure.
*
* This routine will issue the DOWN_LINK mailbox command call.
* It is available to other drivers through the lpfc_hba data
* structure for use in stopping the link.
*
* Return code
* 0 - success
* Any other value - error
**/
int
lpfc_hba_down_link(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"0491 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"2522 Adapter failed to issue DOWN_LINK"
" mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
return 0;
}
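Since both routines are published through function pointers on lpfc_hba (assigned in lpfc_init_api_table_setup() later in this patch), other parts of the driver can drive the link without caring about the SLI revision. A minimal sketch, assuming a hypothetical caller:
static int lpfc_link_toggle(struct lpfc_hba *phba, int up)
{
	/* Pass-through of the jump-table entries set up below. */
	if (up)
		return phba->lpfc_hba_init_link(phba);
	return phba->lpfc_hba_down_link(phba);
}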
/**
* lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
* @phba: pointer to lpfc HBA data structure.
@ -2072,6 +2168,44 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
return;
}
/**
* __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
* @phba: pointer to lpfc hba data structure.
*
* This routine stops the SLI4 FCF rediscover wait timer if it's on. The
* caller of this routine should already hold the hbalock.
**/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
/* Clear pending FCF rediscovery wait timer */
phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
/* Now, try to stop the timer */
del_timer(&phba->fcf.redisc_wait);
}
/**
* lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
* @phba: pointer to lpfc hba data structure.
*
* This routine stops the SLI4 FCF rediscover wait timer if it's on. It
* checks whether the FCF rediscovery wait timer is pending with the
* hbalock held before proceeding to disable the timer and clear the
* wait timer pending flag.
**/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
/* FCF rediscovery timer already fired or stopped */
spin_unlock_irq(&phba->hbalock);
return;
}
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
spin_unlock_irq(&phba->hbalock);
}
/**
* lpfc_stop_hba_timers - Stop all the timers associated with an HBA
* @phba: pointer to lpfc hba data structure.
@ -2096,6 +2230,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
break;
case LPFC_PCI_DEV_OC:
/* Stop any OneConnect device specific driver timers */
lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@ -2228,6 +2363,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
if (vport->fc_flag & FC_OFFLINE_MODE)
@ -2241,11 +2377,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
if (vports[i]->load_flag & FC_UNLOADING)
continue;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
shost = lpfc_shost_from_vport(vports[i]);
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
@ -2401,7 +2541,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->this_id = -1;
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len;
shost->sg_tablesize = phba->cfg_sg_seg_cnt;
}
@ -2650,8 +2791,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
lpfc_stop_hba_timers(phba);
phba->pport->work_port_events = 0;
phba->sli4_hba.intr_enable = 0;
/* Hard clear it for now, shall have more graceful way to wait later */
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
/**
@ -2703,7 +2842,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
phba->fcf.fcf_indx);
phba->fcf.current_rec.fcf_indx);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@ -2726,6 +2865,57 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
mempool_free(mboxq, phba->mbox_mem_pool);
}
/**
* lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
* @phba: Pointer to hba for which this call is being executed.
*
* This routine starts the timer waiting for the FCF rediscovery to complete.
**/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
unsigned long fcf_redisc_wait_tmo =
(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
/* Start fcf rediscovery wait period timer */
mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
spin_lock_irq(&phba->hbalock);
/* Allow action to new fcf asynchronous event */
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
/* Mark the FCF rediscovery pending state */
phba->fcf.fcf_flag |= FCF_REDISC_PEND;
spin_unlock_irq(&phba->hbalock);
}
/**
* lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
* @ptr: Map to lpfc_hba data structure pointer.
*
* This routine is invoked when the wait for FCF table rediscovery has
* timed out. If new FCF record(s) have been discovered during the wait
* period, a new FCF event shall be added to the FCOE async event list,
* and the worker thread shall be woken up for processing from the
* worker thread context.
**/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
/* Don't send FCF rediscovery event if timer cancelled */
spin_lock_irq(&phba->hbalock);
if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
spin_unlock_irq(&phba->hbalock);
return;
}
/* Clear FCF rediscovery timer pending flag */
phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
/* FCF rediscovery event to worker thread */
phba->fcf.fcf_flag |= FCF_REDISC_EVT;
spin_unlock_irq(&phba->hbalock);
/* wake up worker thread */
lpfc_worker_wake_up(phba);
}
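Taken together with the stop helpers in this patch, the rediscovery wait timer follows a small flag handshake; a condensed lifecycle sketch (flow assumed from the routines shown here):
/* init:    init_timer(&phba->fcf.redisc_wait)       probe path
 * arm:     lpfc_fcf_redisc_wait_start_timer()       sets FCF_REDISC_PEND
 * fire:    lpfc_sli4_fcf_redisc_wait_tmo()          PEND -> FCF_REDISC_EVT,
 *                                                   wakes the worker
 * worker:  lpfc_sli4_fcf_redisc_event_proc()        sets FCF_REDISC_FOV and
 *                                                   rescans from entry 0
 * cancel:  lpfc_sli4_stop_fcf_redisc_wait_timer()   clears PEND, del_timer
 */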
/**
* lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
* @phba: pointer to lpfc hba data structure.
@ -2978,6 +3168,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
bf_get(lpfc_acqe_link_physical, acqe_link);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_link);
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_qos_link_speed, acqe_link);
/* Invoke the lpfc_handle_latt mailbox command callback function */
lpfc_mbx_cmpl_read_la(phba, pmb);
@ -3007,22 +3199,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint32_t link_state;
int active_vlink_present;
struct lpfc_vport **vports;
int i;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2546 New FCF found index 0x%x tag 0x%x\n",
acqe_fcoe->index,
acqe_fcoe->event_tag);
/*
* If the current FCF is in discovered state, or
* FCF discovery is in progress do nothing.
*/
spin_lock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
/*
* If the current FCF is in discovered state or
* FCF discovery is in progress, do nothing.
*/
spin_unlock_irq(&phba->hbalock);
break;
}
if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
/*
* If fast FCF failover rescan event is pending,
* do nothing.
*/
spin_unlock_irq(&phba->hbalock);
break;
}
@ -3049,7 +3253,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
" tag 0x%x\n", acqe_fcoe->index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */
if (phba->fcf.fcf_indx != acqe_fcoe->index)
if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
break;
/*
* Currently, driver support only one FCF - so treat this as
@ -3074,14 +3278,58 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
if (!ndlp)
break;
shost = lpfc_shost_from_vport(vport);
if (phba->pport->port_state <= LPFC_FLOGI)
break;
/* If virtual link is not yet instantiated ignore CVL */
if (vport->port_state <= LPFC_FDISC)
break;
lpfc_linkdown_port(vport);
if (vport->port_type != LPFC_NPIV_PORT) {
lpfc_cleanup_pending_mbox(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_CVL_RCVD;
spin_unlock_irq(shost->host_lock);
active_vlink_present = 0;
vports = lpfc_create_vport_work_array(phba);
if (vports) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
i++) {
if ((!(vports[i]->fc_flag &
FC_VPORT_CVL_RCVD)) &&
(vports[i]->port_state > LPFC_FDISC)) {
active_vlink_present = 1;
break;
}
}
lpfc_destroy_vport_work_array(phba, vports);
}
if (active_vlink_present) {
/*
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
vport->port_state = LPFC_FLOGI;
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
} else {
/*
* Otherwise, we request the port to rediscover
* the entire FCF table for a fast recovery
* from the possible case that the current FCF
* is no longer valid.
*/
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc)
/*
* Last resort will be to retry on the
* currently registered FCF entry.
*/
lpfc_retry_pport_discovery(phba);
}
break;
default:
@ -3157,6 +3405,34 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
}
}
/**
* lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked by the worker thread to process the FCF table
* rediscovery pending completion event.
**/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
int rc;
spin_lock_irq(&phba->hbalock);
/* Clear FCF rediscovery timeout event */
phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
/* Clear driver fast failover FCF record flag */
phba->fcf.failover_rec.flag = 0;
/* Set state for FCF fast failover */
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
/* Scan FCF table from the first entry to re-discover SAN */
rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2747 Post FCF rediscovery read FCF record "
"failed 0x%x\n", rc);
}
/**
* lpfc_api_table_setup - Set up per hba pci-device group func api jump table
* @phba: pointer to lpfc hba data structure.
@ -3442,8 +3718,10 @@ static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
int rc;
int i, hbq_count;
LPFC_MBOXQ_t *mboxq;
int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@ -3472,6 +3750,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
init_timer(&phba->eratt_poll);
phba->eratt_poll.function = lpfc_poll_eratt;
phba->eratt_poll.data = (unsigned long) phba;
/* FCF rediscover timer */
init_timer(&phba->fcf.redisc_wait);
phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
phba->fcf.redisc_wait.data = (unsigned long)phba;
/*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
@ -3496,31 +3779,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
* To insure that the scsi sgl does not cross a 4k page boundary only
* sgl sizes of 1k, 2k, 4k, and 8k are supported.
* Table of sgl sizes and seg_cnt:
* sgl size, sg_seg_cnt total seg
* 1k 50 52
* 2k 114 116
* 4k 242 244
* 8k 498 500
* cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
* cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
* cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
* cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
* sgl sizes must be a power of 2.
*/
if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
phba->cfg_sg_seg_cnt = 50;
else if (phba->cfg_sg_seg_cnt <= 114)
phba->cfg_sg_seg_cnt = 114;
else if (phba->cfg_sg_seg_cnt <= 242)
phba->cfg_sg_seg_cnt = 242;
buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
/* Feature Level 1 hardware is limited to 2 pages */
if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_FEATURELEVEL1_1))
max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
else
phba->cfg_sg_seg_cnt = 498;
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
+ sizeof(struct fcp_rsp);
phba->cfg_sg_dma_buf_size +=
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
dma_buf_size < max_buf_size && buf_size > dma_buf_size;
dma_buf_size = dma_buf_size << 1)
;
if (dma_buf_size == max_buf_size)
phba->cfg_sg_seg_cnt = (dma_buf_size -
sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
(2 * sizeof(struct sli4_sge))) /
sizeof(struct sli4_sge);
phba->cfg_sg_dma_buf_size = dma_buf_size;
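A worked instance of the sizing loop, using the element sizes from the comment this hunk replaces (fcp_cmnd 32 bytes, fcp_rsp 160, sli4_sge 16) and assuming LPFC_SLI4_MIN_BUF_SIZE is 1024:
/* cfg_sg_seg_cnt = 64:
 *   buf_size     = 32 + 160 + (64 + 2) * 16 = 1248
 *   dma_buf_size = 1024 -> 2048, the first power-of-2 size >= 1248
 * cfg_sg_seg_cnt is recomputed (trimmed) only when the loop stops at
 * max_buf_size; otherwise the requested count stands and the pool
 * buffer is simply rounded up.
 */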
/* Initialize buffer queue management fields */
hbq_count = lpfc_sli_hbq_count();
@ -3638,6 +3916,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
rc = -ENOMEM;
goto out_free_fcp_eq_hdl;
}
/* Get the Supported Pages. It is always available. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (unlikely(rc)) {
rc = -EIO;
mempool_free(mboxq, phba->mbox_mem_pool);
goto out_free_fcp_eq_hdl;
}
mqe = &mboxq->u.mqe;
memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
LPFC_MAX_SUPPORTED_PAGES);
for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
switch (pn_page[i]) {
case LPFC_SLI4_PARAMETERS:
phba->sli4_hba.pc_sli4_params.supported = 1;
break;
default:
break;
}
}
/* Read the port's SLI4 Parameters capabilities if supported. */
if (phba->sli4_hba.pc_sli4_params.supported)
rc = lpfc_pc_sli4_params_get(phba, mboxq);
mempool_free(mboxq, phba->mbox_mem_pool);
if (rc) {
rc = -EIO;
goto out_free_fcp_eq_hdl;
}
return rc;
out_free_fcp_eq_hdl:
@ -3733,6 +4048,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
phba->lpfc_hba_init_link = lpfc_hba_init_link;
phba->lpfc_hba_down_link = lpfc_hba_down_link;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@ -4291,7 +4608,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
return NULL;
}
mutex_init(&phba->ct_event_mutex);
spin_lock_init(&phba->ct_ev_lock);
INIT_LIST_HEAD(&phba->ct_ev_waiters);
return phba;
@ -4641,7 +4958,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
int i, port_error = -ENODEV;
if (!phba->sli4_hba.STAregaddr)
@ -4677,14 +4994,21 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
bf_get(lpfc_hst_state_port_status, &sta_reg));
/* Log device information */
scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
"FeatureL1=0x%x, FeatureL2=0x%x\n",
bf_get(lpfc_scratchpad_chiptype, &scratchpad),
bf_get(lpfc_scratchpad_slirev, &scratchpad),
bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
if (bf_get(lpfc_sli_intf_valid,
&phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
"FeatureL1=0x%x, FeatureL2=0x%x\n",
bf_get(lpfc_sli_intf_sli_family,
&phba->sli4_hba.sli_intf),
bf_get(lpfc_sli_intf_slirev,
&phba->sli4_hba.sli_intf),
bf_get(lpfc_sli_intf_featurelevel1,
&phba->sli4_hba.sli_intf),
bf_get(lpfc_sli_intf_featurelevel2,
&phba->sli4_hba.sli_intf));
}
phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
/* With uncoverable error, log the error message and return error */
@ -4723,8 +5047,8 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
LPFC_UE_MASK_LO;
phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
LPFC_UE_MASK_HI;
phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
LPFC_SCRATCHPAD;
phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
LPFC_SLI_INTF;
}
/**
@ -5999,7 +6323,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
spin_lock_irqsave(&phba->hbalock, flags);
/* Mark the FCFI is no longer registered */
phba->fcf.fcf_flag &=
~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
spin_unlock_irqrestore(&phba->hbalock, flags);
}
}
@ -6039,16 +6363,20 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
* number of bytes required by each mapping. They are actually
* mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
* mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
*/
phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
if (pci_resource_start(pdev, 0)) {
phba->pci_bar0_map = pci_resource_start(pdev, 0);
bar0map_len = pci_resource_len(pdev, 0);
} else {
phba->pci_bar0_map = pci_resource_start(pdev, 1);
bar0map_len = pci_resource_len(pdev, 1);
}
phba->pci_bar1_map = pci_resource_start(pdev, 2);
bar1map_len = pci_resource_len(pdev, 2);
phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
phba->pci_bar2_map = pci_resource_start(pdev, 4);
bar2map_len = pci_resource_len(pdev, 4);
/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
phba->sli4_hba.conf_regs_memmap_p =
@ -6793,6 +7121,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
/**
* lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
* @phba: Pointer to HBA context object.
* @mboxq: Pointer to the mailboxq memory for the mailbox command response.
*
* This function is called in the SLI4 code path to read the port's
* sli4 capabilities.
*
* This function may be called from any context that can block-wait
* for the completion. The expectation is that this routine is called
* typically from probe_one or from the online routine.
**/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
int rc;
struct lpfc_mqe *mqe;
struct lpfc_pc_sli4_params *sli4_params;
uint32_t mbox_tmo;
rc = 0;
mqe = &mboxq->u.mqe;
/* Read the port's SLI4 Parameters port capabilities */
lpfc_sli4_params(mboxq);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
}
if (unlikely(rc))
return 1;
sli4_params = &phba->sli4_hba.pc_sli4_params;
sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
sli4_params->featurelevel_1 = bf_get(featurelevel_1,
&mqe->un.sli4_params);
sli4_params->featurelevel_2 = bf_get(featurelevel_2,
&mqe->un.sli4_params);
sli4_params->proto_types = mqe->un.sli4_params.word3;
sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
return rc;
}
/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
@ -7134,6 +7529,12 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
*/
pci_save_state(pdev);
if (pdev->is_busmaster)
pci_set_master(pdev);
@ -7317,6 +7718,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
}
pci_restore_state(pdev);
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
*/
pci_save_state(pdev);
if (pdev->is_busmaster)
pci_set_master(pdev);
@ -7726,6 +8134,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
/* Restore device state from PCI config space */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
*/
pci_save_state(pdev);
if (pdev->is_busmaster)
pci_set_master(pdev);
@ -7845,11 +8260,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
int rc;
struct lpfc_sli_intf intf;
if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
return -ENODEV;
if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
(bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
rc = lpfc_pci_probe_one_s4(pdev, pid);
else
rc = lpfc_pci_probe_one_s3(pdev, pid);


@ -1707,7 +1707,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
}
/* The sub-header is in DMA memory, which needs endian conversion */
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
if (cfg_shdr)
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
@ -1746,6 +1747,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
/**
* lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to lpfc mbox command.
* @fcf_index: index to the fcf table.
*
* This routine allocates and constructs a non-embedded mailbox command
* for reading the FCF table entry referred to by @fcf_index.
*
* Return: 0 if the mailbox command is constructed successfully, -ENOMEM
* otherwise.
**/
int
lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
struct lpfcMboxq *mboxq,
uint16_t fcf_index)
{
void *virt_addr;
dma_addr_t phys_addr;
uint8_t *bytep;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
if (!mboxq)
return -ENOMEM;
req_len = sizeof(struct fcf_record) +
sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
LPFC_SLI4_MBX_NEMBED);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"0291 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
return -ENOMEM;
}
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
virt_addr = mboxq->sge_array->addr[0];
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
/* Set up command fields */
bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
/* Perform necessary endian conversion */
bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
return 0;
}
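A sketch of the expected calling pattern, assuming the caller owns the mailbox allocation and a hypothetical completion handler named read_fcf_cmpl (the driver's actual wrapper is not part of this hunk):
static int read_fcf_entry(struct lpfc_hba *phba, uint16_t fcf_index)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
	if (rc) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = read_fcf_cmpl;	/* hypothetical name */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -EIO;
	}
	return 0;
}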
/**
* lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox
* @mboxq: pointer to lpfc mbox command.
@ -1946,13 +2006,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
phba->fcf.current_rec.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
phba->fcf.current_rec.vlan_id);
}
}
@ -1992,3 +2053,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
/**
* lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
* mailbox command.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES supported pages mailbox command is issued to
* retrieve the particular feature pages supported by the port.
**/
void
lpfc_supported_pages(struct lpfcMboxq *mbox)
{
struct lpfc_mbx_supp_pages *supp_pages;
memset(mbox, 0, sizeof(*mbox));
supp_pages = &mbox->u.mqe.un.supp_pages;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
}
/**
* lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
* mailbox command.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
* retrieve the particular SLI4 features supported by the port.
**/
void
lpfc_sli4_params(struct lpfcMboxq *mbox)
{
struct lpfc_mbx_sli4_params *sli4_params;
memset(mbox, 0, sizeof(*mbox));
sli4_params = &mbox->u.mqe.un.sli4_params;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
}


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2008 Emulex. All rights reserved. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -177,23 +177,3 @@ struct temp_event {
uint32_t data;
};
/* bsg definitions */
#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
struct set_ct_event {
uint32_t command;
uint32_t ev_req_id;
uint32_t ev_reg_id;
};
struct get_ct_event {
uint32_t command;
uint32_t ev_reg_id;
uint32_t ev_req_id;
};
struct get_ct_event_reply {
uint32_t immed_data;
uint32_t type;
};


@ -62,7 +62,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm * sp, uint32_t class)
struct serv_parm *sp, uint32_t class, int flogi)
{
volatile struct serv_parm *hsp = &vport->fc_sparam;
uint16_t hsp_value, ssp_value = 0;
@ -75,49 +75,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* correcting the byte values.
*/
if (sp->cls1.classValid) {
hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
hsp->cls1.rcvDataSizeLsb;
ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
if (!flogi) {
hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
hsp->cls1.rcvDataSizeLsb);
ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb);
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb =
hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb =
hsp->cls1.rcvDataSizeMsb;
}
}
} else if (class == CLASS1) {
} else if (class == CLASS1)
goto bad_service_param;
}
if (sp->cls2.classValid) {
hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
hsp->cls2.rcvDataSizeLsb;
ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
if (!flogi) {
hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
hsp->cls2.rcvDataSizeLsb);
ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb);
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb =
hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb =
hsp->cls2.rcvDataSizeMsb;
}
}
} else if (class == CLASS2) {
} else if (class == CLASS2)
goto bad_service_param;
}
if (sp->cls3.classValid) {
hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
hsp->cls3.rcvDataSizeLsb;
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb;
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
if (!flogi) {
hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
hsp->cls3.rcvDataSizeLsb);
ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb);
if (!ssp_value)
goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb =
hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb =
hsp->cls3.rcvDataSizeMsb;
}
}
} else if (class == CLASS3) {
} else if (class == CLASS3)
goto bad_service_param;
}
/*
* Preserve the upper four bits of the MSB from the PLOGI response.
@ -247,7 +254,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
if (vport->port_state <= LPFC_FLOGI) {
if (vport->port_state <= LPFC_FDISC) {
/* Before responding to PLOGI, check for pt2pt mode.
* If we are pt2pt, with an outstanding FLOGI, abort
* the FLOGI and resend it first.
@ -295,7 +302,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@ -831,7 +838,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
"0142 PLOGI RSP: Invalid WWN.\n");
goto out;
}
if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
goto out;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,


@ -626,6 +626,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
spin_unlock_irqrestore(
&phba->sli4_hba.abts_scsi_buf_list_lock,
@ -688,11 +689,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
list);
if (status) {
/* Put this back on the abort scsi list */
psb->status = IOSTAT_LOCAL_REJECT;
psb->result = IOERR_ABORT_REQUESTED;
psb->exch_busy = 1;
rc++;
} else
} else {
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
}
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
}
@ -796,19 +798,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
sgl++;
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
/*
* Since the IOCB for the FCP I/O is built into this
@ -839,11 +839,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->cur_iocbq.sli4_xritag);
if (status) {
/* Put this back on the abort scsi list */
psb->status = IOSTAT_LOCAL_REJECT;
psb->result = IOERR_ABORT_REQUESTED;
psb->exch_busy = 1;
rc++;
} else
} else {
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
}
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
break;
@ -857,11 +858,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
list);
if (status) {
/* Put this back on the abort scsi list */
psb->status = IOSTAT_LOCAL_REJECT;
psb->result = IOERR_ABORT_REQUESTED;
psb->exch_busy = 1;
rc++;
} else
} else {
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
}
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
}
@ -951,8 +953,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
if (psb->status == IOSTAT_LOCAL_REJECT
&& psb->result == IOERR_ABORT_REQUESTED) {
if (psb->exch_busy) {
spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
iflag);
psb->pCmd = NULL;
@ -1869,7 +1870,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
dma_len = sg_dma_len(sgel);
bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
if ((num_bde + 1) == nseg)
@ -1878,7 +1878,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
sgl->sge_len = cpu_to_le32(dma_len);
dma_offset += dma_len;
sgl++;
}
@ -2221,6 +2221,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exhange busy status from HBA */
lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@ -2637,6 +2640,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
return 0;
}
@ -2695,6 +2699,13 @@ lpfc_info(struct Scsi_Host *host)
" port %s",
phba->Port);
}
len = strlen(lpfcinfobuf);
if (phba->sli4_hba.link_state.logical_speed) {
snprintf(lpfcinfobuf + len,
384-len,
" Logical Link Speed: %d Mbps",
phba->sli4_hba.link_state.logical_speed * 10);
}
}
return lpfcinfobuf;
}
@ -2990,6 +3001,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (lpfc_is_link_up(phba))
icmd->ulpCommand = CMD_ABORT_XRI_CN;


@ -118,6 +118,7 @@ struct lpfc_scsi_buf {
uint32_t timeout;
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */


@ -580,10 +580,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
&& ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
&& (iocbq->iocb.un.ulpWord[4]
== IOERR_ABORT_REQUESTED))) {
if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
@ -764,10 +761,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
case DSSCMD_INVALIDATE_DEK:
case DSSCMD_SET_KEK:
case DSSCMD_GET_KEK_ID:
case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
@ -1717,6 +1710,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp;
uint16_t rpi, vpi;
int rc;
struct lpfc_vport *vport = pmb->vport;
mp = (struct lpfc_dmabuf *) (pmb->context1);
@ -1745,6 +1739,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
/* Unreg VPI, if the REG_VPI succeed after VLink failure */
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
!(phba->pport->load_flag & FC_UNLOADING) &&
!pmb->u.mb.mbxStatus) {
lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
@ -2228,9 +2234,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(cmdiocbp->iocb_flag &
LPFC_DRIVER_ABORTED)) {
spin_lock_irqsave(&phba->hbalock,
iflag);
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
@ -2240,7 +2252,47 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* of DMAing payload, so don't free data
* buffer till after a hbeat.
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
if ((phba->sli_rev == LPFC_SLI_REV4) &&
(saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
/* Set the exchange-busy flag on the cmdiocb
* so the sgl (xri) will not be released
* until the abort xri is received from the
* hba; clear the LPFC_DRIVER_ABORTED bit in
* case it was a driver-initiated abort.
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
cmdiocbp->iocb_flag |=
LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
cmdiocbp->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
cmdiocbp->iocb.un.ulpWord[4] =
IOERR_ABORT_REQUESTED;
/*
* For SLI4, irsiocb contains NO_XRI
* in sli_xritag, so it shall not affect
* the sgl (xri) release process.
*/
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
spin_lock_irqsave(&phba->hbalock,
iflag);
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@ -5687,19 +5739,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
bde.tus.w = le32_to_cpu(bpl->tus.w);
bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
bde.tus.w = le32_to_cpu(bpl->tus.w);
sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
bpl++;
sgl++;
}
@ -5712,11 +5764,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
bf_set(lpfc_sli4_sge_len, sgl,
icmd->un.genreq64.bdl.bdeSize);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
sgl->sge_len =
cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
}
return sglq->sli4_xritag;
}
@ -5987,12 +6038,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
wqe->words[5] = 0;
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
wqe->generic.abort_tag = abort_tag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@ -6121,15 +6170,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
return IOCB_ERROR;
if (piocb->iocb_flag & LPFC_IO_FCP) {
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
/*
* For an FCP command IOCB, get a new WQ index to distribute
* WQEs across the WQs. An abort IOCB, on the other hand,
* carries the same WQ index as the original command IOCB.
*/
if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
@ -7004,7 +7053,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpContext != abort_context ||
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
else {
else if (phba->sli_rev < LPFC_SLI_REV4) {
/*
* For SLI4, leave the aborted command on the
* txcmplq list; the command-complete WCQE's XB
* bit will tell whether the SGL (XRI) can be
* released immediately or must be moved to the
* aborted SGL list to wait for the abort XRI
* from the HBA.
*/
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
@ -7013,11 +7069,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* payload, so don't free data buffer till after
* a hbeat.
*/
spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irq(&phba->hbalock);
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
@ -7106,7 +7164,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
/* This signals the response to set the correct status
* before calling the completion handler.
* before calling the completion handler
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
@ -7124,6 +7182,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
if (cmdiocb->iocb_flag & LPFC_IO_FCP)
abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
@ -7330,6 +7390,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
if (iocbq->iocb_flag & LPFC_IO_FCP)
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@ -8359,11 +8421,24 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
}
}
/**
* lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
* @phba: pointer to lpfc hba data structure
* @pIocbIn: pointer to the rspiocbq
* @pIocbOut: pointer to the cmdiocbq
* @wcqe: pointer to the complete wcqe
*
* This routine transfers the fields of a command iocbq to a response iocbq
* by copying all the IOCB fields from command iocbq and transferring the
* completion status information from the complete wcqe.
**/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
unsigned long iflags;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@ -8377,8 +8452,17 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
wcqe->total_data_placed;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else
else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
}
/* Pick up HBA exchange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
spin_lock_irqsave(&phba->hbalock, iflags);
pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
}
/**
@ -8419,7 +8503,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
}
/* Fake the irspiocbq and copy necessary response information */
lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
return irspiocbq;
}
@ -8849,8 +8933,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
bf_get(lpfc_eqe_minor_code, eqe) != 0) {
if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@ -8976,7 +9059,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
}
/* Fake the irspiocb and copy necessary response information */
lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@ -9082,8 +9165,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@ -11871,12 +11953,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
void *virt_addr;
dma_addr_t phys_addr;
uint8_t *bytep;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -11887,43 +11963,19 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
error = -ENOMEM;
goto fail_fcfscan;
}
req_len = sizeof(struct fcf_record) +
sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
LPFC_SLI4_MBX_NEMBED);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0291 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
error = -ENOMEM;
/* Construct the read FCF record mailbox command */
rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
if (rc) {
error = -EINVAL;
goto fail_fcfscan;
}
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
virt_addr = mboxq->sge_array->addr[0];
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
/* Set up command fields */
bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
/* Perform necessary endian conversion */
bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
/* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
if (rc == MBX_NOT_FINISHED)
error = -EIO;
} else {
else {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
@ -11941,6 +11993,90 @@ fail_fcfscan:
return error;
}
/**
* lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to the completed rediscover FCF table mailbox command.
*
* This routine is the completion routine for the rediscover FCF table mailbox
* command. If the request failed, it retries discovery with the currently
* registered FCF entry; otherwise it starts the FCF rediscovery wait timer
* before the FCF record table is rescanned.
**/
void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
uint32_t shdr_status, shdr_add_status;
redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
shdr_status = bf_get(lpfc_mbox_hdr_status,
&redisc_fcf->header.cfg_shdr.response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&redisc_fcf->header.cfg_shdr.response);
if (shdr_status || shdr_add_status) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2746 Requesting for FCF rediscovery failed "
"status x%x add_status x%x\n",
shdr_status, shdr_add_status);
/*
* Request failed, last resort to re-try current
* registered FCF entry
*/
lpfc_retry_pport_discovery(phba);
} else
/*
* Start the FCF rediscovery wait timer for the pending
* FCF before rescanning the FCF record table.
*/
lpfc_fcf_redisc_wait_start_timer(phba);
mempool_free(mbox, phba->mbox_mem_pool);
}
/**
* lpfc_sli4_redisc_fcf_table - Request to rediscover the entire FCF table.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to request rediscovery of the entire FCF table
* by the port.
**/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
int rc, length;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2745 Failed to allocate mbox for "
"requesting FCF rediscover.\n");
return -ENOMEM;
}
length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
length, LPFC_SLI4_MBX_EMBED);
redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
/* Set count to 0 for invalidating the entire FCF database */
bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
/* Issue the mailbox command asynchronously */
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EIO;
}
return 0;
}
/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
@ -12069,3 +12205,48 @@ out:
kfree(rgn23_data);
return;
}
/**
* lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
* @vport: pointer to vport data structure.
*
* This function iterates through the mailbox queue and cleans up all
* REG_LOGIN and REG_VPI mailbox commands associated with the vport. It is
* called when the driver wants to restart discovery of the vport due to
* a Clear Virtual Link event.
**/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if (mb->vport != vport)
continue;
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
(mb->u.mb.mbxCommand != MBX_REG_VPI))
continue;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
}
mb = phba->sli.mbox_active;
if (mb && (mb->vport == vport)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
spin_unlock_irq(&phba->hbalock);
}


@ -53,17 +53,19 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint8_t iocb_flag;
uint16_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
#define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */
#define LPFC_FIP_ELS_ID_SHIFT 6
#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
uint8_t abort_count;
uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
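With iocb_flag widened from uint8_t to uint16_t in this hunk, the FIP ELS_ID bits move from 0xc0/6 up to 0xc000/14. A minimal standalone C sketch of packing and recovering the ELS_ID under the new layout; only the two defines come from the hunk above, everything else is illustrative:

#include <assert.h>
#include <stdint.h>

#define LPFC_FIP_ELS_ID_MASK	0xc000	/* top two bits of the 16-bit flag word */
#define LPFC_FIP_ELS_ID_SHIFT	14

int main(void)
{
	uint16_t iocb_flag = 0x0004;	/* e.g. LPFC_IO_FCP already set */
	uint16_t els_id = 3;		/* ELS_ID range is 0-3 */

	/* pack: place the id in the mask field without disturbing other flags */
	iocb_flag = (iocb_flag & ~LPFC_FIP_ELS_ID_MASK) |
		    ((els_id << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK);

	/* unpack: the id survives and the other flag bits are untouched */
	assert(((iocb_flag & LPFC_FIP_ELS_ID_MASK) >> LPFC_FIP_ELS_ID_SHIFT) == els_id);
	assert(iocb_flag & 0x0004);
	return 0;
}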


@ -22,6 +22,10 @@
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
/* Amount of time in msecs to wait for FCF rediscovery to complete */
#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254
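A hedged sketch of how this 2000 msec timeout would typically arm the redisc_wait timer added to struct lpfc_fcf further down; the mod_timer/msecs_to_jiffies pattern is an assumption about the driver's timer helper, not code from this commit:

/* sketch: start the FCF rediscovery wait timer; assumes redisc_wait was
 * initialized with setup_timer() elsewhere in the driver */
mod_timer(&phba->fcf.redisc_wait,
	  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));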
@ -126,24 +130,36 @@ struct lpfc_sli4_link {
uint8_t status;
uint8_t physical;
uint8_t fault;
uint16_t logical_speed;
};
struct lpfc_fcf_rec {
uint8_t fabric_name[8];
uint8_t switch_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
uint32_t priority;
uint16_t vlan_id;
uint32_t addr_mode;
uint32_t flag;
#define BOOT_ENABLE 0x01
#define RECORD_VALID 0x02
};
struct lpfc_fcf {
uint8_t fabric_name[8];
uint8_t switch_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
uint16_t fcfi;
uint32_t fcf_flag;
#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
#define FCF_REGISTERED 0x02 /* FCF registered with FW */
#define FCF_DISCOVERED 0x04 /* FCF discovery started */
#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
#define FCF_IN_USE 0x10 /* At least one discovery completed */
#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
uint32_t priority;
#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
#define FCF_IN_USE 0x08 /* At least one discovery completed */
#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */
#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
uint16_t vlan_id;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
struct timer_list redisc_wait;
};
#define LPFC_REGION23_SIGNATURE "RG23"
@ -248,7 +264,10 @@ struct lpfc_bmbx {
#define SLI4_CT_VFI 2
#define SLI4_CT_FCFI 3
#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
#define LPFC_SLI4_MIN_BUF_SIZE 0x400
#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
/*
* SLI4 specific data structures
@ -282,6 +301,42 @@ struct lpfc_fcp_eq_hdl {
struct lpfc_hba *phba;
};
/* Port Capabilities for SLI4 Parameters */
struct lpfc_pc_sli4_params {
uint32_t supported;
uint32_t if_type;
uint32_t sli_rev;
uint32_t sli_family;
uint32_t featurelevel_1;
uint32_t featurelevel_2;
uint32_t proto_types;
#define LPFC_SLI4_PROTO_FCOE 0x0000001
#define LPFC_SLI4_PROTO_FC 0x0000002
#define LPFC_SLI4_PROTO_NIC 0x0000004
#define LPFC_SLI4_PROTO_ISCSI 0x0000008
#define LPFC_SLI4_PROTO_RDMA 0x0000010
uint32_t sge_supp_len;
uint32_t if_page_sz;
uint32_t rq_db_window;
uint32_t loopbk_scope;
uint32_t eq_pages_max;
uint32_t eqe_size;
uint32_t cq_pages_max;
uint32_t cqe_size;
uint32_t mq_pages_max;
uint32_t mqe_size;
uint32_t mq_elem_cnt;
uint32_t wq_pages_max;
uint32_t wqe_size;
uint32_t rq_pages_max;
uint32_t rqe_size;
uint32_t hdr_pages_max;
uint32_t hdr_size;
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
};
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@ -295,7 +350,7 @@ struct lpfc_sli4_hba {
void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
/* BAR1 FCoE function CSR register memory map */
void __iomem *STAregaddr; /* Address to HST_STATE register */
void __iomem *ISRregaddr; /* Address to HST_ISR register */
@ -310,6 +365,8 @@ struct lpfc_sli4_hba {
uint32_t ue_mask_lo;
uint32_t ue_mask_hi;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
uint32_t cfg_eqn;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@ -406,6 +463,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
struct lpfc_mbx_sge *);
int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
uint16_t);
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@ -448,6 +507,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.7"
#define LPFC_DRIVER_VERSION "8.3.9"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"


@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* by the port.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
(pport->vpi_state & LPFC_VPI_REGISTERED)) {
(pport->fc_flag & FC_VFI_REGISTERED)) {
rc = lpfc_sli4_init_vpi(phba, vpi);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@ -505,6 +505,7 @@ enable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
@ -512,10 +513,10 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
spin_lock_irq(&phba->hbalock);
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(&phba->hbalock);
spin_unlock_irq(shost->host_lock);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.


@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
@ -279,24 +278,27 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
* Programmed IO routines follow.
*/
static inline int mac_esp_wait_for_fifo(struct esp *esp)
static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
{
int i = 500000;
do {
if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
return 0;
unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
if (fbytes)
return fbytes;
udelay(2);
} while (--i);
printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
esp_read8(ESP_STATUS));
return 1;
return 0;
}
static inline int mac_esp_wait_for_intr(struct esp *esp)
{
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
int i = 500000;
do {
@ -308,6 +310,7 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
} while (--i);
printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
mep->error = 1;
return 1;
}
@ -347,11 +350,10 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
unsigned long flags;
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
u8 *fifo = esp->regs + ESP_FDATA * 16;
local_irq_save(flags);
disable_irq(esp->host->irq);
cmd &= ~ESP_CMD_DMA;
mep->error = 0;
@ -359,11 +361,35 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
if (write) {
scsi_esp_cmd(esp, cmd);
if (!mac_esp_wait_for_intr(esp)) {
if (mac_esp_wait_for_fifo(esp))
esp_count = 0;
} else {
esp_count = 0;
while (1) {
unsigned int n;
n = mac_esp_wait_for_fifo(esp);
if (!n)
break;
if (n > esp_count)
n = esp_count;
esp_count -= n;
MAC_ESP_PIO_LOOP("%2@,%0@+", n);
if (!esp_count)
break;
if (mac_esp_wait_for_intr(esp))
break;
if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
break;
esp->ireg = esp_read8(ESP_INTRPT);
if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
ESP_INTR_BSERV)
break;
scsi_esp_cmd(esp, ESP_CMD_TI);
}
} else {
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@ -374,47 +400,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
scsi_esp_cmd(esp, cmd);
}
while (esp_count) {
unsigned int n;
while (esp_count) {
unsigned int n;
if (mac_esp_wait_for_intr(esp)) {
mep->error = 1;
break;
}
if (esp->sreg & ESP_STAT_SPAM) {
printk(KERN_ERR PFX "gross error\n");
mep->error = 1;
break;
}
n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
if (write) {
if (n > esp_count)
n = esp_count;
esp_count -= n;
MAC_ESP_PIO_LOOP("%2@,%0@+", n);
if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
if (mac_esp_wait_for_intr(esp))
break;
if (esp_count) {
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ESP_INTR_DC)
break;
if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
break;
scsi_esp_cmd(esp, ESP_CMD_TI);
}
} else {
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ESP_INTR_DC)
if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
ESP_INTR_BSERV)
break;
n = MAC_ESP_FIFO_SIZE - n;
n = MAC_ESP_FIFO_SIZE -
(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
if (n > esp_count)
n = esp_count;
@ -429,7 +432,7 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
}
}
local_irq_restore(flags);
enable_irq(esp->host->irq);
}
static int mac_esp_irq_pending(struct esp *esp)


@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
* Version : v00.00.04.12-rc1
* Version : v00.00.04.17.1-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@ -843,6 +843,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->lun = scp->device->lun;
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->pad_0 = 0;
pthru->flags = flags;
pthru->data_xfer_len = scsi_bufflen(scp);
@ -874,6 +875,12 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->sge_count = megasas_make_sgl32(instance, scp,
&pthru->sgl);
if (pthru->sge_count > instance->max_num_sge) {
printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
pthru->sge_count);
return 0;
}
/*
* Sense info specific
*/
@ -1000,6 +1007,12 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
} else
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
if (ldio->sge_count > instance->max_num_sge) {
printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
ldio->sge_count);
return 0;
}
/*
* Sense info specific
*/
@ -2250,6 +2263,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
dcmd->sgl.sge32[0].phys_addr = ci_h;
@ -2294,6 +2308,86 @@ megasas_get_pd_list(struct megasas_instance *instance)
return ret;
}
/*
* megasas_get_ld_list - Returns FW's ld_list structure
* @instance: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's logical drive list
* structure. This information is mainly used to build the list of target
* IDs for the logical drives exposed by the FW.
*/
static int
megasas_get_ld_list(struct megasas_instance *instance)
{
int ret = 0, ld_index = 0, ids = 0;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct MR_LD_LIST *ci;
dma_addr_t ci_h = 0;
cmd = megasas_get_cmd(instance);
if (!cmd) {
printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
ci = pci_alloc_consistent(instance->pdev,
sizeof(struct MR_LD_LIST),
&ci_h);
if (!ci) {
printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
dcmd->opcode = MR_DCMD_LD_GET_LIST;
dcmd->sgl.sge32[0].phys_addr = ci_h;
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
dcmd->pad_0 = 0;
if (!megasas_issue_polled(instance, cmd)) {
ret = 0;
} else {
ret = -1;
}
/* populate the instance's LD target ID list from the FW response */
if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] =
ci->ldList[ld_index].ref.targetId;
}
}
}
pci_free_consistent(instance->pdev,
sizeof(struct MR_LD_LIST),
ci,
ci_h);
megasas_return_cmd(instance, cmd);
return ret;
}
/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
@ -2339,6 +2433,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = ci_h;
@ -2590,6 +2685,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
megasas_get_pd_list(instance);
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
megasas_get_ld_list(instance);
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
/*
@ -2714,6 +2812,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = el_info_h;
@ -2828,6 +2927,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
dcmd->mbox.w[0] = seq_num;
@ -3166,6 +3266,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
@ -3205,6 +3306,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = opcode;
@ -3984,6 +4086,7 @@ megasas_aen_polling(struct work_struct *work)
struct Scsi_Host *host;
struct scsi_device *sdev1;
u16 pd_index = 0;
u16 ld_index = 0;
int i, j, doscan = 0;
u32 seq_num;
int error;
@ -3999,8 +4102,124 @@ megasas_aen_polling(struct work_struct *work)
switch (instance->evt_detail->code) {
case MR_EVT_PD_INSERTED:
if (megasas_get_pd_list(instance) == 0) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
j++) {
pd_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 =
scsi_device_lookup(host, i, j, 0);
if (instance->pd_list[pd_index].driveState
== MR_PD_STATE_SYSTEM) {
if (!sdev1) {
scsi_add_device(host, i, j, 0);
}
if (sdev1)
scsi_device_put(sdev1);
}
}
}
}
doscan = 0;
break;
case MR_EVT_PD_REMOVED:
if (megasas_get_pd_list(instance) == 0) {
megasas_get_pd_list(instance);
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
j++) {
pd_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 =
scsi_device_lookup(host, i, j, 0);
if (instance->pd_list[pd_index].driveState
== MR_PD_STATE_SYSTEM) {
if (sdev1) {
scsi_device_put(sdev1);
}
} else {
if (sdev1) {
scsi_remove_device(sdev1);
scsi_device_put(sdev1);
}
}
}
}
}
doscan = 0;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
j++) {
ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
i + MEGASAS_MAX_LD_CHANNELS,
j,
0);
if (instance->ld_ids[ld_index] != 0xff) {
if (sdev1) {
scsi_device_put(sdev1);
}
} else {
if (sdev1) {
scsi_remove_device(sdev1);
scsi_device_put(sdev1);
}
}
}
}
doscan = 0;
break;
case MR_EVT_LD_CREATED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0;
j < MEGASAS_MAX_DEV_PER_CHANNEL;
j++) {
ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
i+MEGASAS_MAX_LD_CHANNELS,
j, 0);
if (instance->ld_ids[ld_index] !=
0xff) {
if (!sdev1) {
scsi_add_device(host,
i + 2,
j, 0);
}
}
if (sdev1) {
scsi_device_put(sdev1);
}
}
}
doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
doscan = 1;
break;
default:
@ -4035,6 +4254,31 @@ megasas_aen_polling(struct work_struct *work)
}
}
}
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
ld_index =
(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
sdev1 = scsi_device_lookup(host,
i+MEGASAS_MAX_LD_CHANNELS, j, 0);
if (instance->ld_ids[ld_index] != 0xff) {
if (!sdev1) {
scsi_add_device(host,
i+2,
j, 0);
} else {
scsi_device_put(sdev1);
}
} else {
if (sdev1) {
scsi_remove_device(sdev1);
scsi_device_put(sdev1);
}
}
}
}
}
if ( instance->aen_cmd != NULL ) {


@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
#define MEGASAS_VERSION "00.00.04.12-rc1"
#define MEGASAS_RELDATE "Sep. 17, 2009"
#define MEGASAS_EXT_VERSION "Thu Sep. 17 11:41:51 PST 2009"
#define MEGASAS_VERSION "00.00.04.17.1-rc1"
#define MEGASAS_RELDATE "Oct. 29, 2009"
#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009"
/*
* Device IDs
@ -117,6 +117,7 @@
#define MFI_CMD_STP 0x08
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
@ -349,6 +350,32 @@ struct megasas_pd_list {
u8 driveState;
} __packed;
/*
* defines the logical drive reference structure
*/
union MR_LD_REF {
struct {
u8 targetId;
u8 reserved;
u16 seqNum;
};
u32 ref;
} __packed;
/*
* defines the logical drive list structure
*/
struct MR_LD_LIST {
u32 ldCount;
u32 reserved;
struct {
union MR_LD_REF ref;
u8 state;
u8 reserved[3];
u64 size;
} ldList[MAX_LOGICAL_DRIVES];
} __packed;
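A self-contained user-space sketch mirroring the ld_ids bookkeeping that megasas_get_ld_list performs with this structure. The fields are copied from the definitions above; MAX_LD_IDS of 256 is an assumption (any 8-bit targetId fits), and the anonymous union member needs C11:

#include <stdint.h>
#include <string.h>

#define MAX_LOGICAL_DRIVES	64
#define MAX_LD_IDS		256

union MR_LD_REF {
	struct {
		uint8_t  targetId;
		uint8_t  reserved;
		uint16_t seqNum;
	};
	uint32_t ref;
};

struct MR_LD_LIST {
	uint32_t ldCount;
	uint32_t reserved;
	struct {
		union MR_LD_REF ref;
		uint8_t  state;
		uint8_t  reserved[3];
		uint64_t size;
	} ldList[MAX_LOGICAL_DRIVES];
};

/* mark every target id the FW reports in a non-zero state; 0xff means
 * "no logical drive at this id" */
static void build_ld_ids(const struct MR_LD_LIST *ci, uint8_t ld_ids[MAX_LD_IDS])
{
	uint32_t i;

	memset(ld_ids, 0xff, MAX_LD_IDS);
	for (i = 0; i < ci->ldCount && i < MAX_LOGICAL_DRIVES; i++)
		if (ci->ldList[i].state != 0)
			ld_ids[ci->ldList[i].ref.targetId] =
				ci->ldList[i].ref.targetId;
}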
/*
* SAS controller properties
*/
@ -637,6 +664,8 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD 64
#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_DBG_LVL 1
@ -1187,6 +1216,7 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set;
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;


@ -44,6 +44,7 @@ config SCSI_MPT2SAS
tristate "LSI MPT Fusion SAS 2.0 Device Driver"
depends on PCI && SCSI
select SCSI_SAS_ATTRS
select RAID_ATTRS
---help---
This driver supports PCI-Express SAS 6Gb/s Host Adapters.


@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
* mpi2.h Version: 02.00.13
* mpi2.h Version: 02.00.14
*
* Version History
* ---------------
@ -53,6 +53,10 @@
* bytes reserved.
* Added RAID Accelerator functionality.
* 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
* 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MSI-x index mask and shift for Reply Post Host
* Index register.
* Added function code for Host Based Discovery Action.
* --------------------------------------------------------------------------
*/
@ -78,7 +82,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
#define MPI2_HEADER_VERSION_UNIT (0x0D)
#define MPI2_HEADER_VERSION_UNIT (0x0E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@ -232,9 +236,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
/*
* Offset for the Reply Descriptor Post Queue
* Defines for the Reply Descriptor Post Queue
*/
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
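A small illustration of how the new MSI-x index field composes with the 24-bit host index when writing the Reply Post Host Index register; the masks and shift are the ones defined above, but the helper itself is hypothetical:

#include <stdint.h>

#define MPI2_REPLY_POST_HOST_INDEX_MASK	(0x00FFFFFF)
#define MPI2_RPHI_MSIX_INDEX_MASK	(0xFF000000)
#define MPI2_RPHI_MSIX_INDEX_SHIFT	(24)

/* hypothetical helper: build the 32-bit value a driver would write to
 * the register at MPI2_REPLY_POST_HOST_INDEX_OFFSET */
static uint32_t rphi_compose(uint32_t host_index, uint8_t msix_index)
{
	return (host_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
	       (((uint32_t)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT) &
		MPI2_RPHI_MSIX_INDEX_MASK);
}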
/*
* Defines for the HCBSize and address
@ -497,12 +504,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
/* Host Based Discovery Action */
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
/* Doorbell functions */
#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
/* #define MPI2_FUNCTION_IO_UNIT_RESET (0x41) */
#define MPI2_FUNCTION_HANDSHAKE (0x42)


@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
* mpi2_cnfg.h Version: 02.00.12
* mpi2_cnfg.h Version: 02.00.13
*
* Version History
* ---------------
@ -107,6 +107,8 @@
* to SAS Device Page 0 Flags field.
* Added PhyInfo defines for power condition.
* Added Ethernet configuration pages.
* 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
* Added SAS PHY Page 4 structure and defines.
* --------------------------------------------------------------------------
*/
@ -712,6 +714,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
/* IO Unit Page 1 Flags defines */
#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
@ -2291,6 +2294,26 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
#define MPI2_SASPHY3_PAGEVERSION (0x00)
/* SAS PHY Page 4 */
typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 Reserved1; /* 0x08 */
U8 Reserved2; /* 0x0A */
U8 Flags; /* 0x0B */
U8 InitialFrame[28]; /* 0x0C */
} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
#define MPI2_SASPHY4_PAGEVERSION (0x00)
/* values for the Flags field */
#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
/****************************************************************************
* SAS Port Config Pages
****************************************************************************/


@ -5,23 +5,24 @@
Copyright (c) 2000-2009 LSI Corporation.
---------------------------------------
Header Set Release Version: 02.00.12
Header Set Release Date: 05-06-09
Header Set Release Version: 02.00.14
Header Set Release Date: 10-28-09
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
mpi2.h 02.00.12 02.00.11
mpi2_cnfg.h 02.00.11 02.00.10
mpi2_init.h 02.00.07 02.00.06
mpi2_ioc.h 02.00.11 02.00.10
mpi2_raid.h 02.00.03 02.00.03
mpi2_sas.h 02.00.02 02.00.02
mpi2.h 02.00.14 02.00.13
mpi2_cnfg.h 02.00.13 02.00.12
mpi2_init.h 02.00.08 02.00.07
mpi2_ioc.h 02.00.13 02.00.12
mpi2_raid.h 02.00.04 02.00.04
mpi2_sas.h 02.00.03 02.00.02
mpi2_targ.h 02.00.03 02.00.03
mpi2_tool.h 02.00.03 02.00.02
mpi2_tool.h 02.00.04 02.00.04
mpi2_type.h 02.00.00 02.00.00
mpi2_ra.h 02.00.00
mpi2_history.txt 02.00.11 02.00.12
mpi2_ra.h 02.00.00 02.00.00
mpi2_hbd.h 02.00.00
mpi2_history.txt 02.00.14 02.00.13
* Date Version Description
@ -65,6 +66,11 @@ mpi2.h
* MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
* bytes reserved.
* Added RAID Accelerator functionality.
* 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
* 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MSI-x index mask and shift for Reply Post Host
* Index register.
* Added function code for Host Based Discovery Action.
* --------------------------------------------------------------------------
mpi2_cnfg.h
@ -155,6 +161,15 @@ mpi2_cnfg.h
* Added expander reduced functionality data to SAS
* Expander Page 0.
* Added SAS PHY Page 2 and SAS PHY Page 3.
* 07-30-09 02.00.12 Added IO Unit Page 7.
* Added new device ids.
* Added SAS IO Unit Page 5.
* Added partial and slumber power management capable flags
* to SAS Device Page 0 Flags field.
* Added PhyInfo defines for power condition.
* Added Ethernet configuration pages.
* 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
* Added SAS PHY Page 4 structure and defines.
* --------------------------------------------------------------------------
mpi2_init.h
@ -172,6 +187,10 @@ mpi2_init.h
* Query Asynchronous Event.
* Defined two new bits in the SlotStatus field of the SCSI
* Enclosure Processor Request and Reply.
* 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
* both SCSI IO Error Reply and SCSI Task Management Reply.
* Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
* Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
* --------------------------------------------------------------------------
mpi2_ioc.h
@ -246,6 +265,20 @@ mpi2_ioc.h
* Added two new reason codes for SAS Device Status Change
* Event.
* Added new event: SAS PHY Counter.
* 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
* Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
* Added new product id family for 2208.
* 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
* Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
* Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
* Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
* Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
* Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
* Added Host Based Discovery Phy Event data.
* Added defines for ProductID Product field
* (MPI2_FW_HEADER_PID_).
* Modified values for SAS ProductID Family
* (MPI2_FW_HEADER_PID_FAMILY_).
* --------------------------------------------------------------------------
mpi2_raid.h
@ -256,6 +289,8 @@ mpi2_raid.h
* 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
* the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
* can be sized by the build environment.
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
* --------------------------------------------------------------------------
mpi2_sas.h
@ -264,6 +299,8 @@ mpi2_sas.h
* Control Request.
* 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
* Request.
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* --------------------------------------------------------------------------
mpi2_targ.h
@ -283,6 +320,10 @@ mpi2_tool.h
* structures and defines.
* 02-29-08 02.00.02 Modified various names to make them 32-character unique.
* 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
* 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
* and reply messages.
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* --------------------------------------------------------------------------
mpi2_type.h
@ -293,20 +334,26 @@ mpi2_ra.h
* 05-06-09 02.00.00 Initial version.
* --------------------------------------------------------------------------
mpi2_hbd.h
* 10-28-09 02.00.00 Initial version.
* --------------------------------------------------------------------------
mpi2_history.txt Parts list history
Filename 02.00.12
---------- --------
mpi2.h 02.00.12
mpi2_cnfg.h 02.00.11
mpi2_init.h 02.00.07
mpi2_ioc.h 02.00.11
mpi2_raid.h 02.00.03
mpi2_sas.h 02.00.02
mpi2_targ.h 02.00.03
mpi2_tool.h 02.00.03
mpi2_type.h 02.00.00
mpi2_ra.h 02.00.00
Filename 02.00.14 02.00.13 02.00.12
---------- -------- -------- --------
mpi2.h 02.00.14 02.00.13 02.00.12
mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
mpi2_init.h 02.00.08 02.00.07 02.00.07
mpi2_ioc.h 02.00.13 02.00.12 02.00.11
mpi2_raid.h 02.00.04 02.00.04 02.00.03
mpi2_sas.h 02.00.03 02.00.02 02.00.02
mpi2_targ.h 02.00.03 02.00.03 02.00.03
mpi2_tool.h 02.00.04 02.00.04 02.00.03
mpi2_type.h 02.00.00 02.00.00 02.00.00
mpi2_ra.h 02.00.00 02.00.00 02.00.00
mpi2_hbd.h 02.00.00
Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
---------- -------- -------- -------- -------- -------- --------


@ -6,7 +6,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
* mpi2_init.h Version: 02.00.07
* mpi2_init.h Version: 02.00.08
*
* Version History
* ---------------
@ -27,6 +27,10 @@
* Query Asynchronous Event.
* Defined two new bits in the SlotStatus field of the SCSI
* Enclosure Processor Request and Reply.
* 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
* both SCSI IO Error Reply and SCSI Task Management Reply.
* Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
* Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
* --------------------------------------------------------------------------
*/
@ -254,6 +258,11 @@ typedef struct _MPI2_SCSI_IO_REPLY
#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
/* masks and shifts for the ResponseInfo field */
#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
@ -327,6 +336,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
U16 IOCStatus; /* 0x0E */
U32 IOCLogInfo; /* 0x10 */
U32 TerminationCount; /* 0x14 */
U32 ResponseInfo; /* 0x18 */
} MPI2_SCSI_TASK_MANAGE_REPLY,
MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
@ -339,8 +349,20 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
/* masks and shifts for the ResponseInfo field */
#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
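A standalone sketch of decoding a task-management reply's new ResponseInfo word with the masks above; the example value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE	(0x000000FF)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI2		(0x0000FF00)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2		(8)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI1		(0x00FF0000)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1		(16)
#define MPI2_SCSITASKMGMT_RI_MASK_ARI0		(0xFF000000)
#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0		(24)

int main(void)
{
	uint32_t ri = 0xAABBCC0A;	/* arbitrary reply value */

	printf("reason=0x%02x ari0=0x%02x ari1=0x%02x ari2=0x%02x\n",
	       (unsigned)(ri & MPI2_SCSITASKMGMT_RI_MASK_REASONCODE),
	       (unsigned)((ri & MPI2_SCSITASKMGMT_RI_MASK_ARI0) >>
			  MPI2_SCSITASKMGMT_RI_SHIFT_ARI0),
	       (unsigned)((ri & MPI2_SCSITASKMGMT_RI_MASK_ARI1) >>
			  MPI2_SCSITASKMGMT_RI_SHIFT_ARI1),
	       (unsigned)((ri & MPI2_SCSITASKMGMT_RI_MASK_ARI2) >>
			  MPI2_SCSITASKMGMT_RI_SHIFT_ARI2));
	return 0;
}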
/****************************************************************************
* SCSI Enclosure Processor messages


@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
* mpi2_ioc.h Version: 02.00.12
* mpi2_ioc.h Version: 02.00.13
*
* Version History
* ---------------
@ -87,6 +87,17 @@
* 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
* Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
* Added new product id family for 2208.
* 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
* Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
* Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
* Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
* Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
* Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
* Added Host Based Discovery Phy Event data.
* Added defines for ProductID Product field
* (MPI2_FW_HEADER_PID_).
* Modified values for SAS ProductID Family
* (MPI2_FW_HEADER_PID_FAMILY_).
* --------------------------------------------------------------------------
*/
@ -119,8 +130,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST
U16 MsgVersion; /* 0x0C */
U16 HeaderVersion; /* 0x0E */
U32 Reserved5; /* 0x10 */
U32 Reserved6; /* 0x14 */
U16 Reserved7; /* 0x18 */
U16 Reserved6; /* 0x14 */
U8 Reserved7; /* 0x16 */
U8 HostMSIxVectors; /* 0x17 */
U16 Reserved8; /* 0x18 */
U16 SystemRequestFrameSize; /* 0x1A */
U16 ReplyDescriptorPostQueueDepth; /* 0x1C */
U16 ReplyFreeQueueDepth; /* 0x1E */
@ -215,7 +228,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
U8 MaxChainDepth; /* 0x14 */
U8 WhoInit; /* 0x15 */
U8 NumberOfPorts; /* 0x16 */
U8 Reserved2; /* 0x17 */
U8 MaxMSIxVectors; /* 0x17 */
U16 RequestCredit; /* 0x18 */
U16 ProductID; /* 0x1A */
U32 IOCCapabilities; /* 0x1C */
@ -233,7 +246,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY
U8 MaxVolumes; /* 0x37 */
U16 MaxDevHandle; /* 0x38 */
U16 MaxPersistentEntries; /* 0x3A */
U32 Reserved4; /* 0x3C */
U16 MinDevHandle; /* 0x3C */
U16 Reserved4; /* 0x3E */
} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
@ -269,6 +283,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
/* ProductID field uses MPI2_FW_HEADER_PID_ */
/* IOCCapabilities */
#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
@ -453,6 +468,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
/* Log Entry Added Event data */
@ -793,6 +809,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
/* values for the ExpStatus field */
#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
@ -878,6 +895,44 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
* */
/* Host Based Discovery Phy Event data */
typedef struct _MPI2_EVENT_HBD_PHY_SAS {
U8 Flags; /* 0x00 */
U8 NegotiatedLinkRate; /* 0x01 */
U8 PhyNum; /* 0x02 */
U8 PhysicalPort; /* 0x03 */
U32 Reserved1; /* 0x04 */
U8 InitialFrame[28]; /* 0x08 */
} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
/* values for the Flags field */
#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
* the NegotiatedLinkRate field */
typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
MPI2_EVENT_HBD_PHY_SAS Sas;
} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
typedef struct _MPI2_EVENT_DATA_HBD_PHY {
U8 DescriptorType; /* 0x00 */
U8 Reserved1; /* 0x01 */
U16 Reserved2; /* 0x02 */
U32 Reserved3; /* 0x04 */
MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */
} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t;
/* values for the DescriptorType field */
#define MPI2_EVENT_HBD_DT_SAS (0x01)
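A brief hedged sketch of dispatching on the new Host Based Discovery event data; the handler name is hypothetical, while the types and defines are the ones added above:

/* sketch: decode an HBD phy event delivered in an event notification */
static void example_handle_hbd_phy(Mpi2EventDataHbdPhy_t *event_data)
{
	if (event_data->DescriptorType == MPI2_EVENT_HBD_DT_SAS) {
		Mpi2EventHbdPhySas_t *sas = &event_data->Descriptor.Sas;

		/* sas->PhyNum and sas->NegotiatedLinkRate are always
		 * meaningful; sas->InitialFrame[] only when
		 * MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID is set in sas->Flags */
		(void)sas;
	}
}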
/****************************************************************************
* EventAck message
****************************************************************************/
@ -1126,13 +1181,17 @@ typedef struct _MPI2_FW_IMAGE_HEADER
#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
/* SAS */
#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010)
#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0011)
#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */


@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
* mpi2.h Version: 02.00.02
* mpi2.h Version: 02.00.03
*
* Version History
* ---------------
@ -18,6 +18,8 @@
* Control Request.
* 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
* Request.
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* --------------------------------------------------------------------------
*/
@ -160,7 +162,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */
MPI2_SGE_IO_UNION SGL; /* 0x20 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;


@ -107,8 +107,7 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
if (ret)
return ret;
printk(KERN_INFO "setting logging_level(0x%08x)\n",
mpt2sas_fwfault_debug);
printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
return 0;
@ -1222,6 +1221,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
u32 memap_sz;
u32 pio_sz;
int i, r = 0;
u64 pio_chip = 0;
u64 chip_phys = 0;
dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
ioc->name, __func__));
@ -1255,12 +1256,13 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
if (pio_sz)
continue;
ioc->pio_chip = pci_resource_start(pdev, i);
pio_chip = (u64)pci_resource_start(pdev, i);
pio_sz = pci_resource_len(pdev, i);
} else {
if (memap_sz)
continue;
ioc->chip_phys = pci_resource_start(pdev, i);
chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
if (ioc->chip == NULL) {
@ -1280,10 +1282,10 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
"IO-APIC enabled"), ioc->pci_irq);
printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
ioc->name, ioc->pio_chip, pio_sz);
printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
ioc->name, (unsigned long long)pio_chip, pio_sz);
return 0;
@ -3573,6 +3575,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
init_waitqueue_head(&ioc->reset_wq);
ioc->fwfault_debug = mpt2sas_fwfault_debug;
/* base internal command bits */
mutex_init(&ioc->base_cmds.mutex);
ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);


@ -69,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
#define MPT2SAS_DRIVER_VERSION "03.100.03.00"
#define MPT2SAS_MAJOR_VERSION 03
#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
#define MPT2SAS_MAJOR_VERSION 04
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 03
#define MPT2SAS_BUILD_VERSION 01
#define MPT2SAS_RELEASE_VERSION 00
/*
@ -323,6 +323,7 @@ struct _sas_device {
* @device_info: bitfield provides detailed info about the hidden components
* @num_pds: number of hidden raid components
* @responding: used in _scsih_raid_device_mark_responding
* @percent_complete: resync percent complete
*/
struct _raid_device {
struct list_head list;
@ -336,6 +337,7 @@ struct _raid_device {
u32 device_info;
u8 num_pds;
u8 responding;
u8 percent_complete;
};
/**
@ -464,7 +466,6 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
* @pdev: pci pdev object
* @chip: memory mapped register space
* @chip_phys: physical addrss prior to mapping
* @pio_chip: I/O mapped register space
* @logging_level: see mpt2sas_debug.h
* @fwfault_debug: debuging FW timeouts
* @ir_firmware: IR firmware present
@ -587,8 +588,7 @@ struct MPT2SAS_ADAPTER {
char tmp_string[MPT_STRING_LENGTH];
struct pci_dev *pdev;
Mpi2SystemInterfaceRegs_t __iomem *chip;
unsigned long chip_phys;
unsigned long pio_chip;
resource_size_t chip_phys;
int logging_level;
int fwfault_debug;
u8 ir_firmware;
@ -853,6 +853,8 @@ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOCPage8_t *config_page);
int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t


@ -324,7 +324,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (r != 0)
goto out;
if (mpi_request->Action ==
MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
mpi_request->Action ==
MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
mem.page_dma);
@ -882,7 +884,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
}
/**
* mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0
* mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@ -907,7 +909,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
mpi_request.Header.PageNumber = 1;
mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
@ -921,6 +923,49 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
return r;
}
/**
* mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
* @sz: size of buffer passed in config_page
* Context: sleep.
*
* Calling function should call config_get_number_hba_phys prior to
* this function, so enough memory is allocated for config_page.
*
* Returns 0 for success, non-zero for failure.
*/
int
mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
{
Mpi2ConfigRequest_t mpi_request;
int r;
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
mpi_request.Header.PageNumber = 1;
mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
if (r)
goto out;
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
_config_request(ioc, &mpi_request, mpi_reply,
MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
out:
return r;
}
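/*
 * Illustrative caller sketch (an assumption, not part of this commit):
 * the expected pattern is read-modify-write, exactly as the new
 * _transport_phy_enable()/_transport_phy_speed() callbacks below do.
 */
#if 0
u16 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) +
(ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit1PhyData_t));
Mpi2SasIOUnitPage1_t *pg1 = kzalloc(sz, GFP_KERNEL);
Mpi2ConfigReply_t reply;
if (pg1 && !mpt2sas_config_get_sas_iounit_pg1(ioc, &reply, pg1, sz)) {
/* modify pg1->PhyData[] here, then push both CURRENT and NVRAM */
mpt2sas_config_set_sas_iounit_pg1(ioc, &reply, pg1, sz);
}
kfree(pg1);
#endif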
/**
* mpt2sas_config_get_expander_pg0 - obtain expander page 0
* @ioc: per adapter object


@ -891,6 +891,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset) {
ret = -ENODATA;
if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
@ -2202,14 +2203,10 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
karg.data_out_size = karg32.data_out_size;
karg.max_sense_bytes = karg32.max_sense_bytes;
karg.data_sge_offset = karg32.data_sge_offset;
memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
sizeof(uint32_t));
memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
sizeof(uint32_t));
memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
sizeof(uint32_t));
memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
sizeof(uint32_t));
karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
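/*
 * compat_ptr() is the canonical way to widen a 32-bit user-space handle
 * into a kernel pointer (it also copes with arches such as s390 that
 * tag the compat address space); the memcpy() of four raw bytes it
 * replaces depended on struct layout and endianness.
 */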
state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
}


@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
#include "mpt2sas_base.h"
@ -133,6 +134,9 @@ struct fw_event_work {
void *event_data;
};
/* raid transport support */
static struct raid_template *mpt2sas_raid_template;
/**
* struct _scsi_io_transfer - scsi io transfer
* @handle: sas device handle (assigned by firmware)
@ -1305,7 +1309,6 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct scsi_target *starget;
struct _raid_device *raid_device;
struct _sas_device *sas_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@ -1332,21 +1335,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
if (raid_device)
raid_device->sdev = sdev; /* raid is single lun */
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
} else {
/* set TLR bit for SSP devices */
if (!(ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_TLR))
goto out;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_device_priv_data->sas_target->sas_address);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (sas_device && sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SSP_TARGET)
sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
}
out:
return 0;
}
@ -1418,6 +1408,140 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
(flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
}
/**
* _scsih_is_raid - return boolean indicating device is raid volume
* @dev: the device struct object
*/
static int
_scsih_is_raid(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}
/**
* _scsih_get_resync - get raid volume resync percent complete
* @dev: the device struct object
*/
static void
_scsih_get_resync(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
struct _raid_device *raid_device;
unsigned long flags;
Mpi2RaidVolPage0_t vol_pg0;
Mpi2ConfigReply_t mpi_reply;
u32 volume_status_flags;
u8 percent_complete = 0;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
sdev->channel);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device)
goto out;
if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
sizeof(Mpi2RaidVolPage0_t))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
percent_complete = raid_device->percent_complete;
out:
raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
}
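/*
 * Note: percent_complete is not fetched from firmware here; it is
 * cached in struct _raid_device by the IR operation status event
 * handler further down and only reported back to the raid_class while
 * the volume flags RESYNC_IN_PROGRESS.
 */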
/**
* _scsih_get_state - get raid volume state
* @dev: the device struct object
*/
static void
_scsih_get_state(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
struct _raid_device *raid_device;
unsigned long flags;
Mpi2RaidVolPage0_t vol_pg0;
Mpi2ConfigReply_t mpi_reply;
u32 volstate;
enum raid_state state = RAID_STATE_UNKNOWN;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
sdev->channel);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device)
goto out;
if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
sizeof(Mpi2RaidVolPage0_t))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
state = RAID_STATE_RESYNCING;
goto out;
}
switch (vol_pg0.VolumeState) {
case MPI2_RAID_VOL_STATE_OPTIMAL:
case MPI2_RAID_VOL_STATE_ONLINE:
state = RAID_STATE_ACTIVE;
break;
case MPI2_RAID_VOL_STATE_DEGRADED:
state = RAID_STATE_DEGRADED;
break;
case MPI2_RAID_VOL_STATE_FAILED:
case MPI2_RAID_VOL_STATE_MISSING:
state = RAID_STATE_OFFLINE;
break;
}
out:
raid_set_state(mpt2sas_raid_template, dev, state);
}
/**
* _scsih_set_level - set raid level
* @sdev: scsi device struct
* @raid_device: raid_device object
*/
static void
_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
{
enum raid_level level = RAID_LEVEL_UNKNOWN;
switch (raid_device->volume_type) {
case MPI2_RAID_VOL_TYPE_RAID0:
level = RAID_LEVEL_0;
break;
case MPI2_RAID_VOL_TYPE_RAID10:
level = RAID_LEVEL_10;
break;
case MPI2_RAID_VOL_TYPE_RAID1E:
level = RAID_LEVEL_1E;
break;
case MPI2_RAID_VOL_TYPE_RAID1:
level = RAID_LEVEL_1;
break;
}
raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
}
/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
@ -1478,6 +1602,32 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
kfree(vol_pg0);
}
/**
* _scsih_enable_tlr - setting TLR flags
* @ioc: per adapter object
* @sdev: scsi device struct
*
* Enables Transaction Layer Retries (TLR) for tape devices when
* VPD page 0x90 is present.
*
*/
static void
_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
{
/* only for TAPE */
if (sdev->type != TYPE_TAPE)
return;
if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
return;
sas_enable_tlr(sdev);
sdev_printk(KERN_INFO, sdev, "TLR %s\n",
sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
return;
}
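/*
 * sas_enable_tlr() only records the capability in the sas transport
 * class; the TLR bit itself is ORed into mpi_control per command in
 * _scsih_qcmd() below, and is dropped again in _scsih_io_done() if the
 * target answers with MPI2_SCSITASKMGMT_RSP_INVALID_FRAME.
 */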
/**
* _scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
@ -1574,6 +1724,8 @@ _scsih_slave_configure(struct scsi_device *sdev)
(unsigned long long)raid_device->wwid,
raid_device->num_pds, ds);
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
/* raid transport support */
_scsih_set_level(sdev, raid_device);
return 0;
}
@ -1621,8 +1773,10 @@ _scsih_slave_configure(struct scsi_device *sdev)
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
if (ssp_target)
if (ssp_target) {
sas_read_port_mode_page(sdev);
_scsih_enable_tlr(ioc, sdev);
}
return 0;
}
@ -2908,8 +3062,9 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
} else
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
/* Make sure Device is not raid volume */
if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
sas_is_tlr_enabled(scmd->device))
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@ -3298,10 +3453,12 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
if (!sas_device_priv_data->tlr_snoop_check) {
sas_device_priv_data->tlr_snoop_check++;
if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
sas_device_priv_data->flags &=
~MPT_DEVICE_TLR_ON;
if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
sas_is_tlr_enabled(scmd->device) &&
response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
sas_disable_tlr(scmd->device);
sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
}
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@ -5170,11 +5327,33 @@ static void
_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
struct _raid_device *raid_device;
unsigned long flags;
u16 handle;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_ir_operation_status_event_debug(ioc,
fw_event->event_data);
event_data);
#endif
/* code added for raid transport support */
if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
handle = le16_to_cpu(event_data->VolDevHandle);
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device)
return;
raid_device->percent_complete = event_data->PercentComplete;
}
}
/**
@ -5998,6 +6177,8 @@ _scsih_remove(struct pci_dev *pdev)
struct _sas_port *mpt2sas_port;
struct _sas_device *sas_device;
struct _sas_node *expander_sibling;
struct _raid_device *raid_device, *next;
struct MPT2SAS_TARGET *sas_target_priv_data;
struct workqueue_struct *wq;
unsigned long flags;
@ -6011,6 +6192,21 @@ _scsih_remove(struct pci_dev *pdev)
if (wq)
destroy_workqueue(wq);
/* release all the volumes */
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
if (raid_device->starget) {
sas_target_priv_data =
raid_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
scsi_remove_target(&raid_device->starget->dev);
}
printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
"(0x%016llx)\n", ioc->name, raid_device->handle,
(unsigned long long) raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
/* free ports attached to the sas_host */
retry_again:
list_for_each_entry(mpt2sas_port,
@ -6373,6 +6569,13 @@ static struct pci_driver scsih_driver = {
#endif
};
/* raid transport support */
static struct raid_function_template mpt2sas_raid_functions = {
.cookie = &scsih_driver_template,
.is_raid = _scsih_is_raid,
.get_resync = _scsih_get_resync,
.get_state = _scsih_get_state,
};
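/*
 * Once this template is attached, the raid_class core surfaces the
 * callbacks above as per-volume sysfs attributes. A plausible probe
 * from user space (the exact path layout belongs to raid_class and is
 * assumed here):
 *
 *   cat /sys/class/raid_devices/<h:c:i:l>/level
 *   cat /sys/class/raid_devices/<h:c:i:l>/state
 *   cat /sys/class/raid_devices/<h:c:i:l>/resync
 */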
/**
* _scsih_init - main entry point for this driver.
@ -6392,6 +6595,12 @@ _scsih_init(void)
sas_attach_transport(&mpt2sas_transport_functions);
if (!mpt2sas_transport_template)
return -ENODEV;
/* raid transport support */
mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
if (!mpt2sas_raid_template) {
sas_release_transport(mpt2sas_transport_template);
return -ENODEV;
}
mpt2sas_base_initialize_callback_handler();
@ -6426,8 +6635,11 @@ _scsih_init(void)
mpt2sas_ctl_init();
error = pci_register_driver(&scsih_driver);
if (error)
if (error) {
/* raid transport support */
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt2sas_transport_template);
}
return error;
}
@ -6445,7 +6657,8 @@ _scsih_exit(void)
pci_unregister_driver(&scsih_driver);
sas_release_transport(mpt2sas_transport_template);
mpt2sas_ctl_exit();
mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
mpt2sas_base_release_callback_handler(tm_cb_idx);
mpt2sas_base_release_callback_handler(base_cb_idx);
@ -6457,7 +6670,10 @@ _scsih_exit(void)
mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
mpt2sas_ctl_exit();
/* raid transport support */
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt2sas_transport_template);
}
module_init(_scsih_init);


@ -855,6 +855,17 @@ rphy_to_ioc(struct sas_rphy *rphy)
return shost_priv(shost);
}
static struct _sas_phy *
_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy)
{
int i;
for (i = 0; i < ioc->sas_hba.num_phys; i++)
if (ioc->sas_hba.phy[i].phy == phy)
return &ioc->sas_hba.phy[i];
return NULL;
}
/**
* _transport_get_linkerrors - return the phy link error counters
* @phy: The sas phy object
@ -870,14 +881,8 @@ _transport_get_linkerrors(struct sas_phy *phy)
struct _sas_phy *mpt2sas_phy;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasPhyPage1_t phy_pg1;
int i;
for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
!mpt2sas_phy; i++) {
if (ioc->sas_hba.phy[i].phy != phy)
continue;
mpt2sas_phy = &ioc->sas_hba.phy[i];
}
mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
@ -971,14 +976,8 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
struct _sas_phy *mpt2sas_phy;
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
int i;
for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
!mpt2sas_phy; i++) {
if (ioc->sas_hba.phy[i].phy != phy)
continue;
mpt2sas_phy = &ioc->sas_hba.phy[i];
}
mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
@ -1005,6 +1004,173 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
return 0;
}
/**
* _transport_phy_enable - enable/disable phys
* @phy: The sas phy object
* @enable: enable phy when true
*
* Only supports sas_host direct-attached phys.
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_phy_enable(struct sas_phy *phy, int enable)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
u16 sz;
int rc = 0;
mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
/* sas_iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
if (enable)
sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
&= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
else
sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
|= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
out:
kfree(sas_iounit_pg1);
return rc;
}
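/*
 * This is wired up below as the sas transport .phy_enable callback, so
 * a direct-attached phy can now be toggled from user space; assumed
 * usage via the transport class sysfs node:
 *
 *   echo 0 > /sys/class/sas_phy/phy-0:3/enable    # disable phy 3
 *   echo 1 > /sys/class/sas_phy/phy-0:3/enable    # bring it back
 */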
/**
* _transport_phy_speed - set phy min/max link rates
* @phy: The sas phy object
* @rates: rates defined in sas_phy_linkrates
*
* Only supports sas_host direct-attached phys.
* Returns 0 for success, non-zero for failure.
*/
static int
_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
struct _sas_phy *mpt2sas_phy;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi2SasPhyPage0_t phy_pg0;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
u16 sz;
int i;
int rc = 0;
mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
if (!rates->minimum_linkrate)
rates->minimum_linkrate = phy->minimum_linkrate;
else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
rates->minimum_linkrate = phy->minimum_linkrate_hw;
if (!rates->maximum_linkrate)
rates->maximum_linkrate = phy->maximum_linkrate;
else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
rates->maximum_linkrate = phy->maximum_linkrate_hw;
/* sas_iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
for (i = 0; i < ioc->sas_hba.num_phys; i++) {
if (mpt2sas_phy->phy_id != i) {
sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
(ioc->sas_hba.phy[i].phy->minimum_linkrate +
(ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
} else {
sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
(rates->minimum_linkrate +
(rates->maximum_linkrate << 4));
}
}
if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
sz)) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
/* link reset */
_transport_phy_reset(phy, 0);
/* read phy page 0, then update the rates in the sas transport phy */
if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
mpt2sas_phy->phy_id)) {
phy->minimum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
phy->maximum_linkrate = _transport_convert_phy_link_rate(
phy_pg0.ProgrammedLinkRate >> 4);
phy->negotiated_linkrate = _transport_convert_phy_link_rate(
phy_pg0.NegotiatedLinkRate &
MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
}
out:
kfree(sas_iounit_pg1);
return rc;
}
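/*
 * Likewise for .set_phy_speed: the transport class fills in struct
 * sas_phy_linkrates from writes to the phy's sysfs attributes. Assumed
 * usage (string format as accepted by scsi_transport_sas):
 *
 *   echo "3.0 Gbit" > /sys/class/sas_phy/phy-0:3/maximum_linkrate
 */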
/**
* _transport_smp_handler - transport portal for smp passthru
* @shost: shost object
@ -1207,6 +1373,8 @@ struct sas_function_template mpt2sas_transport_functions = {
.get_enclosure_identifier = _transport_get_enclosure_identifier,
.get_bay_identifier = _transport_get_bay_identifier,
.phy_reset = _transport_phy_reset,
.phy_enable = _transport_phy_enable,
.set_phy_speed = _transport_phy_speed,
.smp_handler = _transport_smp_handler,
};


@ -654,7 +654,7 @@ static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
}
chip = &pm8001_chips[ent->driver_data];
SHOST_TO_SAS_HA(shost) =
kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
if (!SHOST_TO_SAS_HA(shost)) {
rc = -ENOMEM;
goto err_out_free_host;


@ -1640,8 +1640,10 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
uint16_t mb[MAILBOX_REGISTER_COUNT], i;
int err;
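/* request_firmware() may sleep, so the hunk drops host_lock around the
 * call and retakes it before touching the adapter again. */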
spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
@ -1699,8 +1701,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
return -ENOMEM;
#endif
spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);


@ -11,7 +11,9 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
@ -1167,6 +1169,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
ha->qla_stats.total_isp_aborts);
}
static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rval = QLA_SUCCESS;
uint16_t status[2] = {0, 0};
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (IS_QLA84XX(ha) && ha->cs84xx) {
if (ha->cs84xx->op_fw_version == 0) {
rval = qla84xx_verify_chip(vha, status);
}
if ((rval == QLA_SUCCESS) && (status[0] == 0))
return snprintf(buf, PAGE_SIZE, "%u\n",
(uint32_t)ha->cs84xx->op_fw_version);
}
return snprintf(buf, PAGE_SIZE, "\n");
}
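/*
 * Registered below as dev_attr_84xx_fw_version (read-only); assumed
 * user-space probe once the host is up:
 *
 *   cat /sys/class/scsi_host/host0/84xx_fw_version
 */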
static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
&dev_attr_optrom_fw_version,
&dev_attr_84xx_fw_version,
&dev_attr_total_isp_aborts,
&dev_attr_mpi_version,
&dev_attr_phy_version,
@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
qla2x00_abort_fcport_cmds(fcport);
}
static int
@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
/* BSG support for ELS/CT pass through */
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct srb_bsg_ctx *ctx;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
goto done;
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
goto done;
}
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
done:
return sp;
}
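/*
 * Note the explicit memset(): srb_mempool memory is recycled rather
 * than zeroed, so sp must be cleared by hand, while ctx arrives
 * zero-filled from kzalloc().
 */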
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
struct fc_rport *rport;
fc_port_t *fcport;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
srb_t *sp;
const char *type;
int req_sg_cnt, rsp_sg_cnt;
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
struct srb_bsg *els;
/* Multiple SG's are not supported for ELS requests */
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
DEBUG2(printk(KERN_INFO
"multiple SG's are not supported for ELS requests"
" [request_sg_cnt: %x reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt));
rval = -EPERM;
goto done;
}
/* ELS request for rport */
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_RPT_ELS";
/* make sure the rport is logged in,
* if not perform fabric login
*/
if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"failed to login port %06X for ELS passthru\n",
fcport->d_id.b24));
rval = -EIO;
goto done;
}
} else {
host = bsg_job->shost;
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
/* Allocate a dummy fcport structure, since functions
* preparing the IOCB and mailbox command retrieves port
* specific information from fcport structure. For Host based
* ELS commands there will be no fcport structure allocated
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
rval = -ENOMEM;
goto done;
}
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa =
bsg_job->request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
bsg_job->request->rqst_data.h_els.port_id[1];
fcport->d_id.b.domain =
bsg_job->request->rqst_data.h_els.port_id[2];
fcport->loop_id =
(fcport->d_id.b.al_pa == 0xFD) ?
NPH_FABRIC_CONTROLLER : NPH_F_PORT;
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"host not online\n"));
rval = -EIO;
goto done;
}
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
rval = -ENOMEM;
goto done_free_fcport;
}
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
rval = -ENOMEM;
goto done_free_fcport;
}
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts \
[request_sg_cnt: %x dma_request_sg_cnt: %x\
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, req_sg_cnt,
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
rval = -EAGAIN;
goto done_unmap_sg;
}
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
if (!sp) {
rval = -ENOMEM;
goto done_unmap_sg;
}
els = sp->ctx;
els->ctx.type =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
els->bsg_job = bsg_job;
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
bsg_job->request->rqst_data.h_els.command_code,
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_unmap_sg;
}
return rval;
done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
goto done_free_fcport;
done_free_fcport:
if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
kfree(fcport);
done:
return rval;
}
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
srb_t *sp;
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
int req_sg_cnt, rsp_sg_cnt;
uint16_t loop_id;
struct fc_port *fcport;
char *type = "FC_BSG_HST_CT";
struct srb_bsg *ct;
/* pass through is supported only for ISP 4Gb or higher */
if (!IS_FWI2_CAPABLE(ha)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld):Firmware is not capable to support FC "
"CT pass thru\n", vha->host_no));
rval = -EPERM;
goto done;
}
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
rval = -ENOMEM;
goto done;
}
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
rval = -ENOMEM;
goto done;
}
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"dma mapping resulted in different sg counts \
[request_sg_cnt: %x dma_request_sg_cnt: %x\
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, req_sg_cnt,
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
rval = -EAGAIN;
goto done_unmap_sg;
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"host not online\n"));
rval = -EIO;
goto done_unmap_sg;
}
loop_id =
(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
>> 24;
switch (loop_id) {
case 0xFC:
loop_id = cpu_to_le16(NPH_SNS);
break;
case 0xFA:
loop_id = vha->mgmt_svr_loop_id;
break;
default:
DEBUG2(qla_printk(KERN_INFO, ha,
"Unknown loop id: %x\n", loop_id));
rval = -EINVAL;
goto done_unmap_sg;
}
/* Allocate a dummy fcport structure, since functions preparing the
* IOCB and mailbox command retrieves port specific information
* from fcport structure. For Host based ELS commands there will be
* no fcport structure allocated
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
rval = -ENOMEM;
goto done_unmap_sg;
}
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
fcport->loop_id = loop_id;
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
if (!sp) {
rval = -ENOMEM;
goto done_free_fcport;
}
ct = sp->ctx;
ct->ctx.type = SRB_CT_CMD;
ct->bsg_job = bsg_job;
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
}
return rval;
done_free_fcport:
kfree(fcport);
done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
return rval;
}
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval;
uint8_t command_sent;
uint32_t vendor_cmd;
char *type;
struct msg_echo_lb elreq;
uint16_t response[MAILBOX_REGISTER_COUNT];
uint8_t* fw_sts_ptr;
uint8_t *req_data = NULL;
dma_addr_t req_data_dma;
uint32_t req_data_len;
uint8_t *rsp_data = NULL;
dma_addr_t rsp_data_dma;
uint32_t rsp_data_len;
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
rval = -EBUSY;
goto done;
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"host not online\n"));
rval = -EIO;
goto done;
}
elreq.req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!elreq.req_sg_cnt) {
rval = -ENOMEM;
goto done;
}
elreq.rsp_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!elreq.rsp_sg_cnt) {
rval = -ENOMEM;
goto done;
}
if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts \
[request_sg_cnt: %x dma_request_sg_cnt: %x\
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
rval = -EAGAIN;
goto done_unmap_sg;
}
req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
&req_data_dma, GFP_KERNEL);
rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
&rsp_data_dma, GFP_KERNEL);
if (!req_data || !rsp_data) {
rval = -ENOMEM;
goto done_unmap_sg;
}
/* Copy the request buffer in req_data now */
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, req_data,
req_data_len);
elreq.send_dma = req_data_dma;
elreq.rcv_dma = rsp_data_dma;
elreq.transfer_size = req_data_len;
/* Vendor cmd : loopback or ECHO diagnostic
* Options:
* Loopback : Either internal or external loopback
* ECHO: ECHO ELS or Vendor specific FC4 link data
*/
vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
elreq.options =
*(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
+ 1);
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
case QL_VND_LOOPBACK:
if (ha->current_topology != ISP_CFG_F) {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
vha->host_no, type, vendor_cmd, elreq.options));
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
if (IS_QLA81XX(ha)) {
if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
"ISP\n", __func__, vha->host_no));
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
}
} else {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
vha->host_no, type, vendor_cmd, elreq.options));
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
}
break;
case QLA84_RESET:
if (!IS_QLA84XX(vha->hw)) {
rval = -EINVAL;
DEBUG16(printk(
"%s(%ld): 8xxx exiting.\n",
__func__, vha->host_no));
return rval;
}
rval = qla84xx_reset(vha, &elreq, bsg_job);
break;
case QLA84_MGMT_CMD:
if (!IS_QLA84XX(vha->hw)) {
rval = -EINVAL;
DEBUG16(printk(
"%s(%ld): 8xxx exiting.\n",
__func__, vha->host_no));
return rval;
}
rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
break;
default:
rval = -ENOSYS;
}
if (rval != QLA_SUCCESS) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, response, sizeof(response));
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
} else {
DEBUG2(qla_printk(KERN_WARNING, ha,
"scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
rval = bsg_job->reply->result = 0;
bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, response, sizeof(response));
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
}
bsg_job->job_done(bsg_job);
done_unmap_sg:
if (req_data)
dma_free_coherent(&ha->pdev->dev, req_data_len,
req_data, req_data_dma);
if (rsp_data)
dma_free_coherent(&ha->pdev->dev, rsp_data_len,
rsp_data, rsp_data_dma);
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
return rval;
}
static int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
case FC_BSG_HST_ELS_NOLOGIN:
ret = qla2x00_process_els(bsg_job);
break;
case FC_BSG_HST_CT:
ret = qla2x00_process_ct(bsg_job);
break;
case FC_BSG_HST_VENDOR:
ret = qla2x00_process_vendor_specific(bsg_job);
break;
case FC_BSG_HST_ADD_RPORT:
case FC_BSG_HST_DEL_RPORT:
case FC_BSG_RPT_CT:
default:
DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
break;
}
return ret;
}
static int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
unsigned long flags;
struct req_que *req;
struct srb_bsg *sp_bsg;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
continue;
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
sp_bsg = (struct srb_bsg *)sp->ctx;
if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
(sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
(sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
(sp_bsg->bsg_job == bsg_job)) {
if (ha->isp_ops->abort_command(sp)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld): mbx abort_command failed\n", vha->host_no));
bsg_job->req->errors = bsg_job->reply->result = -EIO;
} else {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld): mbx abort_command success\n", vha->host_no));
bsg_job->req->errors = bsg_job->reply->result = 0;
}
goto done;
}
}
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) SRB not found to abort\n", vha->host_no));
bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
return 0;
done:
/* found and aborted: drop the lock taken above before freeing the srb */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
return 0;
}
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = {
.vport_create = qla24xx_vport_create,
.vport_disable = qla24xx_vport_disable,
.vport_delete = qla24xx_vport_delete,
.bsg_request = qla24xx_bsg_request,
.bsg_timeout = qla24xx_bsg_timeout,
};
struct fc_function_template qla2xxx_transport_vport_functions = {
@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
.bsg_request = qla24xx_bsg_request,
.bsg_timeout = qla24xx_bsg_timeout,
};
void
@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
static int
qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
{
int ret = 0;
int cmd;
uint16_t cmd_status;
DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
== A84_RESET_FLAG_ENABLE_DIAG_FW ?
A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
&cmd_status);
return ret;
}
static int
qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
{
struct access_chip_84xx *mn;
dma_addr_t mn_dma, mgmt_dma;
void *mgmt_b = NULL;
int ret = 0;
int rsp_hdr_len, len = 0;
struct qla84_msg_mgmt *ql84_mgmt;
ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
if (!ql84_mgmt)
return -ENOMEM;
ql84_mgmt->cmd =
*((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
ql84_mgmt->mgmtp.u.mem.start_addr =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
ql84_mgmt->len =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
ql84_mgmt->mgmtp.u.config.id =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
ql84_mgmt->mgmtp.u.config.param0 =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
ql84_mgmt->mgmtp.u.config.param1 =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
ql84_mgmt->mgmtp.u.info.type =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
ql84_mgmt->mgmtp.u.info.context =
*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
rsp_hdr_len = bsg_job->request_payload.payload_len;
mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
"failed %lu\n", __func__, ha->host_no));
vfree(ql84_mgmt);
return -ENOMEM;
}
memset(mn, 0, sizeof (struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
switch (ql84_mgmt->cmd) {
case QLA84_MGMT_READ_MEM:
mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
break;
case QLA84_MGMT_WRITE_MEM:
mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
break;
case QLA84_MGMT_CHNG_CONFIG:
mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
break;
case QLA84_MGMT_GET_INFO:
mn->options = cpu_to_le16(ACO_REQUEST_INFO);
mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
break;
default:
ret = -EIO;
goto exit_mgmt0;
}
/* ql84_mgmt->len is the payload size; grab it here (assignment intended) */
if ((len = ql84_mgmt->len) &&
ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
&mgmt_dma, GFP_KERNEL);
if (mgmt_b == NULL) {
DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
"failed%lu\n", __func__, ha->host_no));
ret = -ENOMEM;
goto exit_mgmt0;
}
mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
mn->dseg_count = cpu_to_le16(1);
mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
mn->dseg_length = cpu_to_le32(len);
if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
memcpy(mgmt_b, ql84_mgmt->payload, len);
}
}
ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
|| (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
if (ret != QLA_SUCCESS)
DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
__func__, ha->host_no));
} else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
(ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
}
if (mgmt_b)
dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
exit_mgmt0:
dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
vfree(ql84_mgmt);
return ret;
}


@ -31,6 +31,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#define QLA2XXX_DRIVER_NAME "qla2xxx"
@ -228,6 +229,27 @@ struct srb_logio {
uint16_t flags;
};
struct srb_bsg_ctx {
#define SRB_ELS_CMD_RPT 3
#define SRB_ELS_CMD_HST 4
#define SRB_CT_CMD 5
uint16_t type;
};
struct srb_bsg {
struct srb_bsg_ctx ctx;
struct fc_bsg_job *bsg_job;
};
struct msg_echo_lb {
dma_addr_t send_dma;
dma_addr_t rcv_dma;
uint16_t req_sg_cnt;
uint16_t rsp_sg_cnt;
uint16_t options;
uint32_t transfer_size;
};
/*
* ISP I/O Register Set structure definitions.
*/
@ -522,6 +544,8 @@ typedef struct {
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
/*
* Firmware options 1, 2, 3.
*/
@ -2230,6 +2254,13 @@ struct req_que {
int max_q_depth;
};
/* Placeholder for FW buffer parameters */
struct qlfc_fw {
void *fw_buf;
dma_addr_t fw_dma;
uint32_t len;
};
/*
* Qlogic host adapter specific data structure.
*/
@ -2594,6 +2625,7 @@ struct qla_hw_data {
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
};
/*
@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host {
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
/*
* BSG Vendor specific commands
*/
#define QL_VND_LOOPBACK 0x01
#define QLA84_RESET 0x02
#define QLA84_UPDATE_FW 0x03
#define QLA84_MGMT_CMD 0x04
/* BSG definitions for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
#define INT_DEF_LB_ECHO_CMD 1
/* BSG Vendor specific definitions */
typedef struct _A84_RESET {
uint16_t Flags;
uint16_t Reserved;
#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
} __attribute__((packed)) A84_RESET, *PA84_RESET;
#define A84_ISSUE_WRITE_TYPE_CMD 0
#define A84_ISSUE_READ_TYPE_CMD 1
#define A84_CLEANUP_CMD 2
#define A84_ISSUE_RESET_OP_FW 3
#define A84_ISSUE_RESET_DIAG_FW 4
#define A84_ISSUE_UPDATE_OPFW_CMD 5
#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
struct qla84_mgmt_param {
union {
struct {
uint32_t start_addr;
} mem; /* for QLA84_MGMT_READ/WRITE_MEM */
struct {
uint32_t id;
#define QLA84_MGMT_CONFIG_ID_UIF 1
#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
#define QLA84_MGMT_CONFIG_ID_PAUSE 3
#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
uint32_t param0;
uint32_t param1;
} config; /* for QLA84_MGMT_CHNG_CONFIG */
struct {
uint32_t type;
#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
uint32_t context;
/*
* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
*/
#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
/*
* context definitions for QLA84_MGMT_INFO_PORT_STAT
*/
#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
/*
* context definitions for QLA84_MGMT_INFO_LIF_STAT
*/
#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
} info; /* for QLA84_MGMT_GET_INFO */
} u;
};
struct qla84_msg_mgmt {
uint16_t cmd;
#define QLA84_MGMT_READ_MEM 0x00
#define QLA84_MGMT_WRITE_MEM 0x01
#define QLA84_MGMT_CHNG_CONFIG 0x02
#define QLA84_MGMT_GET_INFO 0x03
uint16_t rsrvd;
struct qla84_mgmt_param mgmtp;/* parameters for cmd */
uint32_t len; /* bytes in payload following this struct */
uint8_t payload[0]; /* payload for cmd */
};
struct msg_update_fw {
/*
* diag_fw = 0 operational fw
* otherwise diagnostic fw
* offset, len, fw_len are present to overcome the current limitation
* of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
* specifies the byte "offset" where it fits in the fw buffer. The
* number of bytes in each chunk is specified in "len". "fw_len"
* is the total size of fw. The first chunk should start at offset = 0.
* When offset+len == fw_len, the fw is written to the HBA.
*/
uint32_t diag_fw;
uint32_t offset;/* start offset */
uint32_t len; /* num bytes in cur xfer */
uint32_t fw_len; /* size of fw in bytes */
uint8_t fw_bytes[0];
};
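/*
 * Worked example of the chunking rule above (sizes illustrative only):
 * a 300 KB image under a 128 KB transfer limit would be sent as
 *
 *   { offset = 0,      len = 131072, fw_len = 307200 }
 *   { offset = 131072, len = 131072, fw_len = 307200 }
 *   { offset = 262144, len = 45056,  fw_len = 307200 }
 *
 * and only the final chunk, where offset + len == fw_len, triggers the
 * write to the HBA.
 */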
#endif


@ -627,6 +627,39 @@ struct els_entry_24xx {
uint32_t rx_len; /* Data segment 1 length. */
};
struct els_sts_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint16_t comp_status;
uint16_t nport_handle; /* N_PORT handle. */
uint16_t reserved_1;
uint8_t vp_index;
uint8_t sof_type;
uint32_t rx_xchg_address; /* Receive exchange address. */
uint16_t reserved_2;
uint8_t opcode;
uint8_t reserved_3;
uint8_t port_id[3];
uint8_t reserved_4;
uint16_t reserved_5;
uint16_t control_flags; /* Control flags. */
uint32_t total_byte_count;
uint32_t error_subcode_1;
uint32_t error_subcode_2;
};
/*
* ISP queue - Mailbox Command entry structure definition.
*/


@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
/*
* Global Data in qla_os.c source file.
*/
@ -76,6 +78,7 @@ extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
struct qla_hw_data *);
extern void qla2x00_free_host(struct scsi_qla_host *);
@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern void qla2x00_ctx_sp_free(srb_t *);
/*
* Global Function Prototypes in qla_mbx.c source file.
@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
/*
* Global Function Prototypes in qla_dfs.c source file.


@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
ctx->free(sp);
}
static void
void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
/* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"Unable to initialize ISP84XX.\n");
qla84xx_put_chip(vha);
}
}
return (rval);
}
@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data)
*
* Returns a pointer to the allocated fcport, or NULL, if none available.
*/
static fc_port_t *
fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
fc_port_t *fcport;
@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (qla2x00_is_reserved_id(vha, loop_id))
continue;
if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
if (atomic_read(&vha->loop_down_timer) ||
LOOP_TRANSITION(vha)) {
atomic_set(&vha->loop_down_timer, 0);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
}
if (swl != NULL) {
if (last_dev) {
@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
void
qla81xx_update_fw_options(scsi_qla_host_t *ha)
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
if (!ql2xetsenable)
return;
/* Enable ETS Burst. */
memset(ha->fw_options, 0, sizeof(ha->fw_options));
ha->fw_options[2] |= BIT_9;
qla2x00_set_fw_options(vha, ha->fw_options);
}
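/*
 * ETS burst stays disabled unless the new ql2xetsenable module
 * parameter is set; assumed usage at load time:
 *
 *   modprobe qla2xxx ql2xetsenable=1
 */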

Some files were not shown because too many files changed in this diff.