Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] libsas: make ATA functions selectable by a config option
  [SCSI] bsg: unexport sg v3 helper functions
  [SCSI] bsg: fix bsg_unregister_queue
  [SCSI] bsg: make class backlinks
  [SCSI] 3w-9xxx: add support for 9690SA
  [SCSI] bsg: fix bsg_register_queue error path
  [SCSI] ESP: Increase ESP_BUS_TIMEOUT to 275.
  [SCSI] libsas: fix scr_read/write users and update the libata documentation
  [SCSI] mpt fusion: update Kconfig help
  [SCSI] scsi_transport_sas: add destructor for bsg
  [SCSI] iscsi_tcp: buggered kmalloc()
  [SCSI] qla2xxx: Update version number to 8.02.00-k2.
  [SCSI] qla2xxx: Add ISP25XX support.
  [SCSI] qla2xxx: Use pci_try_set_mwi().
  [SCSI] qla2xxx: Use PCI-X/PCI-Express read control interfaces.
  [SCSI] qla2xxx: Re-factor isp_operations to static structures.
  [SCSI] qla2xxx: Validate mid-layer 'underflow' during check-condition handling.
  [SCSI] qla2xxx: Correct setting of 'current' and 'supported' speeds during FDMI registration.
  [SCSI] qla2xxx: Generalize iIDMA support.
  [SCSI] qla2xxx: Generalize FW-Interface-2 support.
  ...
Linus Torvalds 2007-07-22 11:36:49 -07:00
Parents 7578634990 b91421749a
Commit e6f194d8f6
73 changed files with 3673 additions and 1319 deletions

View file

@ -456,8 +456,9 @@ void (*irq_clear) (struct ata_port *);
<sect2><title>SATA phy read/write</title>
<programlisting>
u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
int (*scr_read) (struct ata_port *ap, unsigned int sc_reg,
u32 *val);
int (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
u32 val);
</programlisting>
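The documentation change above tracks the libsas/libata fix in this series: ->scr_read() and ->scr_write() now return an int status and hand the register value back through a pointer. A minimal sketch of a driver-side accessor under the new prototype (the driver name is invented and not part of this merge; errors follow the usual negative-errno convention):

/* Hypothetical driver: SCR read with the new int-returning prototype.
 * ap->ioaddr.scr_addr is assumed to be the mapped SCR register block.
 */
static int mydrv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(ap->ioaddr.scr_addr + sc_reg * 4);
	return 0;
}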

View file

@ -53,7 +53,7 @@ endif # BLOCK
config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
depends on (SCSI=y) && EXPERIMENTAL
depends on EXPERIMENTAL
---help---
Saying Y here will enable generic SG (SCSI generic) v4 support
for any block device.
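As a concrete illustration of what SG v4 support on a plain block device looks like from userspace, here is a rough sketch of an INQUIRY issued through a bsg node with struct sg_io_v4. The device path is an assumption and the field usage is reconstructed from memory of <linux/bsg.h>, so treat it as a sketch rather than reference code:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>      /* SG_IO */
#include <linux/bsg.h>    /* struct sg_io_v4, BSG_PROTOCOL_SCSI */

static int bsg_inquiry(const char *path)   /* e.g. "/dev/bsg/0:0:0:0" (assumed) */
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96 bytes */
	unsigned char data[96], sense[32];
	struct sg_io_v4 io;
	int fd, ret;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';                        /* marks a v4 header */
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	io.request_len = sizeof(cdb);
	io.request = (unsigned long)cdb;       /* CDB */
	io.max_response_len = sizeof(sense);
	io.response = (unsigned long)sense;    /* sense data on error */
	io.din_xfer_len = sizeof(data);        /* data-in: device -> host */
	io.din_xferp = (unsigned long)data;
	io.timeout = 5000;                     /* milliseconds */

	ret = ioctl(fd, SG_IO, &io);           /* 0 on success; also check io.device_status */
	close(fd);
	return ret;
}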

View file

@ -932,24 +932,34 @@ void bsg_unregister_queue(struct request_queue *q)
{
struct bsg_class_device *bcd = &q->bsg_dev;
WARN_ON(!bcd->class_dev);
if (!bcd->class_dev)
return;
mutex_lock(&bsg_mutex);
sysfs_remove_link(&q->kobj, "bsg");
class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
class_device_unregister(bcd->class_dev);
put_device(bcd->dev);
bcd->class_dev = NULL;
bcd->dev = NULL;
list_del_init(&bcd->list);
bsg_device_nr--;
mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, const char *name)
int bsg_register_queue(struct request_queue *q, struct device *gdev,
const char *name)
{
struct bsg_class_device *bcd, *__bcd;
dev_t dev;
int ret = -EMFILE;
struct class_device *class_dev = NULL;
const char *devname;
if (name)
devname = name;
else
devname = gdev->bus_id;
/*
* we need a proper transport to send commands, not a stacked device
@ -982,18 +992,20 @@ retry:
bsg_minor_idx = 0;
bcd->queue = q;
bcd->dev = get_device(gdev);
dev = MKDEV(bsg_major, bcd->minor);
class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto err;
goto err_put;
}
bcd->class_dev = class_dev;
if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
if (ret)
goto err;
goto err_unregister;
}
list_add_tail(&bcd->list, &bsg_class_list);
@ -1001,37 +1013,17 @@ retry:
mutex_unlock(&bsg_mutex);
return 0;
err_unregister:
class_device_unregister(class_dev);
err_put:
put_device(gdev);
err:
if (class_dev)
class_device_destroy(bsg_class, MKDEV(bsg_major, bcd->minor));
mutex_unlock(&bsg_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
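With the new prototype shown above, a transport supplies the parent struct device explicitly and may pass NULL as the name to fall back to gdev->bus_id. A hypothetical caller (not code from this series) would look roughly like:

/* Hypothetical transport code: expose a request queue through bsg,
 * parented to an existing struct device, and tear it down again.
 */
static int my_transport_attach_bsg(struct request_queue *q, struct device *dev)
{
	/* NULL name => bsg derives the device name from dev->bus_id */
	return bsg_register_queue(q, dev, NULL);
}

static void my_transport_detach_bsg(struct request_queue *q)
{
	bsg_unregister_queue(q);
}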
static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
int ret;
struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
struct request_queue *rq = sdp->request_queue;
if (rq->kobj.parent)
ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
else
ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
return ret;
}
static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
}
static struct class_interface bsg_intf = {
.add = bsg_add,
.remove = bsg_remove,
};
static struct cdev bsg_cdev = {
.kobj = {.name = "bsg", },
.owner = THIS_MODULE,
@ -1069,16 +1061,9 @@ static int __init bsg_init(void)
if (ret)
goto unregister_chrdev;
ret = scsi_register_interface(&bsg_intf);
if (ret)
goto remove_cdev;
printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
" loaded (major %d)\n", bsg_major);
return 0;
remove_cdev:
printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
cdev_del(&bsg_cdev);
unregister_chrdev:
unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:

View file

@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
}
EXPORT_SYMBOL_GPL(blk_verify_command);
int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@ -238,21 +238,19 @@ int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
return 0;
}
EXPORT_SYMBOL_GPL(blk_fill_sghdr_rq);
/*
* unmap a request that was previously mapped to this sg_io_hdr. handles
* both sg and non-sg sg_io_hdr.
*/
int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
{
blk_rq_unmap_user(rq->bio);
blk_put_request(rq);
return 0;
}
EXPORT_SYMBOL_GPL(blk_unmap_sghdr_rq);
int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio)
{
int r, ret = 0;
@ -287,7 +285,6 @@ int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return r;
}
EXPORT_SYMBOL_GPL(blk_complete_sghdr_rq);
static int sg_io(struct file *file, request_queue_t *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)

View file

@ -1160,7 +1160,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = (char *)sbp2_driver_name,
.proc_name = sbp2_driver_name,
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
.slave_configure = sbp2_scsi_slave_configure,

View file

@ -37,6 +37,7 @@ config FUSION_FC
LSIFC929
LSIFC929X
LSIFC929XL
Brocade FC 410/420
config FUSION_SAS
tristate "Fusion MPT ScsiHost drivers for SAS"

View file

@ -161,6 +161,7 @@ static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_timer_expired(unsigned long data);
static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
@ -1131,6 +1132,248 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
return -1;
}
/**
* mpt_get_product_name - returns product string
* @vendor: pci vendor id
* @device: pci device id
* @revision: pci revision id
* @prod_name: string returned
*
* Returns product string displayed when driver loads,
* in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
*
**/
static void
mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
{
char *product_str = NULL;
if (vendor == PCI_VENDOR_ID_BROCADE) {
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "BRE040 A0";
break;
case 0x01:
product_str = "BRE040 A1";
break;
default:
product_str = "BRE040";
break;
}
break;
}
goto out;
}
switch (device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC909:
product_str = "LSIFC909 B1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919:
product_str = "LSIFC919 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929:
product_str = "LSIFC929 B0";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
if (revision < 0x80)
product_str = "LSIFC919X A0";
else
product_str = "LSIFC919XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (revision < 0x80)
product_str = "LSIFC929X A0";
else
product_str = "LSIFC929XL A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
product_str = "LSIFC939X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
product_str = "LSIFC949X A1";
break;
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
switch (revision)
{
case 0x00:
product_str = "LSIFC949E A0";
break;
case 0x01:
product_str = "LSIFC949E A1";
break;
default:
product_str = "LSIFC949E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
switch (revision)
{
case 0x00:
product_str = "LSI53C1030 A0";
break;
case 0x01:
product_str = "LSI53C1030 B0";
break;
case 0x03:
product_str = "LSI53C1030 B1";
break;
case 0x07:
product_str = "LSI53C1030 B2";
break;
case 0x08:
product_str = "LSI53C1030 C0";
break;
case 0x80:
product_str = "LSI53C1030T A0";
break;
case 0x83:
product_str = "LSI53C1030T A2";
break;
case 0x87:
product_str = "LSI53C1030T A3";
break;
case 0xc1:
product_str = "LSI53C1020A A1";
break;
default:
product_str = "LSI53C1030";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
switch (revision)
{
case 0x03:
product_str = "LSI53C1035 A2";
break;
case 0x04:
product_str = "LSI53C1035 B0";
break;
default:
product_str = "LSI53C1035";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064 A1";
break;
case 0x01:
product_str = "LSISAS1064 A2";
break;
case 0x02:
product_str = "LSISAS1064 A3";
break;
case 0x03:
product_str = "LSISAS1064 A4";
break;
default:
product_str = "LSISAS1064";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1064E A0";
break;
case 0x01:
product_str = "LSISAS1064E B0";
break;
case 0x02:
product_str = "LSISAS1064E B1";
break;
case 0x04:
product_str = "LSISAS1064E B2";
break;
case 0x08:
product_str = "LSISAS1064E B3";
break;
default:
product_str = "LSISAS1064E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068 A0";
break;
case 0x01:
product_str = "LSISAS1068 B0";
break;
case 0x02:
product_str = "LSISAS1068 B1";
break;
default:
product_str = "LSISAS1068";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
switch (revision)
{
case 0x00:
product_str = "LSISAS1068E A0";
break;
case 0x01:
product_str = "LSISAS1068E B0";
break;
case 0x02:
product_str = "LSISAS1068E B1";
break;
case 0x04:
product_str = "LSISAS1068E B2";
break;
case 0x08:
product_str = "LSISAS1068E B3";
break;
default:
product_str = "LSISAS1068E";
break;
}
break;
case MPI_MANUFACTPAGE_DEVID_SAS1078:
switch (revision)
{
case 0x00:
product_str = "LSISAS1078 A0";
break;
case 0x01:
product_str = "LSISAS1078 B0";
break;
case 0x02:
product_str = "LSISAS1078 C0";
break;
case 0x03:
product_str = "LSISAS1078 C1";
break;
case 0x04:
product_str = "LSISAS1078 C2";
break;
default:
product_str = "LSISAS1078";
break;
}
break;
}
out:
if (product_str)
sprintf(prod_name, "%s", product_str);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_attach - Install a PCI intelligent MPT adapter.
@ -1274,23 +1517,23 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->pio_chip = (SYSIF_REGS __iomem *)pmem;
}
if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) {
ioc->prod_name = "LSIFC909";
ioc->bus_type = FC;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
ioc->prod_name = "LSIFC929";
ioc->bus_type = FC;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919) {
ioc->prod_name = "LSIFC919";
ioc->bus_type = FC;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929X) {
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
switch (pdev->device)
{
case MPI_MANUFACTPAGE_DEVICEID_FC939X:
case MPI_MANUFACTPAGE_DEVICEID_FC949X:
ioc->errata_flag_1064 = 1;
case MPI_MANUFACTPAGE_DEVICEID_FC909:
case MPI_MANUFACTPAGE_DEVICEID_FC929:
case MPI_MANUFACTPAGE_DEVICEID_FC919:
case MPI_MANUFACTPAGE_DEVICEID_FC949E:
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (revision < XL_929) {
ioc->prod_name = "LSIFC929X";
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@ -1298,75 +1541,46 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
} else {
ioc->prod_name = "LSIFC929XL";
/* 929XL Chip Fix. Set MMRBC to 0x08.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd |= 0x08;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919X) {
ioc->prod_name = "LSIFC919X";
ioc->bus_type = FC;
break;
case MPI_MANUFACTPAGE_DEVICEID_FC919X:
/* 919X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC939X) {
ioc->prod_name = "LSIFC939X";
ioc->bus_type = FC;
ioc->errata_flag_1064 = 1;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949X) {
ioc->prod_name = "LSIFC949X";
ioc->bus_type = FC;
ioc->errata_flag_1064 = 1;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949E) {
ioc->prod_name = "LSIFC949E";
ioc->bus_type = FC;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
ioc->prod_name = "LSI53C1030";
ioc->bus_type = SPI;
break;
case MPI_MANUFACTPAGE_DEVID_53C1030:
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
if (revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
ioc->prod_name = "LSI53C1035";
case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
ioc->prod_name = "LSISAS1064";
ioc->bus_type = SAS;
break;
case MPI_MANUFACTPAGE_DEVID_SAS1064:
case MPI_MANUFACTPAGE_DEVID_SAS1068:
ioc->errata_flag_1064 = 1;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
ioc->prod_name = "LSISAS1068";
ioc->bus_type = SAS;
ioc->errata_flag_1064 = 1;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
ioc->prod_name = "LSISAS1064E";
ioc->bus_type = SAS;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
ioc->prod_name = "LSISAS1068E";
ioc->bus_type = SAS;
}
else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
ioc->prod_name = "LSISAS1078";
case MPI_MANUFACTPAGE_DEVID_SAS1064E:
case MPI_MANUFACTPAGE_DEVID_SAS1068E:
case MPI_MANUFACTPAGE_DEVID_SAS1078:
ioc->bus_type = SAS;
}
@ -1880,6 +2094,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
GetIoUnitPage2(ioc);
mpt_get_manufacturing_pg_0(ioc);
}
/*
@ -2138,8 +2353,8 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
int i = 0;
printk(KERN_INFO "%s: ", ioc->name);
if (ioc->prod_name && strlen(ioc->prod_name) > 3)
printk("%s: ", ioc->prod_name+3);
if (ioc->prod_name)
printk("%s: ", ioc->prod_name);
printk("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
@ -5190,6 +5405,49 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
return;
}
static void
mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t buf_dma;
ManufacturingPage0_t *pbuf = NULL;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = 10;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!cfg.cfghdr.hdr->PageLength)
goto out;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
if (!pbuf)
goto out;
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) != 0)
goto out;
memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
out:
if (pbuf)
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendEventNotification - Send EventNotification (on or off) request to adapter

View file

@ -537,7 +537,14 @@ typedef struct _MPT_ADAPTER
int id; /* Unique adapter id N {0,1,2,...} */
int pci_irq; /* This irq */
char name[MPT_NAME_LENGTH]; /* "iocN" */
char *prod_name; /* "LSIFC9x9" */
char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
char board_name[16];
char board_assembly[16];
char board_tracer[16];
u16 nvdata_version_persistent;
u16 nvdata_version_default;
u8 io_missing_delay;
u8 device_missing_delay;
SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
u8 bus_type;

View file

@ -130,6 +130,7 @@ static struct scsi_host_template mptfc_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
/****************************************************************************
@ -153,6 +154,8 @@ static struct pci_device_id mptfc_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptfc_pci_table);

View file

@ -1119,6 +1119,7 @@ static struct scsi_host_template mptsas_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
@ -1390,6 +1391,11 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
goto out_free_consistent;
}
ioc->nvdata_version_persistent =
le16_to_cpu(buffer->NvdataVersionPersistent);
ioc->nvdata_version_default =
le16_to_cpu(buffer->NvdataVersionDefault);
for (i = 0; i < port_info->num_phys; i++) {
mptsas_print_phy_data(&buffer->PhyData[i]);
port_info->phy_info[i].phy_id = i;
@ -1409,6 +1415,63 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
return error;
}
static int
mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage1_t *buffer;
dma_addr_t dma_handle;
int error;
u16 device_missing_delay;
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
memset(&cfg, 0, sizeof(CONFIGPARMS));
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = 10;
cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
cfg.cfghdr.ehdr->PageNumber = 1;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
ioc->io_missing_delay =
le16_to_cpu(buffer->IODeviceMissingDelay);
device_missing_delay = le16_to_cpu(buffer->ReportDeviceMissingDelay);
ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
(device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
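The delay decode above is compact; here is an illustrative stand-alone rendering of the same logic. The macro names come from the MPI headers used in this file, the reading of UNIT_16 as "a field scaled by 16" is my interpretation, and the example value is made up:

static u8 decode_device_missing_delay(u16 raw)
{
	u8 timeout = raw & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	/* UNIT_16 set: the timeout field is scaled by 16, so a masked
	 * value of 2 ends up as a delay of 32. */
	if (raw & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16)
		return timeout * 16;
	return timeout;
}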
static int
mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
@ -1990,6 +2053,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
if (error)
goto out_free_port_info;
mptsas_sas_io_unit_pg1(ioc);
mutex_lock(&ioc->sas_topology_mutex);
ioc->handle = hba->phy_info[0].handle;
port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
@ -3237,6 +3301,8 @@ static struct pci_driver mptsas_driver = {
static int __init
mptsas_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
mptsas_transport_template =
@ -3260,7 +3326,11 @@ mptsas_init(void)
": Registered for IOC reset notifications\n"));
}
return pci_register_driver(&mptsas_driver);
error = pci_register_driver(&mptsas_driver);
if (error)
sas_release_transport(mptsas_transport_template);
return error;
}
static void __exit

View file

@ -3187,6 +3187,159 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mptscsih_do_cmd(hd, &iocmd);
}
static ssize_t
mptscsih_version_fw_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
(ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
(ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF);
}
static CLASS_DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
static ssize_t
mptscsih_version_bios_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
(ioc->biosVersion & 0xFF000000) >> 24,
(ioc->biosVersion & 0x00FF0000) >> 16,
(ioc->biosVersion & 0x0000FF00) >> 8,
ioc->biosVersion & 0x000000FF);
}
static CLASS_DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
static ssize_t
mptscsih_version_mpi_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
}
static CLASS_DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
static ssize_t
mptscsih_version_product_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
}
static CLASS_DEVICE_ATTR(version_product, S_IRUGO,
mptscsih_version_product_show, NULL);
static ssize_t
mptscsih_version_nvdata_persistent_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02xh\n",
ioc->nvdata_version_persistent);
}
static CLASS_DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
mptscsih_version_nvdata_persistent_show, NULL);
static ssize_t
mptscsih_version_nvdata_default_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
}
static CLASS_DEVICE_ATTR(version_nvdata_default, S_IRUGO,
mptscsih_version_nvdata_default_show, NULL);
static ssize_t
mptscsih_board_name_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
}
static CLASS_DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
static ssize_t
mptscsih_board_assembly_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
}
static CLASS_DEVICE_ATTR(board_assembly, S_IRUGO,
mptscsih_board_assembly_show, NULL);
static ssize_t
mptscsih_board_tracer_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
}
static CLASS_DEVICE_ATTR(board_tracer, S_IRUGO,
mptscsih_board_tracer_show, NULL);
static ssize_t
mptscsih_io_delay_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static CLASS_DEVICE_ATTR(io_delay, S_IRUGO,
mptscsih_io_delay_show, NULL);
static ssize_t
mptscsih_device_delay_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
MPT_ADAPTER *ioc = hd->ioc;
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static CLASS_DEVICE_ATTR(device_delay, S_IRUGO,
mptscsih_device_delay_show, NULL);
struct class_device_attribute *mptscsih_host_attrs[] = {
&class_device_attr_version_fw,
&class_device_attr_version_bios,
&class_device_attr_version_mpi,
&class_device_attr_version_product,
&class_device_attr_version_nvdata_persistent,
&class_device_attr_version_nvdata_default,
&class_device_attr_board_name,
&class_device_attr_board_assembly,
&class_device_attr_board_tracer,
&class_device_attr_io_delay,
&class_device_attr_device_delay,
NULL,
};
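Once a Scsi_Host template publishes this array through .shost_attrs, as the mptfc, mptsas and mptspi templates in this series do, each attribute appears as a read-only file under /sys/class/scsi_host/host<X>/ (for example version_fw, version_product, board_name, io_delay), the same location the mpt_get_product_name kernel-doc above refers to.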
EXPORT_SYMBOL(mptscsih_host_attrs);
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
#ifdef CONFIG_PM

View file

@ -129,3 +129,4 @@ extern void mptscsih_timer_expired(unsigned long data);
extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct class_device_attribute *mptscsih_host_attrs[];

View file

@ -821,6 +821,7 @@ static struct scsi_host_template mptspi_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
@ -1523,6 +1524,8 @@ static struct pci_driver mptspi_driver = {
static int __init
mptspi_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
@ -1543,7 +1546,11 @@ mptspi_init(void)
": Registered for IOC reset notifications\n"));
}
return pci_register_driver(&mptspi_driver);
error = pci_register_driver(&mptspi_driver);
if (error)
spi_release_transport(mptspi_transport_template);
return error;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

View file

@ -1526,15 +1526,12 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
* zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
* @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
*/
static void
zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
{
if ((gid_pn->ct.pool != 0))
if (gid_pn->ct.pool)
mempool_free(gid_pn, gid_pn->ct.pool);
else
kfree(gid_pn);
return;
}
/**

View file

@ -126,6 +126,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
/********************* FSF SPECIFIC DEFINES *********************************/

View file

@ -1626,7 +1626,7 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
{
struct zfcp_erp_add_work *p;
p = kmalloc(sizeof(*p), GFP_KERNEL);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
"the FCP-LUN 0x%Lx connected to "
@ -1639,7 +1639,6 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
}
zfcp_unit_get(unit);
memset(p, 0, sizeof(*p));
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;

View file

@ -1930,7 +1930,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
skip_fsfstatus:
send_els->status = retval;
if (send_els->handler != 0)
if (send_els->handler)
send_els->handler(send_els->handler_data);
return retval;

View file

@ -46,104 +46,57 @@ static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
/*
* Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
* array in the adapter struct.
* Cur_buf is the pointer array and count can be any number of required
buffers, the page-fitting arithmetic is done entirely within this function.
*
* returns: number of buffers allocated
* locks: must only be called with zfcp_data.config_sema taken
*/
static int
zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
{
int buf_pos;
int qdio_buffers_per_page;
int page_pos = 0;
struct qdio_buffer *first_in_page = NULL;
qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
for (buf_pos = 0; buf_pos < count; buf_pos++) {
if (page_pos == 0) {
cur_buf[buf_pos] = (struct qdio_buffer *)
get_zeroed_page(GFP_KERNEL);
if (cur_buf[buf_pos] == NULL) {
ZFCP_LOG_INFO("error: allocation of "
"QDIO buffer failed \n");
goto out;
}
first_in_page = cur_buf[buf_pos];
} else {
cur_buf[buf_pos] = first_in_page + page_pos;
}
/* was initialised to zero */
page_pos++;
page_pos %= qdio_buffers_per_page;
}
out:
return buf_pos;
}
/*
* Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
* in the adapter struct cur_buf is the pointer array and count can be any
* number of buffers in the array that should be freed starting from buffer 0
* in the adapter struct sbuf is the pointer array.
*
* locks: must only be called with zfcp_data.config_sema taken
*/
static void
zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
{
int buf_pos;
int qdio_buffers_per_page;
int pos;
qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
free_page((unsigned long) sbuf[pos]);
}
for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
free_page((unsigned long) cur_buf[buf_pos]);
return;
/*
* Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
* array in the adapter struct.
* Cur_buf is the pointer array
*
* returns: zero on success else -ENOMEM
* locks: must only be called with zfcp_data.config_sema taken
*/
static int
zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
{
int pos;
for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
if (!sbuf[pos]) {
zfcp_qdio_buffers_dequeue(sbuf);
return -ENOMEM;
}
}
for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
if (pos % QBUFF_PER_PAGE)
sbuf[pos] = sbuf[pos - 1] + 1;
return 0;
}
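To make the page-fitting stride above concrete: if QBUFF_PER_PAGE were, say, 16 (sixteen struct qdio_buffer entries fitting in one page), the first loop would allocate one zeroed page for buffers 0, 16, 32, and so on, the second loop would point buffers 1-15, 17-31, ... at the remainder of their group's page, and zfcp_qdio_buffers_dequeue() frees only those page-leading entries by walking the array with the same stride.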
/* locks: must only be called with zfcp_data.config_sema taken */
int
zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
{
int buffer_count;
int retval = 0;
int ret;
buffer_count =
zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
QDIO_MAX_BUFFERS_PER_Q);
if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
"queue\n", buffer_count);
zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
buffer_count);
retval = -ENOMEM;
goto out;
}
buffer_count =
zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
QDIO_MAX_BUFFERS_PER_Q);
if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
"queue", buffer_count);
zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
buffer_count);
ZFCP_LOG_TRACE("freeing request_queue buffers\n");
zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
QDIO_MAX_BUFFERS_PER_Q);
retval = -ENOMEM;
goto out;
}
out:
return retval;
ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
if (ret)
return ret;
return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
}
/* locks: must only be called with zfcp_data.config_sema taken */
@ -151,12 +104,10 @@ void
zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
{
ZFCP_LOG_TRACE("freeing request_queue buffers\n");
zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
QDIO_MAX_BUFFERS_PER_Q);
zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
ZFCP_LOG_TRACE("freeing response_queue buffers\n");
zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
QDIO_MAX_BUFFERS_PER_Q);
zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
}
int

View file

@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -69,6 +69,8 @@
2.26.02.008 - Free irq handler in __twa_shutdown().
Serialize reset code.
Add support for 9650SE controllers.
2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
2.26.02.010 - Add support for 9690SA controllers.
*/
#include <linux/module.h>
@ -92,7 +94,7 @@
#include "3w-9xxx.h"
/* Globals */
#define TW_DRIVER_VERSION "2.26.02.008"
#define TW_DRIVER_VERSION "2.26.02.010"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@ -124,11 +126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
unsigned short *fw_on_ctlr_branch,
unsigned short *fw_on_ctlr_build,
u32 *init_connect_result);
static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
@ -683,7 +685,7 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
full_command_packet = &tw_ioctl->firmware_command;
/* Load request id and sglist for both command types */
twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
@ -700,10 +702,10 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
/* Now we need to reset the board */
printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
tw_dev->host->host_no, TW_DRIVER, 0xc,
tw_dev->host->host_no, TW_DRIVER, 0x37,
cmd);
retval = TW_IOCTL_ERROR_OS_EIO;
twa_reset_device_extension(tw_dev, 1);
twa_reset_device_extension(tw_dev);
goto out3;
}
@ -890,7 +892,9 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
}
if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
if ((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) || (!test_bit(TW_IN_RESET, &tw_dev->flags)))
if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
(tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
(!test_bit(TW_IN_RESET, &tw_dev->flags)))
TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}
@ -935,8 +939,7 @@ static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
unsigned long before;
int retval = 1;
if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) ||
(tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE)) {
if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
before = jiffies;
while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
@ -1195,7 +1198,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
u32 status_reg_value;
TW_Response_Queue response_que;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
int handled = 0;
@ -1273,7 +1275,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
request_id = TW_RESID_OUT(response_que.response_id);
full_command_packet = tw_dev->command_packet_virt[request_id];
error = 0;
command_packet = &full_command_packet->command.oldcommand;
/* Check for command packet errors */
if (full_command_packet->command.newcommand.status != 0) {
if (tw_dev->srb[request_id] != 0) {
@ -1352,11 +1353,15 @@ twa_interrupt_bail:
} /* End twa_interrupt() */
/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
TW_Command *oldcommand;
TW_Command_Apache *newcommand;
TW_SG_Entry *sgl;
unsigned int pae = 0;
if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
pae = 1;
if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
newcommand = &full_command_packet->command.newcommand;
@ -1372,12 +1377,14 @@ static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, d
if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
/* Load the sg list */
if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
else
sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
sgl->length = cpu_to_le32(length);
if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
oldcommand->size += 1;
oldcommand->size += pae;
}
}
} /* End twa_load_sgl() */
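A note on the pae flag computed at the top of twa_load_sgl(): it is set when the kernel is 32-bit but dma_addr_t is wider than 32 bits (sizeof(long) < 8 and sizeof(dma_addr_t) > 4), which presumably corresponds to a PAE-style configuration in which each scatter-gather address occupies an extra dword; that is why the 9690SA SGL pointer is offset by pae and why oldcommand->size grows by the same amount.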
@ -1506,7 +1513,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
command_que_value = tw_dev->command_packet_phys[request_id];
/* For 9650SE write low 4 bytes first */
if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
(tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
command_que_value += TW_COMMAND_OFFSET;
writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
}
@ -1537,7 +1545,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
goto out;
} else {
if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
(tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
/* Now write upper 4 bytes */
writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
} else {
@ -1561,7 +1570,7 @@ out:
} /* End twa_post_command_packet() */
/* This function will reset a device extension */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
int i = 0;
int retval = 1;
@ -1719,7 +1728,7 @@ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
mutex_lock(&tw_dev->ioctl_lock);
/* Now reset the card and some of the device extension data */
if (twa_reset_device_extension(tw_dev, 0)) {
if (twa_reset_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
goto out;
}
@ -2001,9 +2010,12 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK);
if (retval) {
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
|| pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
|| pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
}
@ -2053,7 +2065,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_iounmap;
/* Set host specific parameters */
if (pdev->device == PCI_DEVICE_ID_3WARE_9650SE)
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
(pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
host->max_id = TW_MAX_UNITS_9650SE;
else
host->max_id = TW_MAX_UNITS;
@ -2160,6 +2173,8 @@ static struct pci_device_id twa_pci_tbl[] __devinitdata = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

View file

@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -419,6 +419,9 @@ static twa_message_type twa_error_table[] = {
#ifndef PCI_DEVICE_ID_3WARE_9650SE
#define PCI_DEVICE_ID_3WARE_9650SE 0x1004
#endif
#ifndef PCI_DEVICE_ID_3WARE_9690SA
#define PCI_DEVICE_ID_3WARE_9690SA 0x1005
#endif
/* Bitmask macros to eliminate bitfields */

View file

@ -282,7 +282,7 @@ config SCSI_ISCSI_ATTRS
config SCSI_SAS_ATTRS
tristate "SAS Transport Attributes"
depends on SCSI
depends on SCSI && BLK_DEV_BSG
help
If you wish to export transport-specific information about
each attached SAS device to sysfs, say Y.
@ -291,8 +291,12 @@ source "drivers/scsi/libsas/Kconfig"
endmenu
menu "SCSI low-level drivers"
menuconfig SCSI_LOWLEVEL
bool "SCSI low-level drivers"
depends on SCSI!=n
default y
if SCSI_LOWLEVEL
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
@ -1800,7 +1804,7 @@ config SCSI_SRP
To compile this driver as a module, choose M here: the
module will be called libsrp.
endmenu
endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"

View file

@ -79,6 +79,7 @@ static int __devinit a4000t_probe(struct device *dev)
goto out_put_host;
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
@ -95,7 +96,7 @@ static int __devinit a4000t_probe(struct device *dev)
static __devexit int a4000t_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_to_shost(dev);
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
scsi_remove_host(host);

View file

@ -751,6 +751,101 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
inqstrcpy ("V1.0", str->prl);
}
static void get_container_serial_callback(void *context, struct fib * fibptr)
{
struct aac_get_serial_resp * get_serial_reply;
struct scsi_cmnd * scsicmd;
BUG_ON(fibptr == NULL);
scsicmd = (struct scsi_cmnd *) context;
if (!aac_valid_context(scsicmd, fibptr))
return;
get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
char sp[13];
/* EVPD bit set */
sp[0] = INQD_PDT_DA;
sp[1] = scsicmd->cmnd[2];
sp[2] = 0;
sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
le32_to_cpu(get_serial_reply->uid));
aac_internal_transfer(scsicmd, sp, 0, sizeof(sp));
}
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
/**
* aac_get_container_serial - get container serial, non-blocking.
*/
static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
{
int status;
struct aac_get_serial *dinfo;
struct fib * cmd_fibcontext;
struct aac_dev * dev;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
if (!(cmd_fibcontext = aac_fib_alloc(dev)))
return -ENOMEM;
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
sizeof (struct aac_get_serial),
FsaNormal,
0, 1,
(fib_callback) get_container_serial_callback,
(void *) scsicmd);
/*
* Check that the command queued to the controller
*/
if (status == -EINPROGRESS) {
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
return -1;
}
/* Function: setinqserial
*
* Arguments: [1] pointer to void [1] int
*
* Purpose: Sets SCSI Unit Serial number.
* This is a fake. We should read a proper
* serial number from the container. <SuSE>But
* without docs it's quite hard to do it :-)
* So this will have to do in the meantime.</SuSE>
*/
static int setinqserial(struct aac_dev *dev, void *data, int cid)
{
/*
* This breaks array migration.
*/
return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
u8 a_sense_code, u8 incorrect_length,
u8 bit_pointer, u16 field_pointer,
@ -1798,6 +1893,49 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
memset(&inq_data, 0, sizeof (struct inquiry_data));
if (scsicmd->cmnd[1] & 0x1 ) {
char *arr = (char *)&inq_data;
/* EVPD bit set */
arr[0] = (scmd_id(scsicmd) == host->this_id) ?
INQD_PDT_PROC : INQD_PDT_DA;
if (scsicmd->cmnd[2] == 0) {
/* supported vital product data pages */
arr[3] = 2;
arr[4] = 0x0;
arr[5] = 0x80;
arr[1] = scsicmd->cmnd[2];
aac_internal_transfer(scsicmd, &inq_data, 0,
sizeof(inq_data));
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
scmd_id(scsicmd));
arr[1] = scsicmd->cmnd[2];
aac_internal_transfer(scsicmd, &inq_data, 0,
sizeof(inq_data));
return aac_get_container_serial(scsicmd);
} else {
/* vpd page not implemented */
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST,
SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 0, 7, 2, 0);
memcpy(scsicmd->sense_buffer,
&dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) >
sizeof(scsicmd->sense_buffer))
? sizeof(scsicmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
}
scsicmd->scsi_done(scsicmd);
return 0;
}
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data.inqd_len = 31;
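In combination with aac_get_container_serial() earlier in this file, the EVPD branch above answers page 0x00 with a supported-pages list advertising exactly pages 0x00 and 0x80 (arr[3] = 2, arr[4] = 0x00, arr[5] = 0x80), answers page 0x80 asynchronously with a unit serial number page carrying the 32-bit container UID formatted as eight hex characters, and rejects any other page code with ILLEGAL_REQUEST / invalid-field-in-CDB sense data.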
@ -2070,7 +2208,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
}
else return -EINVAL;
qd.valid = fsa_dev_ptr[qd.cnum].valid;
qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
qd.locked = fsa_dev_ptr[qd.cnum].locked;
qd.deleted = fsa_dev_ptr[qd.cnum].deleted;

View file

@ -1567,6 +1567,20 @@ struct aac_get_name_resp {
u8 data[16];
};
#define CT_CID_TO_32BITS_UID 165
struct aac_get_serial {
__le32 command; /* VM_ContainerConfig */
__le32 type; /* CT_CID_TO_32BITS_UID */
__le32 cid;
};
struct aac_get_serial_resp {
__le32 dummy0;
__le32 dummy1;
__le32 status; /* CT_OK */
__le32 uid;
};
/*
* The following command is sent to shut down each container.
*/

View file

@ -80,7 +80,11 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
pci_free_consistent(dev->pdev,
dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
dev->hw_fib_va, dev->hw_fib_pa);
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
/**
@ -1087,8 +1091,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
* case.
*/
aac_fib_map_free(aac);
aac->hw_fib_va = NULL;
aac->hw_fib_pa = 0;
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
aac->comm_addr = NULL;
aac->comm_phys = 0;
@ -1098,12 +1100,12 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
goto out;
} else {
if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
goto out;
}
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))

View file

@ -126,7 +126,7 @@ static inline int asd_init_sata(struct domain_device *dev)
if (w76 & 0x100) /* NCQ? */
qdepth = (w75 & 0x1F) + 1;
asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
(1<<qdepth)-1);
(1ULL<<qdepth)-1);
asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
}
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
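The one-character change to the tag mask above matters because qdepth can reach 32 here ((w75 & 0x1F) + 1); a stand-alone sketch of the difference, illustrative only and not part of the driver:

#include <stdint.h>

/* Building the SATA tag-allocation mask. With qdepth == 32, "1 << qdepth"
 * shifts a 32-bit int by its full width, which is undefined behaviour;
 * widening to 64 bits first yields the intended 0xFFFFFFFF before the
 * value is written to the 32-bit DDB field.
 */
static uint32_t sata_tag_mask(unsigned int qdepth)
{
	return (uint32_t)((1ULL << qdepth) - 1);
}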

Просмотреть файл

@ -81,6 +81,9 @@ static struct scsi_host_template aic94xx_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)

View file

@ -74,8 +74,13 @@ static inline int asd_map_scatterlist(struct sas_task *task,
return 0;
}
num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
task->data_dir);
/* STP tasks come from libata which has already mapped
* the SG list */
if (sas_protocol_ata(task->task_proto))
num_sg = task->num_scatter;
else
num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
task->num_scatter, task->data_dir);
if (num_sg == 0)
return -ENOMEM;
@ -120,6 +125,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
return 0;
err_unmap:
if (sas_protocol_ata(task->task_proto))
pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
task->data_dir);
return res;
@ -142,6 +148,7 @@ static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
}
asd_free_coherent(asd_ha, ascb->sg_arr);
if (task->task_proto != SAS_PROTOCOL_STP)
pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
task->data_dir);
}
@ -391,7 +398,6 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
scb->ata_task.fis = task->ata_task.fis;
scb->ata_task.fis.fis_type = 0x27;
if (likely(!task->ata_task.device_control_reg_update))
scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */

View file

@ -74,6 +74,7 @@ bvme6000_probe(struct device *dev)
goto out_put_host;
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
@ -89,7 +90,7 @@ bvme6000_probe(struct device *dev)
static __devexit int
bvme6000_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_to_shost(dev);
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
scsi_remove_host(host);

View file

@ -220,7 +220,7 @@
#define ESP_BUSID_RESELID 0x10
#define ESP_BUSID_CTR32BIT 0x40
#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))

View file

@ -30,6 +30,13 @@ config SCSI_SAS_LIBSAS
This provides transport specific helpers for SAS drivers which
use the domain device construct (like the aic94xxx).
config SCSI_SAS_ATA
bool "ATA support for libsas (requires libata)"
depends on SCSI_SAS_LIBSAS && ATA
help
Builds in ATA support into libsas. Will necessitate
the loading of libata along with libsas.
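In practice, a kernel that wants SATA end devices behind a SAS HBA such as the aic94xx enables CONFIG_SCSI_SAS_ATA=y together with CONFIG_ATA, which, as the libsas Makefile change that follows shows, pulls sas_ata.o into libsas.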
config SCSI_SAS_LIBSAS_DEBUG
bool "Compile the SAS Domain Transport Attributes in debug mode"
default y

View file

@ -34,3 +34,4 @@ libsas-y += sas_init.o \
sas_discover.o \
sas_expander.o \
sas_scsi_host.o
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o

View file

@ -0,0 +1,817 @@
/*
* Support for SATA devices on Serial Attached SCSI (SAS) controllers
*
* Copyright (C) 2006 IBM Corporation
*
* Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include <linux/scatterlist.h>
#include <scsi/sas_ata.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include <scsi/scsi_eh.h>
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
{
/* Cheesy attempt to translate SAS errors into ATA. Hah! */
/* transport error */
if (ts->resp == SAS_TASK_UNDELIVERED)
return AC_ERR_ATA_BUS;
/* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
case SAS_DEV_NO_RESPONSE:
return AC_ERR_TIMEOUT;
case SAS_INTERRUPTED:
case SAS_PHY_DOWN:
case SAS_NAK_R_ERR:
return AC_ERR_ATA_BUS;
case SAS_DATA_UNDERRUN:
/*
* Some programs that use the taskfile interface
* (smartctl in particular) can cause underrun
* problems. Ignore these errors, perhaps at our
* peril.
*/
return 0;
case SAS_DATA_OVERRUN:
case SAS_QUEUE_FULL:
case SAS_DEVICE_UNKNOWN:
case SAS_SG_ERR:
return AC_ERR_INVALID;
case SAM_CHECK_COND:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__FUNCTION__, ts->stat);
return AC_ERR_OTHER;
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
case SAS_PROTO_RESPONSE:
/* This means the ending_fis has the error
* value; return 0 here to collect it */
return 0;
default:
return 0;
}
}
static void sas_ata_task_done(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
struct domain_device *dev;
struct task_status_struct *stat = &task->task_status;
struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
struct sas_ha_struct *sas_ha;
enum ata_completion_errors ac;
unsigned long flags;
if (!qc)
goto qc_already_gone;
dev = qc->ap->private_data;
sas_ha = dev->port->ha;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_GOOD) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
} else if (stat->stat != SAM_STAT_GOOD) {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
qc->err_mask = ac;
dev->sata_dev.tf.feature = 0x04; /* status err */
dev->sata_dev.tf.command = ATA_ERR;
}
}
qc->lldd_task = NULL;
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
ata_qc_complete(qc);
spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
/*
* If the sas_task has an ata qc, a scsi_cmnd and the aborted
* flag is set, then we must have come in via the libsas EH
* functions. When we exit this function, we need to put the
* scsi_cmnd on the list of finished errors. The ata_qc_complete
* call cleans up the libata side of things but we're protected
* from the scsi_cmnd going away because the scsi_cmnd is owned
* by the EH, making libata's call to scsi_done a NOP.
*/
spin_lock_irqsave(&task->task_state_lock, flags);
if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
spin_unlock_irqrestore(&task->task_state_lock, flags);
qc_already_gone:
list_del_init(&task->list);
sas_free_task(task);
}
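For context, the completion above is the upper half of a two-sided contract: the LLDD fills in the task status and calls task->task_done(), which for STP/SATA tasks is this function. A minimal, hedged sketch of the LLDD side (the helper name and the simplistic status handling are illustrative only):

static void example_lldd_complete_stp_task(struct sas_task *task, u8 stat)
{
	unsigned long flags;

	/* mark the task done under the same lock libsas inspects */
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* report the SAS-level response and status, then hand it back */
	task->task_status.resp = SAS_TASK_COMPLETE;
	task->task_status.stat = stat;		/* e.g. SAM_GOOD */
	task->task_done(task);			/* ends up in sas_ata_task_done() */
}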
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
int res;
struct sas_task *task;
struct domain_device *dev = qc->ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);
struct scatterlist *sg;
unsigned int num = 0;
unsigned int xfer = 0;
task = sas_alloc_task(GFP_ATOMIC);
if (!task)
return AC_ERR_SYSTEM;
task->dev = dev;
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
qc->tf.command == ATA_CMD_FPDMA_READ) {
/* Need to zero out the tag libata assigned us */
qc->tf.nsect = 0;
}
ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
task->uldd_task = qc;
if (is_atapi_taskfile(&qc->tf)) {
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes + qc->pad_len;
task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
} else {
ata_for_each_sg(sg, qc) {
num++;
xfer += sg->length;
}
task->total_xfer_len = xfer;
task->num_scatter = num;
}
task->data_dir = qc->dma_dir;
task->scatter = qc->__sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
switch (qc->tf.protocol) {
case ATA_PROT_NCQ:
task->ata_task.use_ncq = 1;
/* fall through */
case ATA_PROT_ATAPI_DMA:
case ATA_PROT_DMA:
task->ata_task.dma_xfer = 1;
break;
}
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
if (sas_ha->lldd_max_execute_num < 2)
res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
else
res = sas_queue_up(task);
/* Examine */
if (res) {
SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
sas_free_task(task);
return AC_ERR_SYSTEM;
}
return 0;
}
static u8 sas_ata_check_status(struct ata_port *ap)
{
struct domain_device *dev = ap->private_data;
return dev->sata_dev.tf.command;
}
static void sas_ata_phy_reset(struct ata_port *ap)
{
struct domain_device *dev = ap->private_data;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int res = 0;
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res)
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
ap->device[0].class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
ap->device[0].class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
__FUNCTION__,
dev->sata_dev.command_set);
ap->device[0].class = ATA_DEV_UNKNOWN;
break;
}
ap->cbl = ATA_CBL_SATA;
}
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_FAILED)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
/*
* Find the sas_task and kill it. By this point,
* libata has decided to kill the qc, so we needn't
* bother with sas_ata_task_done. But we still
* ought to abort the task.
*/
struct sas_task *task = qc->lldd_task;
unsigned long flags;
qc->lldd_task = NULL;
if (task) {
/* Should this be a AT(API) device reset? */
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
task->uldd_task = NULL;
__sas_task_abort(task);
}
}
}
static void sas_ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct domain_device *dev = ap->private_data;
memcpy(tf, &dev->sata_dev.tf, sizeof (*tf));
}
static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
u32 val)
{
struct domain_device *dev = ap->private_data;
SAS_DPRINTK("STUB %s\n", __FUNCTION__);
switch (sc_reg_in) {
case SCR_STATUS:
dev->sata_dev.sstatus = val;
break;
case SCR_CONTROL:
dev->sata_dev.scontrol = val;
break;
case SCR_ERROR:
dev->sata_dev.serror = val;
break;
case SCR_ACTIVE:
dev->sata_dev.ap->sactive = val;
break;
default:
return -EINVAL;
}
return 0;
}
static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
u32 *val)
{
struct domain_device *dev = ap->private_data;
SAS_DPRINTK("STUB %s\n", __FUNCTION__);
switch (sc_reg_in) {
case SCR_STATUS:
*val = dev->sata_dev.sstatus;
return 0;
case SCR_CONTROL:
*val = dev->sata_dev.scontrol;
return 0;
case SCR_ERROR:
*val = dev->sata_dev.serror;
return 0;
case SCR_ACTIVE:
*val = dev->sata_dev.ap->sactive;
return 0;
default:
return -EINVAL;
}
}
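Both hooks follow the reworked libata SCR interface noted in the documentation hunk at the top of this merge: they return 0 or a negative errno and pass the register value back through a pointer instead of returning it. A hedged usage sketch (the dumping helper is illustrative, not part of the patch):

static void example_dump_sata_scrs(struct ata_port *ap)
{
	u32 sstatus, serror;

	if (ap->ops->scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
	    ap->ops->scr_read(ap, SCR_ERROR, &serror) == 0)
		printk(KERN_DEBUG "sas_ata: SStatus 0x%08x SError 0x%08x\n",
		       sstatus, serror);
	else
		printk(KERN_DEBUG "sas_ata: SCR registers not readable\n");
}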
static struct ata_port_operations sas_sata_ops = {
.port_disable = ata_port_disable,
.check_status = sas_ata_check_status,
.check_altstatus = sas_ata_check_status,
.dev_select = ata_noop_dev_select,
.phy_reset = sas_ata_phy_reset,
.post_internal_cmd = sas_ata_post_internal,
.tf_read = sas_ata_tf_read,
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
.scr_read = sas_ata_scr_read,
.scr_write = sas_ata_scr_write
};
static struct ata_port_info sata_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* PIO0-4 */
.mwdma_mask = 0x07, /* MWDMA0-2 */
.udma_mask = ATA_UDMA6,
.port_ops = &sas_sata_ops
};
int sas_ata_init_host_and_port(struct domain_device *found_dev,
struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct ata_port *ap;
ata_host_init(&found_dev->sata_dev.ata_host,
&ha->pcidev->dev,
sata_port_info.flags,
&sas_sata_ops);
ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
&sata_port_info,
shost);
if (!ap) {
SAS_DPRINTK("ata_sas_port_alloc failed.\n");
return -ENODEV;
}
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
found_dev->sata_dev.ap = ap;
return 0;
}
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
struct completion *waiting;
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
scsi_req_abort_cmd(qc->scsicmd);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
/* Internal command, fake a timeout and complete. */
qc->flags &= ~ATA_QCFLAG_ACTIVE;
qc->flags |= ATA_QCFLAG_FAILED;
qc->err_mask |= AC_ERR_TIMEOUT;
waiting = qc->private_data;
complete(waiting);
}
static void sas_task_timedout(unsigned long _task)
{
struct sas_task *task = (void *) _task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
complete(&task->completion);
}
static void sas_disc_task_done(struct sas_task *task)
{
if (!del_timer(&task->timer))
return;
complete(&task->completion);
}
#define SAS_DEV_TIMEOUT 10
/**
* sas_execute_task -- Basic task processing for discovery
* @task: the task to be executed
* @buffer: pointer to buffer to do I/O
* @size: size of @buffer
* @pci_dma_dir: PCI_DMA_...
*/
static int sas_execute_task(struct sas_task *task, void *buffer, int size,
int pci_dma_dir)
{
int res = 0;
struct scatterlist *scatter = NULL;
struct task_status_struct *ts = &task->task_status;
int num_scatter = 0;
int retries = 0;
struct sas_internal *i =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
if (pci_dma_dir != PCI_DMA_NONE) {
scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
if (!scatter)
goto out;
sg_init_one(scatter, buffer, size);
num_scatter = 1;
}
task->task_proto = task->dev->tproto;
task->scatter = scatter;
task->num_scatter = num_scatter;
task->total_xfer_len = size;
task->data_dir = pci_dma_dir;
task->task_done = sas_disc_task_done;
if (pci_dma_dir != PCI_DMA_NONE &&
sas_protocol_ata(task->task_proto)) {
task->num_scatter = pci_map_sg(task->dev->port->ha->pcidev,
task->scatter,
task->num_scatter,
task->data_dir);
}
for (retries = 0; retries < 5; retries++) {
task->task_state_flags = SAS_TASK_STATE_PENDING;
init_completion(&task->completion);
task->timer.data = (unsigned long) task;
task->timer.function = sas_task_timedout;
task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
add_timer(&task->timer);
res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
if (res) {
del_timer(&task->timer);
SAS_DPRINTK("executing SAS discovery task failed:%d\n",
res);
goto ex_err;
}
wait_for_completion(&task->completion);
res = -ETASK;
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
int res2;
SAS_DPRINTK("task aborted, flags:0x%x\n",
task->task_state_flags);
res2 = i->dft->lldd_abort_task(task);
SAS_DPRINTK("came back from abort task\n");
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
if (res2 == TMF_RESP_FUNC_COMPLETE)
continue; /* Retry the task */
else
goto ex_err;
}
}
if (task->task_status.stat == SAM_BUSY ||
task->task_status.stat == SAM_TASK_SET_FULL ||
task->task_status.stat == SAS_QUEUE_FULL) {
SAS_DPRINTK("task: q busy, sleeping...\n");
schedule_timeout_interruptible(HZ);
} else if (task->task_status.stat == SAM_CHECK_COND) {
struct scsi_sense_hdr shdr;
if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
&shdr)) {
SAS_DPRINTK("couldn't normalize sense\n");
continue;
}
if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
(shdr.sense_key == 2 && shdr.asc == 4 &&
shdr.ascq == 1)) {
SAS_DPRINTK("device %016llx LUN: %016llx "
"powering up or not ready yet, "
"sleeping...\n",
SAS_ADDR(task->dev->sas_addr),
SAS_ADDR(task->ssp_task.LUN));
schedule_timeout_interruptible(5*HZ);
} else if (shdr.sense_key == 1) {
res = 0;
break;
} else if (shdr.sense_key == 5) {
break;
} else {
SAS_DPRINTK("dev %016llx LUN: %016llx "
"sense key:0x%x ASC:0x%x ASCQ:0x%x"
"\n",
SAS_ADDR(task->dev->sas_addr),
SAS_ADDR(task->ssp_task.LUN),
shdr.sense_key,
shdr.asc, shdr.ascq);
}
} else if (task->task_status.resp != SAS_TASK_COMPLETE ||
task->task_status.stat != SAM_GOOD) {
SAS_DPRINTK("task finished with resp:0x%x, "
"stat:0x%x\n",
task->task_status.resp,
task->task_status.stat);
goto ex_err;
} else {
res = 0;
break;
}
}
ex_err:
if (pci_dma_dir != PCI_DMA_NONE) {
if (sas_protocol_ata(task->task_proto))
pci_unmap_sg(task->dev->port->ha->pcidev,
task->scatter, task->num_scatter,
task->data_dir);
kfree(scatter);
}
out:
return res;
}
/* ---------- SATA ---------- */
static void sas_get_ata_command_set(struct domain_device *dev)
{
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if ((fis->sector_count == 1 && /* ATA */
fis->lbal == 1 &&
fis->lbam == 0 &&
fis->lbah == 0 &&
fis->device == 0)
||
(fis->sector_count == 0 && /* CE-ATA (mATA) */
fis->lbal == 0 &&
fis->lbam == 0xCE &&
fis->lbah == 0xAA &&
(fis->device & ~0x10) == 0))
dev->sata_dev.command_set = ATA_COMMAND_SET;
else if ((fis->interrupt_reason == 1 && /* ATAPI */
fis->lbal == 1 &&
fis->byte_count_low == 0x14 &&
fis->byte_count_high == 0xEB &&
(fis->device & ~0x10) == 0))
dev->sata_dev.command_set = ATAPI_COMMAND_SET;
else if ((fis->sector_count == 1 && /* SEMB */
fis->lbal == 1 &&
fis->lbam == 0x3C &&
fis->lbah == 0xC3 &&
fis->device == 0)
||
(fis->interrupt_reason == 1 && /* SATA PM */
fis->lbal == 1 &&
fis->byte_count_low == 0x69 &&
fis->byte_count_high == 0x96 &&
(fis->device & ~0x10) == 0))
/* Treat it as a superset? */
dev->sata_dev.command_set = ATAPI_COMMAND_SET;
}
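The classification above keys off the device signature returned in the initial D2H Register FIS. As a hedged illustration, the ATAPI check in isolation amounts to a predicate like this (hypothetical helper, not part of the patch):

static int fis_signature_is_atapi(const struct dev_to_host_fis *fis)
{
	/* 0x14/0xEB in the byte-count fields is the classic packet-device
	 * signature; plain ATA devices report zeros there */
	return fis->byte_count_low == 0x14 && fis->byte_count_high == 0xEB;
}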
/**
* sas_issue_ata_cmd -- Basic SATA command processing for discovery
* @dev: the device to send the command to
* @command: the command register
* @features: the features register
* @buffer: pointer to buffer to do I/O
* @size: size of @buffer
* @pci_dma_dir: PCI_DMA_...
*/
static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
u8 features, void *buffer, int size,
int pci_dma_dir)
{
int res = 0;
struct sas_task *task;
struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
&dev->frame_rcvd[0];
res = -ENOMEM;
task = sas_alloc_task(GFP_KERNEL);
if (!task)
goto out;
task->dev = dev;
task->ata_task.fis.fis_type = 0x27;
task->ata_task.fis.command = command;
task->ata_task.fis.features = features;
task->ata_task.fis.device = d2h_fis->device;
task->ata_task.retry_count = 1;
res = sas_execute_task(task, buffer, size, pci_dma_dir);
sas_free_task(task);
out:
return res;
}
static void sas_sata_propagate_sas_addr(struct domain_device *dev)
{
unsigned long flags;
struct asd_sas_port *port = dev->port;
struct asd_sas_phy *phy;
BUG_ON(dev->parent);
memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_lock_irqsave(&port->phy_list_lock, flags);
list_for_each_entry(phy, &port->phy_list, port_phy_el)
memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_unlock_irqrestore(&port->phy_list_lock, flags);
}
#define ATA_IDENTIFY_DEV 0xEC
#define ATA_IDENTIFY_PACKET_DEV 0xA1
#define ATA_SET_FEATURES 0xEF
#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
/**
* sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
* @dev: STP/SATA device of interest (ATA/ATAPI)
*
* The LLDD has already been notified of this device, so that we can
* send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
* PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
* performance for this device.
*/
static int sas_discover_sata_dev(struct domain_device *dev)
{
int res;
__le16 *identify_x;
u8 command;
identify_x = kzalloc(512, GFP_KERNEL);
if (!identify_x)
return -ENOMEM;
if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
dev->sata_dev.identify_device = identify_x;
command = ATA_IDENTIFY_DEV;
} else {
dev->sata_dev.identify_packet_device = identify_x;
command = ATA_IDENTIFY_PACKET_DEV;
}
res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
PCI_DMA_FROMDEVICE);
if (res)
goto out_err;
/* lives on the media? */
if (le16_to_cpu(identify_x[0]) & 4) {
/* incomplete response */
SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
"dev %llx\n", SAS_ADDR(dev->sas_addr));
if (!le16_to_cpu(identify_x[83] & (1<<6)))
goto cont1;
res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
ATA_FEATURE_PUP_STBY_SPIN_UP,
NULL, 0, PCI_DMA_NONE);
if (res)
goto cont1;
schedule_timeout_interruptible(5*HZ); /* More time? */
res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
PCI_DMA_FROMDEVICE);
if (res)
goto out_err;
}
cont1:
/* Get WWN */
if (dev->port->oob_mode != SATA_OOB_MODE) {
memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
SAS_ADDR_SIZE);
} else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
(le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
== 0x5000) {
int i;
for (i = 0; i < 4; i++) {
dev->sas_addr[2*i] =
(le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
dev->sas_addr[2*i+1] =
le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
}
}
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
if (!dev->parent)
sas_sata_propagate_sas_addr(dev);
/* XXX Hint: register this SATA device with SATL.
When this returns, dev->sata_dev->lu is alive and
present.
sas_satl_register_dev(dev);
*/
sas_fill_in_rphy(dev, dev->rphy);
return 0;
out_err:
dev->sata_dev.identify_packet_device = NULL;
dev->sata_dev.identify_device = NULL;
kfree(identify_x);
return res;
}
static int sas_discover_sata_pm(struct domain_device *dev)
{
return -ENODEV;
}
/**
* sas_discover_sata -- discover an STP/SATA domain device
* @dev: pointer to struct domain_device of interest
*
* First we notify the LLDD of this device, so we can send frames to
* it. Then depending on the type of device we call the appropriate
* discover functions. Once device discovery is done, we notify the
* LLDD so that it can fine-tune its parameters for the device, by
* removing it and then adding it. That is, the second time around,
* the driver would have certain fields, that it is looking at, set.
* Finally we initialize the kobj so that the device can be added to
* the system at registration time. Devices directly attached to a HA
* port have no parents. All other devices do, and should have their
* "parent" pointer set appropriately before calling this function.
*/
int sas_discover_sata(struct domain_device *dev)
{
int res;
sas_get_ata_command_set(dev);
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
switch (dev->dev_type) {
case SATA_DEV:
res = sas_discover_sata_dev(dev);
break;
case SATA_PM:
res = sas_discover_sata_pm(dev);
break;
default:
break;
}
sas_notify_lldd_dev_gone(dev);
if (!res) {
sas_notify_lldd_dev_found(dev);
res = sas_rphy_add(dev->rphy);
}
return res;
}


@ -55,149 +55,6 @@ void sas_init_dev(struct domain_device *dev)
}
}
static void sas_task_timedout(unsigned long _task)
{
struct sas_task *task = (void *) _task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
complete(&task->completion);
}
static void sas_disc_task_done(struct sas_task *task)
{
if (!del_timer(&task->timer))
return;
complete(&task->completion);
}
#define SAS_DEV_TIMEOUT 10
/**
* sas_execute_task -- Basic task processing for discovery
* @task: the task to be executed
* @buffer: pointer to buffer to do I/O
* @size: size of @buffer
* @pci_dma_dir: PCI_DMA_...
*/
static int sas_execute_task(struct sas_task *task, void *buffer, int size,
int pci_dma_dir)
{
int res = 0;
struct scatterlist *scatter = NULL;
struct task_status_struct *ts = &task->task_status;
int num_scatter = 0;
int retries = 0;
struct sas_internal *i =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
if (pci_dma_dir != PCI_DMA_NONE) {
scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
if (!scatter)
goto out;
sg_init_one(scatter, buffer, size);
num_scatter = 1;
}
task->task_proto = task->dev->tproto;
task->scatter = scatter;
task->num_scatter = num_scatter;
task->total_xfer_len = size;
task->data_dir = pci_dma_dir;
task->task_done = sas_disc_task_done;
for (retries = 0; retries < 5; retries++) {
task->task_state_flags = SAS_TASK_STATE_PENDING;
init_completion(&task->completion);
task->timer.data = (unsigned long) task;
task->timer.function = sas_task_timedout;
task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
add_timer(&task->timer);
res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
if (res) {
del_timer(&task->timer);
SAS_DPRINTK("executing SAS discovery task failed:%d\n",
res);
goto ex_err;
}
wait_for_completion(&task->completion);
res = -ETASK;
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
int res2;
SAS_DPRINTK("task aborted, flags:0x%x\n",
task->task_state_flags);
res2 = i->dft->lldd_abort_task(task);
SAS_DPRINTK("came back from abort task\n");
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
if (res2 == TMF_RESP_FUNC_COMPLETE)
continue; /* Retry the task */
else
goto ex_err;
}
}
if (task->task_status.stat == SAM_BUSY ||
task->task_status.stat == SAM_TASK_SET_FULL ||
task->task_status.stat == SAS_QUEUE_FULL) {
SAS_DPRINTK("task: q busy, sleeping...\n");
schedule_timeout_interruptible(HZ);
} else if (task->task_status.stat == SAM_CHECK_COND) {
struct scsi_sense_hdr shdr;
if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
&shdr)) {
SAS_DPRINTK("couldn't normalize sense\n");
continue;
}
if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
(shdr.sense_key == 2 && shdr.asc == 4 &&
shdr.ascq == 1)) {
SAS_DPRINTK("device %016llx LUN: %016llx "
"powering up or not ready yet, "
"sleeping...\n",
SAS_ADDR(task->dev->sas_addr),
SAS_ADDR(task->ssp_task.LUN));
schedule_timeout_interruptible(5*HZ);
} else if (shdr.sense_key == 1) {
res = 0;
break;
} else if (shdr.sense_key == 5) {
break;
} else {
SAS_DPRINTK("dev %016llx LUN: %016llx "
"sense key:0x%x ASC:0x%x ASCQ:0x%x"
"\n",
SAS_ADDR(task->dev->sas_addr),
SAS_ADDR(task->ssp_task.LUN),
shdr.sense_key,
shdr.asc, shdr.ascq);
}
} else if (task->task_status.resp != SAS_TASK_COMPLETE ||
task->task_status.stat != SAM_GOOD) {
SAS_DPRINTK("task finished with resp:0x%x, "
"stat:0x%x\n",
task->task_status.resp,
task->task_status.stat);
goto ex_err;
} else {
res = 0;
break;
}
}
ex_err:
if (pci_dma_dir != PCI_DMA_NONE)
kfree(scatter);
out:
return res;
}
/* ---------- Domain device discovery ---------- */
/**
@ -255,6 +112,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
switch (dev->dev_type) {
case SAS_END_DEV:
case SATA_DEV:
rphy = sas_end_device_alloc(port->port);
break;
case EDGE_DEV:
@ -265,7 +123,6 @@ static int sas_get_port_device(struct asd_sas_port *port)
rphy = sas_expander_alloc(port->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
case SATA_DEV:
default:
printk("ERROR: Unidentified device type %d\n", dev->dev_type);
rphy = NULL;
@ -292,207 +149,15 @@ static int sas_get_port_device(struct asd_sas_port *port)
port->disc.max_level = 0;
dev->rphy = rphy;
spin_lock(&port->dev_list_lock);
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock(&port->dev_list_lock);
spin_unlock_irq(&port->dev_list_lock);
return 0;
}
/* ---------- Discover and Revalidate ---------- */
/* ---------- SATA ---------- */
static void sas_get_ata_command_set(struct domain_device *dev)
{
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if ((fis->sector_count == 1 && /* ATA */
fis->lbal == 1 &&
fis->lbam == 0 &&
fis->lbah == 0 &&
fis->device == 0)
||
(fis->sector_count == 0 && /* CE-ATA (mATA) */
fis->lbal == 0 &&
fis->lbam == 0xCE &&
fis->lbah == 0xAA &&
(fis->device & ~0x10) == 0))
dev->sata_dev.command_set = ATA_COMMAND_SET;
else if ((fis->interrupt_reason == 1 && /* ATAPI */
fis->lbal == 1 &&
fis->byte_count_low == 0x14 &&
fis->byte_count_high == 0xEB &&
(fis->device & ~0x10) == 0))
dev->sata_dev.command_set = ATAPI_COMMAND_SET;
else if ((fis->sector_count == 1 && /* SEMB */
fis->lbal == 1 &&
fis->lbam == 0x3C &&
fis->lbah == 0xC3 &&
fis->device == 0)
||
(fis->interrupt_reason == 1 && /* SATA PM */
fis->lbal == 1 &&
fis->byte_count_low == 0x69 &&
fis->byte_count_high == 0x96 &&
(fis->device & ~0x10) == 0))
/* Treat it as a superset? */
dev->sata_dev.command_set = ATAPI_COMMAND_SET;
}
/**
* sas_issue_ata_cmd -- Basic SATA command processing for discovery
* @dev: the device to send the command to
* @command: the command register
* @features: the features register
* @buffer: pointer to buffer to do I/O
* @size: size of @buffer
* @pci_dma_dir: PCI_DMA_...
*/
static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
u8 features, void *buffer, int size,
int pci_dma_dir)
{
int res = 0;
struct sas_task *task;
struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
&dev->frame_rcvd[0];
res = -ENOMEM;
task = sas_alloc_task(GFP_KERNEL);
if (!task)
goto out;
task->dev = dev;
task->ata_task.fis.command = command;
task->ata_task.fis.features = features;
task->ata_task.fis.device = d2h_fis->device;
task->ata_task.retry_count = 1;
res = sas_execute_task(task, buffer, size, pci_dma_dir);
sas_free_task(task);
out:
return res;
}
static void sas_sata_propagate_sas_addr(struct domain_device *dev)
{
unsigned long flags;
struct asd_sas_port *port = dev->port;
struct asd_sas_phy *phy;
BUG_ON(dev->parent);
memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_lock_irqsave(&port->phy_list_lock, flags);
list_for_each_entry(phy, &port->phy_list, port_phy_el)
memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
spin_unlock_irqrestore(&port->phy_list_lock, flags);
}
#define ATA_IDENTIFY_DEV 0xEC
#define ATA_IDENTIFY_PACKET_DEV 0xA1
#define ATA_SET_FEATURES 0xEF
#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
/**
* sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
* @dev: STP/SATA device of interest (ATA/ATAPI)
*
* The LLDD has already been notified of this device, so that we can
* send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
* PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
* performance for this device.
*/
static int sas_discover_sata_dev(struct domain_device *dev)
{
int res;
__le16 *identify_x;
u8 command;
identify_x = kzalloc(512, GFP_KERNEL);
if (!identify_x)
return -ENOMEM;
if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
dev->sata_dev.identify_device = identify_x;
command = ATA_IDENTIFY_DEV;
} else {
dev->sata_dev.identify_packet_device = identify_x;
command = ATA_IDENTIFY_PACKET_DEV;
}
res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
PCI_DMA_FROMDEVICE);
if (res)
goto out_err;
/* lives on the media? */
if (le16_to_cpu(identify_x[0]) & 4) {
/* incomplete response */
SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
"dev %llx\n", SAS_ADDR(dev->sas_addr));
if (!le16_to_cpu(identify_x[83] & (1<<6)))
goto cont1;
res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
ATA_FEATURE_PUP_STBY_SPIN_UP,
NULL, 0, PCI_DMA_NONE);
if (res)
goto cont1;
schedule_timeout_interruptible(5*HZ); /* More time? */
res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
PCI_DMA_FROMDEVICE);
if (res)
goto out_err;
}
cont1:
/* Get WWN */
if (dev->port->oob_mode != SATA_OOB_MODE) {
memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
SAS_ADDR_SIZE);
} else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
(le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
== 0x5000) {
int i;
for (i = 0; i < 4; i++) {
dev->sas_addr[2*i] =
(le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
dev->sas_addr[2*i+1] =
le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
}
}
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
if (!dev->parent)
sas_sata_propagate_sas_addr(dev);
/* XXX Hint: register this SATA device with SATL.
When this returns, dev->sata_dev->lu is alive and
present.
sas_satl_register_dev(dev);
*/
return 0;
out_err:
dev->sata_dev.identify_packet_device = NULL;
dev->sata_dev.identify_device = NULL;
kfree(identify_x);
return res;
}
static int sas_discover_sata_pm(struct domain_device *dev)
{
return -ENODEV;
}
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
@ -525,60 +190,6 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
/* ---------- Common/dispatchers ---------- */
/**
* sas_discover_sata -- discover an STP/SATA domain device
* @dev: pointer to struct domain_device of interest
*
* First we notify the LLDD of this device, so we can send frames to
* it. Then depending on the type of device we call the appropriate
* discover functions. Once device discovery is done, we notify the
* LLDD so that it can fine-tune its parameters for the device, by
* removing it and then adding it. That is, the second time around,
* the driver would have certain fields, that it is looking at, set.
* Finally we initialize the kobj so that the device can be added to
* the system at registration time. Devices directly attached to a HA
* port have no parents. All other devices do, and should have their
* "parent" pointer set appropriately before calling this function.
*/
int sas_discover_sata(struct domain_device *dev)
{
int res;
sas_get_ata_command_set(dev);
res = sas_notify_lldd_dev_found(dev);
if (res)
goto out_err2;
switch (dev->dev_type) {
case SATA_DEV:
res = sas_discover_sata_dev(dev);
break;
case SATA_PM:
res = sas_discover_sata_pm(dev);
break;
default:
break;
}
if (res)
goto out_err;
sas_notify_lldd_dev_gone(dev);
res = sas_notify_lldd_dev_found(dev);
if (res)
goto out_err2;
res = sas_rphy_add(dev->rphy);
if (res)
goto out_err;
return res;
out_err:
sas_notify_lldd_dev_gone(dev);
out_err2:
return res;
}
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
@ -685,11 +296,14 @@ static void sas_discover_domain(struct work_struct *work)
case FANOUT_DEV:
error = sas_discover_root_expander(dev);
break;
#ifdef CONFIG_SCSI_SAS_ATA
case SATA_DEV:
case SATA_PM:
error = sas_discover_sata(dev);
break;
#endif
default:
error = -ENXIO;
SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
break;
}
@ -698,9 +312,9 @@ static void sas_discover_domain(struct work_struct *work)
sas_rphy_free(dev->rphy);
dev->rphy = NULL;
spin_lock(&port->dev_list_lock);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock(&port->dev_list_lock);
spin_unlock_irq(&port->dev_list_lock);
kfree(dev); /* not kobject_register-ed yet */
port->port_dev = NULL;


@ -23,6 +23,7 @@
*/
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include "sas_internal.h"
@ -36,14 +37,6 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
u8 *sas_addr, int include);
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
#if 0
/* FIXME: smp needs to migrate into the sas class */
static ssize_t smp_portal_read(struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
static ssize_t smp_portal_write(struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
#endif
/* ---------- SMP task management ---------- */
static void smp_task_timedout(unsigned long _task)
@ -220,6 +213,36 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
#define DISCOVER_REQ_SIZE 16
#define DISCOVER_RESP_SIZE 56
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
u8 *disc_resp, int single)
{
int i, res;
disc_req[9] = single;
for (i = 1 ; i < 3; i++) {
struct discover_resp *dr;
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
/* This is detecting a failure to transmit the initial
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
/* In order to generate the dev to host FIS, we
* send a link reset to the expander port */
sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
/* Wait for the reset to trigger the negotiation */
msleep(500);
}
sas_set_ex_phy(dev, single, disc_resp);
return 0;
}
static int sas_ex_phy_discover(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
@ -240,23 +263,15 @@ static int sas_ex_phy_discover(struct domain_device *dev, int single)
disc_req[1] = SMP_DISCOVER;
if (0 <= single && single < ex->num_phys) {
disc_req[9] = single;
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out_err;
sas_set_ex_phy(dev, single, disc_resp);
res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single);
} else {
int i;
for (i = 0; i < ex->num_phys; i++) {
disc_req[9] = i;
res = smp_execute_task(dev, disc_req,
DISCOVER_REQ_SIZE, disc_resp,
DISCOVER_RESP_SIZE);
res = sas_ex_phy_discover_helper(dev, disc_req,
disc_resp, i);
if (res)
goto out_err;
sas_set_ex_phy(dev, i, disc_resp);
}
}
out_err:
@ -520,6 +535,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
}
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
#define RPS_RESP_SIZE 60
@ -529,6 +546,7 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
u8 *resp = (u8 *)rps_resp;
if (!rps_req)
return -ENOMEM;
@ -539,9 +557,30 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
rps_resp, RPS_RESP_SIZE);
/* 0x34 is the FIS type for the D2H fis. There's a potential
* standards cockup here. sas-2 explicitly specifies the FIS
* should be encoded so that FIS type is in resp[24].
* However, some expanders endian reverse this. Undo the
* reversal here */
if (!res && resp[27] == 0x34 && resp[24] != 0x34) {
int i;
for (i = 0; i < 5; i++) {
int j = 24 + (i*4);
u8 a, b;
a = resp[j + 0];
b = resp[j + 1];
resp[j + 0] = resp[j + 3];
resp[j + 1] = resp[j + 2];
resp[j + 2] = b;
resp[j + 3] = a;
}
}
kfree(rps_req);
return 0;
return res;
}
#endif
static void sas_ex_get_linkrate(struct domain_device *parent,
struct domain_device *child,
@ -609,6 +648,7 @@ static struct domain_device *sas_ex_discover_end_dev(
}
sas_ex_get_linkrate(parent, child, phy);
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
child->dev_type = SATA_DEV;
if (phy->attached_tproto & SAS_PROTO_STP)
@ -625,16 +665,30 @@ static struct domain_device *sas_ex_discover_end_dev(
}
memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
rphy = sas_end_device_alloc(phy->port);
if (unlikely(!rphy))
goto out_free;
sas_init_dev(child);
child->rphy = rphy;
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_sata(child);
if (res) {
SAS_DPRINTK("sas_discover_sata() for device %16llx at "
"%016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
goto out_free;
goto out_list_del;
}
} else if (phy->attached_tproto & SAS_PROTO_SSP) {
} else
#endif
if (phy->attached_tproto & SAS_PROTO_SSP) {
child->dev_type = SAS_END_DEV;
rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */
@ -646,9 +700,9 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
sas_fill_in_rphy(child, rphy);
spin_lock(&parent->port->dev_list_lock);
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock(&parent->port->dev_list_lock);
spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_end_dev(child);
if (res) {
@ -662,6 +716,7 @@ static struct domain_device *sas_ex_discover_end_dev(
SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
phy->attached_tproto, SAS_ADDR(parent->sas_addr),
phy_id);
goto out_free;
}
list_add_tail(&child->siblings, &parent_ex->children);
@ -761,9 +816,9 @@ static struct domain_device *sas_ex_discover_expander(
sas_fill_in_rphy(child, rphy);
sas_rphy_add(rphy);
spin_lock(&parent->port->dev_list_lock);
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock(&parent->port->dev_list_lock);
spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_expander(child);
if (res) {
@ -1359,30 +1414,6 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
return 0;
}
#if 0
#define SMP_BIN_ATTR_NAME "smp_portal"
static void sas_ex_smp_hook(struct domain_device *dev)
{
struct expander_device *ex_dev = &dev->ex_dev;
struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;
memset(bin_attr, 0, sizeof(*bin_attr));
bin_attr->attr.name = SMP_BIN_ATTR_NAME;
bin_attr->attr.mode = 0600;
bin_attr->size = 0;
bin_attr->private = NULL;
bin_attr->read = smp_portal_read;
bin_attr->write= smp_portal_write;
bin_attr->mmap = NULL;
ex_dev->smp_portal_pid = -1;
init_MUTEX(&ex_dev->smp_sema);
}
#endif
/**
* sas_discover_expander -- expander discovery
* @ex: pointer to expander domain device
@ -1844,76 +1875,49 @@ out:
return res;
}
#if 0
/* ---------- SMP portal ---------- */
static ssize_t smp_portal_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t offs, size_t size)
int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req)
{
struct domain_device *dev = to_dom_device(kobj);
struct expander_device *ex = &dev->ex_dev;
struct domain_device *dev;
int ret, type = rphy->identify.device_type;
struct request *rsp = req->next_rq;
if (offs != 0)
return -EFBIG;
else if (size == 0)
return 0;
down_interruptible(&ex->smp_sema);
if (ex->smp_req)
kfree(ex->smp_req);
ex->smp_req = kzalloc(size, GFP_USER);
if (!ex->smp_req) {
up(&ex->smp_sema);
return -ENOMEM;
}
memcpy(ex->smp_req, buf, size);
ex->smp_req_size = size;
ex->smp_portal_pid = current->pid;
up(&ex->smp_sema);
return size;
}
static ssize_t smp_portal_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t offs, size_t size)
{
struct domain_device *dev = to_dom_device(kobj);
struct expander_device *ex = &dev->ex_dev;
u8 *smp_resp;
int res = -EINVAL;
/* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
* it should be 0.
*/
down_interruptible(&ex->smp_sema);
if (!ex->smp_req || ex->smp_portal_pid != current->pid)
goto out;
res = 0;
if (size == 0)
goto out;
res = -ENOMEM;
smp_resp = alloc_smp_resp(size);
if (!smp_resp)
goto out;
res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size,
smp_resp, size);
if (!res) {
memcpy(buf, smp_resp, size);
res = size;
if (!rsp) {
printk("%s: space for a smp response is missing\n",
__FUNCTION__);
return -EINVAL;
}
kfree(smp_resp);
out:
kfree(ex->smp_req);
ex->smp_req = NULL;
ex->smp_req_size = 0;
ex->smp_portal_pid = -1;
up(&ex->smp_sema);
return res;
/* seems aic94xx doesn't support */
if (!rphy) {
printk("%s: can we send a smp request to a host?\n",
__FUNCTION__);
return -EINVAL;
}
if (type != SAS_EDGE_EXPANDER_DEVICE &&
type != SAS_FANOUT_EXPANDER_DEVICE) {
printk("%s: can we send a smp request to a device?\n",
__FUNCTION__);
return -EINVAL;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
printk("%s: fail to find a domain_device?\n", __FUNCTION__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
__FUNCTION__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
bio_data(rsp->bio), rsp->data_len);
return ret;
}
#endif
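sas_smp_handler() is the consumer of the bsg SMP passthrough added elsewhere in this merge; it funnels user requests into the same smp_execute_task() path the discovery code above uses. As a hedged illustration of that path, an in-kernel REPORT GENERAL would look roughly like this (helper name, buffer sizes and error handling are illustrative; SMP_REPORT_GENERAL comes from <scsi/sas.h>):

static int example_smp_report_general(struct domain_device *dev, u8 *resp)
{
	/* REPORT GENERAL: 8-byte request, 32-byte response */
	u8 *req = alloc_smp_req(8);
	int res = -ENOMEM;

	if (req) {
		/* alloc_smp_req() already set req[0] to the SMP frame type */
		req[1] = SMP_REPORT_GENERAL;
		res = smp_execute_task(dev, req, 8, resp, 32);
		kfree(req);
	}
	return res;
}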


@ -259,6 +259,7 @@ static struct sas_function_template sft = {
.phy_reset = sas_phy_reset,
.set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors,
.smp_handler = sas_smp_handler,
};
struct scsi_transport_template *


@ -39,6 +39,9 @@
#define SAS_DPRINTK(fmt, ...)
#endif
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_show_class(enum sas_class class, char *buf);


@ -34,6 +34,7 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"
@ -42,12 +43,10 @@
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* ---------- SCSI Host glue ---------- */
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
static void sas_scsi_task_done(struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
@ -172,7 +171,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
return task;
}
static int sas_queue_up(struct sas_task *task)
int sas_queue_up(struct sas_task *task)
{
struct sas_ha_struct *sas_ha = task->dev->port->ha;
struct scsi_core *core = &sas_ha->core;
@ -213,6 +212,16 @@ int sas_queuecommand(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
if (dev_is_sata(dev)) {
unsigned long flags;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
res = ata_sas_queuecmd(cmd, scsi_done,
dev->sata_dev.ap);
spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
goto out;
}
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
@ -684,6 +693,16 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
return EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return ata_scsi_ioctl(sdev, cmd, arg);
return -EINVAL;
}
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
@ -723,10 +742,17 @@ static inline struct domain_device *sas_find_target(struct scsi_target *starget)
int sas_target_alloc(struct scsi_target *starget)
{
struct domain_device *found_dev = sas_find_target(starget);
int res;
if (!found_dev)
return -ENODEV;
if (dev_is_sata(found_dev)) {
res = sas_ata_init_host_and_port(found_dev, starget);
if (res)
return res;
}
starget->hostdata = found_dev;
return 0;
}
@ -741,6 +767,11 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
return 0;
}
sas_ha = dev->port->ha;
sas_read_port_mode_page(scsi_dev);
@ -764,6 +795,10 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
void sas_slave_destroy(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
ata_port_disable(dev->sata_dev.ap);
}
int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
@ -980,10 +1015,38 @@ void sas_task_abort(struct sas_task *task)
return;
}
if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
return;
}
scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
}
int sas_slave_alloc(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
return ata_sas_port_init(dev->sata_dev.ap);
return 0;
}
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = sas_find_target(starget);
if (!found_dev)
return;
if (dev_is_sata(found_dev))
ata_sas_port_destroy(found_dev->sata_dev.ap);
return;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
@ -997,3 +1060,6 @@ EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_slave_alloc);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);
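The newly exported sas_slave_alloc(), sas_target_destroy() and sas_ioctl() are meant to sit next to the existing libsas glue in an LLDD's scsi_host_template. A hedged sketch of the wiring (driver name and the omitted fields are illustrative):

static struct scsi_host_template example_sas_sht = {
	.module			= THIS_MODULE,
	.name			= "example-sas",
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.target_destroy		= sas_target_destroy,
	.slave_alloc		= sas_slave_alloc,
	.slave_configure	= sas_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.ioctl			= sas_ioctl,
	/* ...eh handlers, can_queue, sg_tablesize, etc. as usual... */
};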


@ -89,6 +89,7 @@ mvme16x_probe(struct device *dev)
out_be32(0xfff4202c, v);
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
@ -104,7 +105,7 @@ mvme16x_probe(struct device *dev)
static __devexit int
mvme16x_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_to_shost(dev);
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
/* Disable scsi chip ints */


@ -2,9 +2,12 @@
# PCMCIA SCSI adapter configuration
#
menu "PCMCIA SCSI adapter support"
menuconfig SCSI_LOWLEVEL_PCMCIA
bool "PCMCIA SCSI adapter support"
depends on SCSI!=n && PCMCIA!=n
if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA
config PCMCIA_AHA152X
tristate "Adaptec AHA152X PCMCIA support"
depends on !64BIT
@ -77,4 +80,4 @@ config PCMCIA_SYM53C500
To compile this driver as a module, choose M here: the
module will be called sym53c500_cs.
endmenu
endif # SCSI_LOWLEVEL_PCMCIA


@ -98,7 +98,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
/* Read NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
ha->nvram_size);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@ -119,7 +119,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
return 0;
/* Checksum NVRAM. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
uint32_t *iter;
uint32_t chksum;
@ -143,7 +143,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
/* Write NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@ -206,7 +206,7 @@ static struct bin_attribute sysfs_optrom_attr = {
.name = "optrom",
.mode = S_IRUSR | S_IWUSR,
},
.size = OPTROM_SIZE_24XX,
.size = 0,
.read = qla2x00_sysfs_read_optrom,
.write = qla2x00_sysfs_write_optrom,
};
@ -252,7 +252,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
}
memset(ha->optrom_buffer, 0, ha->optrom_size);
ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 0,
ha->optrom_size);
break;
case 2:
@ -275,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
if (ha->optrom_state != QLA_SWRITING)
break;
ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0,
ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 0,
ha->optrom_size);
break;
}
@ -305,7 +305,8 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
/* Read NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size);
ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->vpd_base,
ha->vpd_size);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return ha->vpd_size;
@ -325,7 +326,7 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
/* Write NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return count;
@ -410,7 +411,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)))
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@ -429,7 +430,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
struct sysfs_entry *iter;
for (iter = bin_file_entries; iter->name; iter++) {
if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)))
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@ -437,7 +438,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
}
if (ha->beacon_blink_led == 1)
ha->isp_ops.beacon_off(ha);
ha->isp_ops->beacon_off(ha);
}
/* Scsi_Host attributes. */
@ -455,7 +456,7 @@ qla2x00_fw_version_show(struct class_device *cdev, char *buf)
char fw_str[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.fw_version_str(ha, fw_str));
ha->isp_ops->fw_version_str(ha, fw_str));
}
static ssize_t
@ -507,7 +508,7 @@ qla2x00_pci_info_show(struct class_device *cdev, char *buf)
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.pci_info_str(ha, pci_info));
ha->isp_ops->pci_info_str(ha, pci_info));
}
static ssize_t
@ -652,9 +653,9 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
return -EINVAL;
if (val)
rval = ha->isp_ops.beacon_on(ha);
rval = ha->isp_ops->beacon_on(ha);
else
rval = ha->isp_ops.beacon_off(ha);
rval = ha->isp_ops->beacon_off(ha);
if (rval != QLA_SUCCESS)
count = 0;
@ -898,7 +899,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
sizeof(stat_buf) / 4, mb_stat);
} else if (atomic_read(&ha->loop_state) == LOOP_READY &&

The diff for this file is not shown because of its large size.


@ -213,6 +213,43 @@ struct qla24xx_fw_dump {
uint32_t ext_mem[1];
};
struct qla25xx_fw_dump {
uint32_t host_status;
uint32_t host_reg[32];
uint32_t shadow_reg[11];
uint32_t risc_io_reg;
uint16_t mailbox_reg[32];
uint32_t xseq_gp_reg[128];
uint32_t xseq_0_reg[48];
uint32_t xseq_1_reg[16];
uint32_t rseq_gp_reg[128];
uint32_t rseq_0_reg[32];
uint32_t rseq_1_reg[16];
uint32_t rseq_2_reg[16];
uint32_t aseq_gp_reg[128];
uint32_t aseq_0_reg[32];
uint32_t aseq_1_reg[16];
uint32_t aseq_2_reg[16];
uint32_t cmd_dma_reg[16];
uint32_t req0_dma_reg[15];
uint32_t resp0_dma_reg[15];
uint32_t req1_dma_reg[15];
uint32_t xmt0_dma_reg[32];
uint32_t xmt1_dma_reg[32];
uint32_t xmt2_dma_reg[32];
uint32_t xmt3_dma_reg[32];
uint32_t xmt4_dma_reg[32];
uint32_t xmt_data_dma_reg[16];
uint32_t rcvt0_data_dma_reg[32];
uint32_t rcvt1_data_dma_reg[32];
uint32_t risc_gp_reg[128];
uint32_t lmc_reg[128];
uint32_t fpm_hdw_reg[192];
uint32_t fb_hdw_reg[192];
uint32_t code_ram[0x2000];
uint32_t ext_mem[1];
};
#define EFT_NUM_BUFFERS 4
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@ -246,5 +283,6 @@ struct qla2xxx_fw_dump {
struct qla2100_fw_dump isp21;
struct qla2300_fw_dump isp23;
struct qla24xx_fw_dump isp24;
struct qla25xx_fw_dump isp25;
} isp;
};


@ -1711,6 +1711,14 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_OS_DEVICE_NAME 5
#define FDMI_PORT_HOST_NAME 6
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
#define FDMI_PORT_SPEED_10GB 0x4
#define FDMI_PORT_SPEED_4GB 0x8
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
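These bits feed the FDMI port-attribute registration referenced in the commit list ('current' and 'supported' speeds). A hedged sketch of how the adapter's operating rate might map onto the current-speed attribute (the helper is illustrative; PORT_SPEED_* are defined further down in this header):

static uint32_t example_fdmi_current_speed(scsi_qla_host_t *ha)
{
	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB:
		return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB:
		return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB:
		return FDMI_PORT_SPEED_8GB;
	default:
		return FDMI_PORT_SPEED_UNKNOWN;
	}
}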
struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
@ -2201,6 +2209,7 @@ typedef struct scsi_qla_host {
#define SWITCH_FOUND BIT_3
#define DFLG_NO_CABLE BIT_4
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@ -2213,8 +2222,11 @@ typedef struct scsi_qla_host {
#define DT_ISP2432 BIT_8
#define DT_ISP5422 BIT_9
#define DT_ISP5432 BIT_10
#define DT_ISP_LAST (DT_ISP5432 << 1)
#define DT_ISP2532 BIT_11
#define DT_ISP_LAST (DT_ISP2532 << 1)
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
#define DT_ZIO_SUPPORTED BIT_28
#define DT_OEM_001 BIT_29
#define DT_ISP2200A BIT_30
@ -2232,12 +2244,16 @@ typedef struct scsi_qla_host {
#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
@ -2274,7 +2290,7 @@ typedef struct scsi_qla_host {
uint16_t rsp_ring_index; /* Current index. */
uint16_t response_q_length;
struct isp_operations isp_ops;
struct isp_operations *isp_ops;
/* Outstanding ISP commands. */
srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
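With isp_ops now a pointer, the per-HBA copy of every method disappears: probe simply points ha->isp_ops at one of a handful of static, read-only tables. A hedged sketch of the idea (the table is abbreviated, and which helper lands in which member is assumed here; the qla25xx_*/qla24xx_* functions are the ones declared in qla_gbl.h later in this diff):

static struct isp_operations qla25xx_isp_ops = {
	/* abbreviated: the real table fills in every member */
	.pci_config	= qla25xx_pci_config,
	.reset_chip	= qla24xx_reset_chip,
	.fw_dump	= qla25xx_fw_dump,
	.read_nvram	= qla25xx_read_nvram_data,
	.write_nvram	= qla25xx_write_nvram_data,
};

/* at probe time, instead of copying members one by one: */
/*	ha->isp_ops = &qla25xx_isp_ops;	*/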
@ -2298,6 +2314,7 @@ typedef struct scsi_qla_host {
#define PORT_SPEED_1GB 0x00
#define PORT_SPEED_2GB 0x01
#define PORT_SPEED_4GB 0x03
#define PORT_SPEED_8GB 0x04
uint16_t link_data_rate; /* F/W operating speed */
uint8_t current_topology;
@ -2564,6 +2581,7 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_2300 0x20000
#define OPTROM_SIZE_2322 0x100000
#define OPTROM_SIZE_24XX 0x100000
#define OPTROM_SIZE_25XX 0x200000
#include "qla_gbl.h"
#include "qla_dbg.h"


@ -8,14 +8,17 @@
#define __QLA_FW_H
#define MBS_CHECKSUM_ERROR 0x4010
#define MBS_INVALID_PRODUCT_KEY 0x4020
/*
* Firmware Options.
*/
#define FO1_ENABLE_PUREX BIT_10
#define FO1_DISABLE_LED_CTRL BIT_6
#define FO1_ENABLE_8016 BIT_0
#define FO2_ENABLE_SEL_CLASS2 BIT_5
#define FO3_NO_ABTS_ON_LINKDOWN BIT_14
#define FO3_HOLD_STS_IOCB BIT_12
/*
* Port Database structure definition for ISP 24xx.
@ -341,7 +344,9 @@ struct init_cb_24xx {
* BIT 10 = Reserved
* BIT 11 = Enable FC-SP Security
* BIT 12 = FC Tape Enable
* BIT 13-31 = Reserved
* BIT 13 = Reserved
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
uint32_t firmware_options_2;
@ -363,7 +368,8 @@ struct init_cb_24xx {
* BIT 13 = Data Rate bit 0
* BIT 14 = Data Rate bit 1
* BIT 15 = Data Rate bit 2
* BIT 16-31 = Reserved
* BIT 16 = Enable 75 ohm Termination Select
* BIT 17-31 = Reserved
*/
uint32_t firmware_options_3;
@ -435,6 +441,7 @@ struct cmd_type_7 {
#define TMF_LUN_RESET BIT_12
#define TMF_CLEAR_TASK_SET BIT_10
#define TMF_ABORT_TASK_SET BIT_9
#define TMF_DSD_LIST_ENABLE BIT_2
#define TMF_READ_DATA BIT_1
#define TMF_WRITE_DATA BIT_0
@ -589,7 +596,7 @@ struct els_entry_24xx {
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
uint32_t rx_xchg_address[2]; /* Receive exchange address. */
uint32_t rx_xchg_address; /* Receive exchange address. */
uint16_t rx_dsd_count;
uint8_t opcode;
@ -650,6 +657,7 @@ struct logio_entry_24xx {
uint16_t control_flags; /* Control flags. */
/* Modifiers. */
#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
@ -779,6 +787,15 @@ struct device_reg_24xx {
#define FA_RISC_CODE_ADDR 0x20000
#define FA_RISC_CODE_SEGMENTS 2
#define FA_FW_AREA_ADDR 0x40000
#define FA_VPD_NVRAM_ADDR 0x48000
#define FA_FEATURE_ADDR 0x4C000
#define FA_FLASH_DESCR_ADDR 0x50000
#define FA_HW_EVENT_ADDR 0x54000
#define FA_BOOT_LOG_ADDR 0x58000
#define FA_FW_DUMP0_ADDR 0x60000
#define FA_FW_DUMP1_ADDR 0x70000
uint32_t flash_data; /* Flash/NVRAM BIOS data. */
uint32_t ctrl_status; /* Control/Status. */
@ -859,10 +876,13 @@ struct device_reg_24xx {
#define HCCRX_CLR_RISC_INT 0xA0000000
uint32_t gpiod; /* GPIO Data register. */
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
/* Data update mask. */
#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16)
/* Data update mask. */
#define GPDX_DATA_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
/* LED control mask. */
#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2)
/* LED bit values. Color names as
@ -877,6 +897,8 @@ struct device_reg_24xx {
uint32_t gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
@ -916,6 +938,14 @@ struct device_reg_24xx {
uint16_t mailbox29;
uint16_t mailbox30;
uint16_t mailbox31;
uint32_t iobase_window;
uint32_t unused_4[8]; /* Gap. */
uint32_t iobase_q;
uint32_t unused_5[2]; /* Gap. */
uint32_t iobase_select;
uint32_t unused_6[2]; /* Gap. */
uint32_t iobase_sdata;
};
/* MID Support ***************************************************************/


@ -17,6 +17,7 @@ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
extern int qla24xx_pci_config(scsi_qla_host_t *);
extern int qla25xx_pci_config(scsi_qla_host_t *);
extern void qla2x00_reset_chip(struct scsi_qla_host *);
extern void qla24xx_reset_chip(struct scsi_qla_host *);
extern int qla2x00_chip_diag(struct scsi_qla_host *);
@ -281,6 +282,10 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@ -307,6 +312,7 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
extern void qla2100_fw_dump(scsi_qla_host_t *, int);
extern void qla2300_fw_dump(scsi_qla_host_t *, int);
extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);


@ -127,7 +127,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
ha->host_no, routine, ms_pkt->entry_status));
} else {
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
((struct ct_entry_24xx *)ms_pkt)->comp_status);
else
@ -180,7 +180,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
/* Issue GA_NXT */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
GA_NXT_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD,
@ -266,7 +267,8 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
/* Issue GID_PT */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GID_PT_REQ_SIZE, GID_PT_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
GID_PT_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
@ -338,7 +340,7 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
GPN_ID_RSP_SIZE);
/* Prepare CT request */
@ -399,7 +401,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
GNN_ID_RSP_SIZE);
/* Prepare CT request */
@ -473,7 +475,8 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
/* Issue RFT_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFT_ID_REQ_SIZE, RFT_ID_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
RFT_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD,
@ -528,7 +531,8 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
/* Issue RFF_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
RFF_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD,
@ -582,7 +586,8 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
/* Issue RNN_ID */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RNN_ID_REQ_SIZE, RNN_ID_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
RNN_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD,
@ -645,7 +650,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
/* Issue RSNN_NN */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@ -1102,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
if (ha->flags.management_server_logged_in)
return ret;
ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
@ -1198,7 +1203,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
} else {
@ -1253,7 +1258,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
/* Issue RHBA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@ -1373,7 +1378,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
alen = strlen(eiter->a.fw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
@ -1439,7 +1444,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
/* Issue RPA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
/* Prepare CT request */
@ -1497,7 +1502,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
/* Issue RPA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@ -1527,12 +1532,20 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(4);
if (IS_QLA25XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
else if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
FDMI_PORT_SPEED_4GB);
else if (IS_QLA23XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(2);
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
else
eiter->a.sup_speed = __constant_cpu_to_be32(1);
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
@ -1543,14 +1556,25 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
case 0:
eiter->a.cur_speed = __constant_cpu_to_be32(1);
case PORT_SPEED_1GB:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
break;
case 1:
eiter->a.cur_speed = __constant_cpu_to_be32(2);
case PORT_SPEED_2GB:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
break;
case 3:
eiter->a.cur_speed = __constant_cpu_to_be32(4);
case PORT_SPEED_4GB:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
break;
case PORT_SPEED_8GB:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
break;
default:
eiter->a.cur_speed =
__constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
break;
}
size += 4 + 4;
@ -1562,7 +1586,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
max_frame_size = IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
max_frame_size = IS_FWI2_CAPABLE(ha) ?
(uint32_t) icb24->frame_payload_size:
(uint32_t) ha->init_cb->frame_payload_size;
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
@ -1678,7 +1702,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
@ -1686,7 +1710,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
memset(list[i].fabric_port_name, 0, WWN_SIZE);
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
GFPN_ID_RSP_SIZE);
/* Prepare CT request */
@ -1786,7 +1810,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
if (!ha->flags.gpsc_supported)
return QLA_FUNCTION_FAILED;
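
The FDMI hunks above replace bare numeric speed codes with named FDMI_PORT_SPEED_* masks and switch on the PORT_SPEED_* values reported in ha->link_data_rate. A minimal, self-contained sketch of that mapping follows; the macro values below are placeholders chosen for illustration only, the real definitions live in the driver headers:

/* Illustrative sketch only: placeholder values, not the driver's definitions. */
#include <stdint.h>

#define FDMI_PORT_SPEED_1GB     0x00000001u
#define FDMI_PORT_SPEED_2GB     0x00000002u
#define FDMI_PORT_SPEED_4GB     0x00000008u
#define FDMI_PORT_SPEED_8GB     0x00000010u
#define FDMI_PORT_SPEED_UNKNOWN 0x00008000u

enum port_speed { PORT_SPEED_1GB, PORT_SPEED_2GB, PORT_SPEED_4GB, PORT_SPEED_8GB };

static uint32_t fdmi_current_speed(enum port_speed rate)
{
	switch (rate) {
	case PORT_SPEED_1GB: return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB: return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB: return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB: return FDMI_PORT_SPEED_8GB;
	default:             return FDMI_PORT_SPEED_UNKNOWN;
	}
}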


@ -79,20 +79,20 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
rval = ha->isp_ops.pci_config(ha);
rval = ha->isp_ops->pci_config(ha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
ha->host_no));
return (rval);
}
ha->isp_ops.reset_chip(ha);
ha->isp_ops->reset_chip(ha);
ha->isp_ops.get_flash_version(ha, ha->request_ring);
ha->isp_ops->get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
ha->isp_ops.nvram_config(ha);
ha->isp_ops->nvram_config(ha);
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
@ -108,7 +108,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
rval = ha->isp_ops.chip_diag(ha);
rval = ha->isp_ops->chip_diag(ha);
if (rval)
return (rval);
rval = qla2x00_setup_chip(ha);
@ -129,14 +129,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
int
qla2100_pci_config(scsi_qla_host_t *ha)
{
int ret;
uint16_t w;
uint32_t d;
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
ret = pci_set_mwi(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@ -164,7 +163,6 @@ qla2100_pci_config(scsi_qla_host_t *ha)
int
qla2300_pci_config(scsi_qla_host_t *ha)
{
int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
@ -172,7 +170,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
ret = pci_set_mwi(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@ -250,15 +248,13 @@ qla2300_pci_config(scsi_qla_host_t *ha)
int
qla24xx_pci_config(scsi_qla_host_t *ha)
{
int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
int pcix_cmd_reg, pcie_dctl_reg;
pci_set_master(ha->pdev);
ret = pci_set_mwi(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@ -268,28 +264,12 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
pcix_cmd_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX);
if (pcix_cmd_reg) {
uint16_t pcix_cmd;
pcix_cmd_reg += PCI_X_CMD;
pci_read_config_word(ha->pdev, pcix_cmd_reg, &pcix_cmd);
pcix_cmd &= ~PCI_X_CMD_MAX_READ;
pcix_cmd |= 0x0008;
pci_write_config_word(ha->pdev, pcix_cmd_reg, pcix_cmd);
}
if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
pcix_set_mmrbc(ha->pdev, 2048);
/* PCIe -- adjust Maximum Read Request Size (2048). */
pcie_dctl_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
if (pcie_dctl_reg) {
uint16_t pcie_dctl;
pcie_dctl_reg += PCI_EXP_DEVCTL;
pci_read_config_word(ha->pdev, pcie_dctl_reg, &pcie_dctl);
pcie_dctl &= ~PCI_EXP_DEVCTL_READRQ;
pcie_dctl |= 0x4000;
pci_write_config_word(ha->pdev, pcie_dctl_reg, pcie_dctl);
}
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
/* Reset expansion ROM address decode enable */
pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
@ -306,6 +286,40 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
return QLA_SUCCESS;
}
/**
* qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
* @ha: HA context
*
* Returns 0 on success.
*/
int
qla25xx_pci_config(scsi_qla_host_t *ha)
{
uint16_t w;
uint32_t d;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* PCIe -- adjust Maximum Read Request Size (2048). */
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
/* Reset expansion ROM address decode enable */
pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
d &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
ha->chip_revision = ha->pdev->revision;
return QLA_SUCCESS;
}
/**
* qla2x00_isp_firmware() - Choose firmware image.
* @ha: HA context
@ -351,7 +365,7 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
uint32_t cnt;
uint16_t cmd;
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@ -551,7 +565,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
void
qla24xx_reset_chip(scsi_qla_host_t *ha)
{
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
qla24xx_reset_risc(ha);
@ -736,8 +750,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
} else if (IS_FWI2_CAPABLE(ha)) {
fixed_size = IS_QLA25XX(ha) ?
offsetof(struct qla25xx_fw_dump, ext_mem):
offsetof(struct qla24xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
@ -879,7 +895,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
uint32_t srisc_address = 0;
/* Load firmware sequences */
rval = ha->isp_ops.load_risc(ha, &srisc_address);
rval = ha->isp_ops->load_risc(ha, &srisc_address);
if (rval == QLA_SUCCESS) {
DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
"code.\n", ha->host_no));
@ -1130,12 +1146,12 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
/* Initialize response queue entries */
qla2x00_init_response_q_entries(ha);
ha->isp_ops.config_rings(ha);
ha->isp_ops->config_rings(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops.update_fw_options(ha);
ha->isp_ops->update_fw_options(ha);
DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
@ -1459,7 +1475,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
ha->isp_ops.read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
chksum += *ptr++;
@ -2119,7 +2135,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
int rval;
uint16_t port_speed, mb[6];
if (!IS_QLA24XX(ha))
if (!IS_IIDMA_CAPABLE(ha))
return;
switch (be16_to_cpu(fcport->fp_speed)) {
@ -2267,7 +2283,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
scsi_qla_host_t *pha = to_qla_parent(ha);
/* If FL port exists, then SNS is present */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_F_PORT;
else
loop_id = SNS_FL_PORT;
@ -2294,11 +2310,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
qla2x00_fdmi_register(ha);
/* Ensure we are logged into the SNS. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_SNS;
else
loop_id = SIMPLE_NAME_SERVER;
ha->isp_ops.fabric_login(ha, loop_id, 0xff, 0xff,
ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
0xfc, mb, BIT_1 | BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2(qla_printk(KERN_INFO, ha,
@ -2355,7 +2371,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops.fabric_logout(ha,
ha->isp_ops->fabric_logout(ha,
fcport->loop_id,
fcport->d_id.b.domain,
fcport->d_id.b.area,
@ -2664,7 +2680,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops.fabric_logout(ha, fcport->loop_id,
ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@ -2919,7 +2935,7 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
opts |= BIT_1;
rval = qla2x00_get_port_database(ha, fcport, opts);
if (rval != QLA_SUCCESS) {
ha->isp_ops.fabric_logout(ha, fcport->loop_id,
ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
qla2x00_mark_device_lost(ha, fcport, 1, 0);
@ -2964,7 +2980,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
fcport->d_id.b.area, fcport->d_id.b.al_pa));
/* Login fcport on switch. */
ha->isp_ops.fabric_login(ha, fcport->loop_id,
ha->isp_ops->fabric_login(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb, BIT_0);
if (mb[0] == MBS_PORT_ID_USED) {
@ -3032,7 +3048,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* dead.
*/
*next_loopid = fcport->loop_id;
ha->isp_ops.fabric_logout(ha, fcport->loop_id,
ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
qla2x00_mark_device_lost(ha, fcport, 1, 0);
@ -3050,7 +3066,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
*next_loopid = fcport->loop_id;
ha->isp_ops.fabric_logout(ha, fcport->loop_id,
ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@ -3206,7 +3222,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
ha->isp_ops.reset_chip(ha);
ha->isp_ops->reset_chip(ha);
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
@ -3232,9 +3248,9 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->isp_ops.get_flash_version(ha, ha->request_ring);
ha->isp_ops->get_flash_version(ha, ha->request_ring);
ha->isp_ops.nvram_config(ha);
ha->isp_ops->nvram_config(ha);
if (!qla2x00_restart_isp(ha)) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@ -3249,7 +3265,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->flags.online = 1;
ha->isp_ops.enable_intrs(ha);
ha->isp_ops->enable_intrs(ha);
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
@ -3274,7 +3290,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
* The next call disables the board
* completely.
*/
ha->isp_ops.reset_adapter(ha);
ha->isp_ops->reset_adapter(ha);
ha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
&ha->dpc_flags);
@ -3331,7 +3347,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha)) {
ha->flags.online = 0;
if (!(status = ha->isp_ops.chip_diag(ha))) {
if (!(status = ha->isp_ops->chip_diag(ha))) {
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
status = qla2x00_setup_chip(ha);
goto done;
@ -3423,7 +3439,7 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
ha->flags.online = 0;
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@ -3440,7 +3456,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ha->flags.online = 0;
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
@ -3498,7 +3514,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
/* Get NVRAM data and calculate checksum. */
dptr = (uint32_t *)nv;
ha->isp_ops.read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
@ -4012,7 +4028,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
{
int ret, retries;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
return;
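
The PCI-setup hunks above drop the hand-rolled capability-register programming in favor of the generic kernel helpers pci_try_set_mwi(), pcix_set_mmrbc() and pcie_set_readrq(). A minimal sketch of the resulting pattern for an arbitrary PCI function; example_pci_tune is a hypothetical name and the 2048-byte sizes merely mirror the values used above:

#include <linux/pci.h>

/* Sketch: same helper-based tuning as the simplified qla24xx/qla25xx paths. */
static void example_pci_tune(struct pci_dev *pdev)
{
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);		/* best effort; failure is ignored */

	/* PCI-X: Maximum Memory Read Byte Count -> 2048 bytes. */
	if (pci_find_capability(pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(pdev, 2048);

	/* PCI Express: Maximum Read Request Size -> 2048 bytes. */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(pdev, 2048);
}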


@ -104,7 +104,7 @@ static __inline__ void qla2x00_poll(scsi_qla_host_t *);
static inline void
qla2x00_poll(scsi_qla_host_t *ha)
{
ha->isp_ops.intr_handler(0, ha);
ha->isp_ops->intr_handler(0, ha);
}
static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
@ -163,7 +163,7 @@ static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
{
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
return (loop_id > NPH_LAST_HANDLE);
return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||


@ -326,7 +326,7 @@ qla2x00_start_scsi(srb_t *sp)
tot_dsds = nseg;
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (ha->req_q_cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
if (ha->req_ring_index < cnt)
@ -364,7 +364,7 @@ qla2x00_start_scsi(srb_t *sp)
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
@ -432,7 +432,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
@ -487,7 +487,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
for (timer = HZ; timer; timer--) {
if ((req_cnt + 2) >= ha->req_q_cnt) {
/* Calculate number of free request entries. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
@ -561,7 +561,7 @@ qla2x00_isp_cmd(scsi_qla_host_t *ha)
ha->request_ring_ptr++;
/* Set chip new ring index. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
} else {


@ -143,7 +143,7 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
ha->isp_ops.fw_dump(ha, 1);
ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@ -247,7 +247,7 @@ void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN 2
static char *link_speeds[5] = { "1", "2", "?", "4", "10" };
static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
char *link_speed;
uint16_t handle_cnt;
uint16_t cnt;
@ -334,9 +334,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
mb[1], mb[2], mb[3]);
ha->isp_ops.fw_dump(ha, 1);
ha->isp_ops->fw_dump(ha, 1);
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
qla_printk(KERN_ERR, ha,
"Unrecoverable Hardware Error: adapter "
@ -601,7 +601,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"scsi(%ld): [R|Z]IO update completion.\n",
ha->host_no));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
qla24xx_process_response_queue(ha);
else
qla2x00_process_response_queue(ha);
@ -823,7 +823,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
} else {
@ -872,7 +872,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
fcport = sp->fcport;
sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
sense_len = le32_to_cpu(sts24->sense_len);
rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
resid_len = le32_to_cpu(sts24->rsp_residual_count);
@ -891,7 +891,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
/* Check for any FCP transport errors. */
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
/* Sense data lies beyond any FCP RESPONSE data. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
sense_data += rsp_info_len;
if (rsp_info_len > 3 && rsp_info[3]) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
@ -990,7 +990,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
case CS_DATA_UNDERRUN:
resid = resid_len;
/* Use F/W calculated residual length. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
resid = fw_resid_len;
if (scsi_status & SS_RESIDUAL_UNDER) {
@ -1062,6 +1062,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
cp->device->id, cp->device->lun, cp,
cp->serial_number));
/*
* In case of a Underrun condition, set both the lscsi
* status and the completion status to appropriate
* values.
*/
if (resid &&
((unsigned)(cp->request_bufflen - resid) <
cp->underflow)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): Mid-layer underflow "
"detected (%x of %x bytes)...returning "
"error status.\n", ha->host_no,
cp->device->channel, cp->device->id,
cp->device->lun, resid,
cp->request_bufflen));
cp->result = DID_ERROR << 16 | lscsi_status;
}
if (sense_len)
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
CMD_ACTUAL_SNSLEN(cp)));
@ -1166,7 +1185,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
case CS_TIMEOUT:
cp->result = DID_BUS_BUSY << 16;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d:%d): TIMEOUT status detected "
"0x%x-0x%x\n", ha->host_no, cp->device->channel,
@ -1235,7 +1254,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
}
/* Move sense data. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
@ -1483,7 +1502,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
ha->isp_ops.fw_dump(ha, 1);
ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@ -1617,7 +1636,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
ha->isp_ops.fw_dump(ha, 1);
ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@ -1739,11 +1758,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
int ret;
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha))
if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
goto skip_msix;
if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
!QLA_MSIX_FW_MODE_1(ha->fw_attributes)) {
if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
ha->chip_revision, ha->fw_attributes));
@ -1762,7 +1781,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha))
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
@ -1772,7 +1791,7 @@ skip_msix:
}
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
if (!ret) {
ha->flags.inta_enabled = 1;
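
The new underflow handling earlier in this file marks a command with DID_ERROR when the firmware-reported residual means fewer bytes reached the mid-layer than cp->underflow allows. The core test, restated in isolation with hypothetical parameter names:

/* Sketch of the added underflow validation (hypothetical helper). */
static int midlayer_underflow(unsigned int bufflen, unsigned int resid,
			      unsigned int underflow)
{
	/* Non-zero residual and fewer bytes transferred than tolerated. */
	return resid && (bufflen - resid) < underflow;
}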


@ -90,7 +90,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@ -154,7 +154,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@ -175,7 +175,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
ha->host_no, command));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@ -228,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
uint16_t mb0;
uint32_t ictrl;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
} else {
@ -322,7 +322,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
if (MSW(risc_addr) || IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_0;
@ -336,7 +336,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
mcp->mb[6] = MSW(MSD(req_dma));
mcp->mb[7] = LSW(MSD(req_dma));
mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[4] = MSW(risc_code_size);
mcp->mb[5] = LSW(risc_code_size);
mcp->out_mb |= MBX_5|MBX_4;
@ -387,7 +387,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
@ -410,7 +410,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
ha->host_no, rval, mcp->mb[0]));
} else {
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
__func__, ha->host_no, mcp->mb[1]));
} else {
@ -551,7 +551,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
mcp->mb[3] = fwopts[3];
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->in_mb |= MBX_1;
} else {
mcp->mb[10] = fwopts[10];
@ -664,7 +664,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->out_mb |= MBX_2|MBX_1;
@ -681,8 +681,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
(mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])));
ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
(mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
@ -739,7 +739,7 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_QLA24XX(ha) || IS_QLA54XX(ha) ? RF_MASK_24XX :RF_MASK;
IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK;
}
return rval;
@ -1085,7 +1085,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mcp->mb[0] = MBC_GET_PORT_DATABASE;
if (opt != 0 && !IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (opt != 0 && !IS_FWI2_CAPABLE(ha))
mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
mcp->mb[2] = MSW(pd_dma);
mcp->mb[3] = LSW(pd_dma);
@ -1094,7 +1094,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
mcp->mb[9] = ha->vp_idx;
mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = fcport->loop_id;
mcp->mb[10] = opt;
mcp->out_mb |= MBX_10|MBX_1;
@ -1107,15 +1107,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
mcp->mb[1] = fcport->loop_id << 8 | opt;
mcp->out_mb |= MBX_1;
}
mcp->buf_size = (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE);
mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
mcp->flags = MBX_DMA_IN;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS)
goto gpd_error_out;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
@ -1333,7 +1333,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_6;
mcp->mb[2] = 0;
@ -1637,7 +1637,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb_ret, opt);
@ -1821,7 +1821,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
ha->host_no));
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_QLA24XX(ha) || IS_QLA54XX(ha) ? BIT_3: 0;
mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@ -1871,7 +1871,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
mcp->mb[0] = MBC_GET_ID_LIST;
mcp->out_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[2] = MSW(id_list_dma);
mcp->mb[3] = LSW(id_list_dma);
mcp->mb[6] = MSW(MSD(id_list_dma));
@ -2063,7 +2063,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
mcp->mb[7] = LSW(MSD(stat_buf_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = loop_id;
mcp->mb[4] = 0;
mcp->mb[10] = 0;
@ -2334,7 +2334,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@ -2444,7 +2444,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@ -2474,7 +2474,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@ -2514,7 +2514,7 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@ -2552,7 +2552,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha))
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@ -2595,7 +2595,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha))
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
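
Most of the mailbox-routine churn above swaps repeated IS_QLA24XX(ha) || IS_QLA54XX(ha) tests for the single capability macro IS_FWI2_CAPABLE(ha), with IS_IIDMA_CAPABLE(ha) covering the iIDMA paths, keyed off the DT_FWI2/DT_IIDMA flags set in qla2x00_set_isp_flags() later in this series. A plausible shape for such macros; the bit positions below are placeholders, the real definitions sit in qla_def.h:

/* Sketch only: placeholder bit positions, not the driver's definitions. */
#define DT_FWI2			(1u << 27)	/* placeholder bit */
#define DT_IIDMA		(1u << 26)	/* placeholder bit */

#define IS_FWI2_CAPABLE(ha)	(((ha)->device_type & DT_FWI2) != 0)
#define IS_IIDMA_CAPABLE(ha)	(((ha)->device_type & DT_IIDMA) != 0)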


@ -265,6 +265,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
strcpy(str, "PCIe (");
if (lspeed == 1)
strcat(str, "2.5Gb/s ");
else if (lspeed == 2)
strcat(str, "5.0Gb/s ");
else
strcat(str, "<unknown> ");
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
@ -343,6 +345,12 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
strcat(str, "[IP] ");
if (ha->fw_attributes & BIT_2)
strcat(str, "[Multi-ID] ");
if (ha->fw_attributes & BIT_3)
strcat(str, "[SB-2] ");
if (ha->fw_attributes & BIT_4)
strcat(str, "[T10 CRC] ");
if (ha->fw_attributes & BIT_5)
strcat(str, "[VI] ");
if (ha->fw_attributes & BIT_13)
strcat(str, "[Experimental]");
return str;
@ -681,7 +689,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
DEBUG3(qla2x00_print_scsi_cmd(cmd));
spin_unlock_irqrestore(&pha->hardware_lock, flags);
if (ha->isp_ops.abort_command(ha, sp)) {
if (ha->isp_ops->abort_command(ha, sp)) {
DEBUG2(printk("%s(%ld): abort_command "
"mbx failed.\n", __func__, ha->host_no));
} else {
@ -813,7 +821,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
#if defined(LOGOUT_AFTER_DEVICE_RESET)
if (ret == SUCCESS) {
if (fcport->flags & FC_FABRIC_DEVICE) {
ha->isp_ops.fabric_logout(ha, fcport->loop_id);
ha->isp_ops->fabric_logout(ha, fcport->loop_id);
qla2x00_mark_device_lost(ha, fcport, 0, 0);
}
}
@ -1105,7 +1113,7 @@ static int
qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
{
/* Abort Target command will clear Reservation */
return ha->isp_ops.abort_target(reset_fcport);
return ha->isp_ops->abort_target(reset_fcport);
}
static int
@ -1184,8 +1192,8 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
return;
}
}
@ -1194,6 +1202,193 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
static void
qla2x00_enable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
RD_REG_WORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2x00_disable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, 0);
RD_REG_WORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_enable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
RD_REG_DWORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_disable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
WRT_REG_DWORD(&reg->ictrl, 0);
RD_REG_DWORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
.chip_diag = qla2x00_chip_diag,
.config_rings = qla2x00_config_rings,
.reset_adapter = qla2x00_reset_adapter,
.nvram_config = qla2x00_nvram_config,
.update_fw_options = qla2x00_update_fw_options,
.load_risc = qla2x00_load_risc,
.pci_info_str = qla2x00_pci_info_str,
.fw_version_str = qla2x00_fw_version_str,
.intr_handler = qla2100_intr_handler,
.enable_intrs = qla2x00_enable_intrs,
.disable_intrs = qla2x00_disable_intrs,
.abort_command = qla2x00_abort_command,
.abort_target = qla2x00_abort_target,
.fabric_login = qla2x00_login_fabric,
.fabric_logout = qla2x00_fabric_logout,
.calc_req_entries = qla2x00_calc_iocbs_32,
.build_iocbs = qla2x00_build_scsi_iocbs_32,
.prep_ms_iocb = qla2x00_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
.read_nvram = qla2x00_read_nvram_data,
.write_nvram = qla2x00_write_nvram_data,
.fw_dump = qla2100_fw_dump,
.beacon_on = NULL,
.beacon_off = NULL,
.beacon_blink = NULL,
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
};
static struct isp_operations qla2300_isp_ops = {
.pci_config = qla2300_pci_config,
.reset_chip = qla2x00_reset_chip,
.chip_diag = qla2x00_chip_diag,
.config_rings = qla2x00_config_rings,
.reset_adapter = qla2x00_reset_adapter,
.nvram_config = qla2x00_nvram_config,
.update_fw_options = qla2x00_update_fw_options,
.load_risc = qla2x00_load_risc,
.pci_info_str = qla2x00_pci_info_str,
.fw_version_str = qla2x00_fw_version_str,
.intr_handler = qla2300_intr_handler,
.enable_intrs = qla2x00_enable_intrs,
.disable_intrs = qla2x00_disable_intrs,
.abort_command = qla2x00_abort_command,
.abort_target = qla2x00_abort_target,
.fabric_login = qla2x00_login_fabric,
.fabric_logout = qla2x00_fabric_logout,
.calc_req_entries = qla2x00_calc_iocbs_32,
.build_iocbs = qla2x00_build_scsi_iocbs_32,
.prep_ms_iocb = qla2x00_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
.read_nvram = qla2x00_read_nvram_data,
.write_nvram = qla2x00_write_nvram_data,
.fw_dump = qla2300_fw_dump,
.beacon_on = qla2x00_beacon_on,
.beacon_off = qla2x00_beacon_off,
.beacon_blink = qla2x00_beacon_blink,
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
};
static struct isp_operations qla24xx_isp_ops = {
.pci_config = qla24xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla24xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla24xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.abort_target = qla24xx_abort_target,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla24xx_read_nvram_data,
.write_nvram = qla24xx_write_nvram_data,
.fw_dump = qla24xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla24xx_beacon_blink,
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
};
static struct isp_operations qla25xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
.chip_diag = qla24xx_chip_diag,
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla24xx_nvram_config,
.update_fw_options = qla24xx_update_fw_options,
.load_risc = qla24xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
.intr_handler = qla24xx_intr_handler,
.enable_intrs = qla24xx_enable_intrs,
.disable_intrs = qla24xx_disable_intrs,
.abort_command = qla24xx_abort_command,
.abort_target = qla24xx_abort_target,
.fabric_login = qla24xx_login_fabric,
.fabric_logout = qla24xx_fabric_logout,
.calc_req_entries = NULL,
.build_iocbs = NULL,
.prep_ms_iocb = qla24xx_prep_ms_iocb,
.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
.read_nvram = qla25xx_read_nvram_data,
.write_nvram = qla25xx_write_nvram_data,
.fw_dump = qla25xx_fw_dump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla24xx_beacon_blink,
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
};
static inline void
qla2x00_set_isp_flags(scsi_qla_host_t *ha)
{
@ -1238,19 +1433,32 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
case PCI_DEVICE_ID_QLOGIC_ISP2422:
ha->device_type |= DT_ISP2422;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2432:
ha->device_type |= DT_ISP2432;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5422:
ha->device_type |= DT_ISP5422;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5432:
ha->device_type |= DT_ISP5432;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2532:
ha->device_type |= DT_ISP2532;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
@ -1322,61 +1530,6 @@ iospace_error_exit:
return (-ENOMEM);
}
static void
qla2x00_enable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
RD_REG_WORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2x00_disable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, 0);
RD_REG_WORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_enable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
RD_REG_DWORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla24xx_disable_intrs(scsi_qla_host_t *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
WRT_REG_DWORD(&reg->ictrl, 0);
RD_REG_DWORD(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
@ -1422,7 +1575,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532)
sht = &qla24xx_driver_template;
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
if (host == NULL) {
@ -1466,33 +1620,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
ha->isp_ops.pci_config = qla2100_pci_config;
ha->isp_ops.reset_chip = qla2x00_reset_chip;
ha->isp_ops.chip_diag = qla2x00_chip_diag;
ha->isp_ops.config_rings = qla2x00_config_rings;
ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
ha->isp_ops.nvram_config = qla2x00_nvram_config;
ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
ha->isp_ops.load_risc = qla2x00_load_risc;
ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
ha->isp_ops.intr_handler = qla2100_intr_handler;
ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
ha->isp_ops.abort_command = qla2x00_abort_command;
ha->isp_ops.abort_target = qla2x00_abort_target;
ha->isp_ops.fabric_login = qla2x00_login_fabric;
ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
ha->isp_ops.fw_dump = qla2100_fw_dump;
ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
if (IS_QLA2100(ha)) {
host->max_id = MAX_TARGETS_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@ -1501,6 +1628,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
host->sg_tablesize = 32;
ha->gid_list_info_size = 4;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
@ -1508,21 +1636,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
ha->gid_list_info_size = 4;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
ha->request_q_length = REQUEST_ENTRY_CNT_2200;
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
ha->isp_ops.pci_config = qla2300_pci_config;
ha->isp_ops.intr_handler = qla2300_intr_handler;
ha->isp_ops.fw_dump = qla2300_fw_dump;
ha->isp_ops.beacon_on = qla2x00_beacon_on;
ha->isp_ops.beacon_off = qla2x00_beacon_off;
ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
ha->gid_list_info_size = 6;
if (IS_QLA2322(ha) || IS_QLA6322(ha))
ha->optrom_size = OPTROM_SIZE_2322;
ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
@ -1531,36 +1655,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->isp_ops.pci_config = qla24xx_pci_config;
ha->isp_ops.reset_chip = qla24xx_reset_chip;
ha->isp_ops.chip_diag = qla24xx_chip_diag;
ha->isp_ops.config_rings = qla24xx_config_rings;
ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
ha->isp_ops.nvram_config = qla24xx_nvram_config;
ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
ha->isp_ops.load_risc = qla24xx_load_risc;
ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
ha->isp_ops.intr_handler = qla24xx_intr_handler;
ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
ha->isp_ops.abort_command = qla24xx_abort_command;
ha->isp_ops.abort_target = qla24xx_abort_target;
ha->isp_ops.fabric_login = qla24xx_login_fabric;
ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
ha->isp_ops.fw_dump = qla24xx_fw_dump;
ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
ha->isp_ops.beacon_on = qla24xx_beacon_on;
ha->isp_ops.beacon_off = qla24xx_beacon_off;
ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_24XX;
ha->isp_ops = &qla24xx_isp_ops;
} else if (IS_QLA25XX(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_25XX;
ha->isp_ops = &qla25xx_isp_ops;
}
host->can_queue = ha->request_q_length + 128;
@ -1628,11 +1736,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
ha->host_no, ha));
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
reg = ha->iobase;
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
} else {
@ -1654,7 +1762,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->isp_ops.enable_intrs(ha);
ha->isp_ops->enable_intrs(ha);
pci_set_drvdata(pdev, ha);
@ -1679,9 +1787,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
" ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
qla2x00_version_str, ha->model_number,
ha->model_desc ? ha->model_desc: "", pdev->device,
ha->isp_ops.pci_info_str(ha, pci_info), pci_name(pdev),
ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
ha->isp_ops.fw_version_str(ha, fw_str));
ha->isp_ops->fw_version_str(ha, fw_str));
return 0;
@ -1747,7 +1855,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
qla2x00_mem_free(ha);
@ -2025,7 +2133,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
}
memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
if (IS_FWI2_CAPABLE(ha)) {
/*
* Get consistent memory allocated for SFP
* block.
@ -2305,7 +2413,7 @@ qla2x00_do_dpc(void *data)
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags &
FCF_TAPE_PRESENT)
ha->isp_ops.fabric_logout(
ha->isp_ops->fabric_logout(
ha, fcport->loop_id,
fcport->d_id.b.domain,
fcport->d_id.b.area,
@ -2385,10 +2493,10 @@ qla2x00_do_dpc(void *data)
}
if (!ha->interrupts_on)
ha->isp_ops.enable_intrs(ha);
ha->isp_ops->enable_intrs(ha);
if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
ha->isp_ops.beacon_blink(ha);
ha->isp_ops->beacon_blink(ha);
qla2x00_do_dpc_all_vps(ha);
@ -2617,18 +2725,20 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
/* Firmware interface routines. */
#define FW_BLOBS 5
#define FW_BLOBS 6
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
#define FW_ISP2322 3
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
#define FW_FILE_ISP2300 "ql2300_fw.bin"
#define FW_FILE_ISP2322 "ql2322_fw.bin"
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
static DECLARE_MUTEX(qla_fw_lock);
@ -2638,6 +2748,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
{ .name = FW_FILE_ISP24XX, },
{ .name = FW_FILE_ISP25XX, },
};
struct fw_blob *
@ -2656,6 +2767,8 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
blob = &qla_fw_blobs[FW_ISP2322];
} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
blob = &qla_fw_blobs[FW_ISP24XX];
} else if (IS_QLA25XX(ha)) {
blob = &qla_fw_blobs[FW_ISP25XX];
}
down(&qla_fw_lock);
@ -2699,6 +2812,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
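The qla2xxx hunks above convert ha->isp_ops from a per-host copy of the operations structure into a pointer at a shared, read-only table (ha->isp_ops.foo() becomes ha->isp_ops->foo()). The fragment below is only an illustrative sketch of that pattern; the demo_* names are invented and are not the driver's real types.

#include <stdio.h>

struct demo_host;				/* stand-in for scsi_qla_host_t */

struct demo_isp_ops {				/* stand-in for struct isp_operations */
	void (*enable_intrs)(struct demo_host *);
	void (*disable_intrs)(struct demo_host *);
};

struct demo_host {
	const struct demo_isp_ops *isp_ops;	/* pointer, not an embedded copy */
};

static void demo_enable(struct demo_host *ha)  { (void)ha; puts("intrs on");  }
static void demo_disable(struct demo_host *ha) { (void)ha; puts("intrs off"); }

/* One constant table shared by every host of a given chip family. */
static const struct demo_isp_ops demo_ops = {
	.enable_intrs	= demo_enable,
	.disable_intrs	= demo_disable,
};

int main(void)
{
	struct demo_host ha = { .isp_ops = &demo_ops };

	ha.isp_ops->disable_intrs(&ha);		/* was: ha.isp_ops.disable_intrs(&ha) */
	ha.isp_ops->enable_intrs(&ha);
	return 0;
}

Pointing every host at one static const table keeps a single copy per chip family and keeps the function pointers out of writable per-instance memory, which appears to be the motivation for the isp_operations re-factor.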


@ -766,6 +766,29 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
return ret;
}
uint8_t *
qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
uint32_t *dwptr;
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
return buf;
}
int
qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
return qla24xx_write_flash_data(ha, (uint32_t *)buf,
FA_VPD_NVRAM_ADDR | naddr, bytes >> 2);
}
static inline void
qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
@ -919,7 +942,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
else
ha->beacon_color_state = QLA_LED_GRN_ON;
ha->isp_ops.beacon_blink(ha); /* This turns green LED off */
ha->isp_ops->beacon_blink(ha); /* This turns green LED off */
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
@ -1031,7 +1054,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */
ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@ -1419,7 +1442,7 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
/* Suspend HBA. */
scsi_block_requests(ha->host);
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Pause RISC. */
@ -1705,7 +1728,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
{
/* Suspend HBA. */
scsi_block_requests(ha->host);
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
@ -1713,7 +1736,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
ha->isp_ops.enable_intrs(ha);
ha->isp_ops->enable_intrs(ha);
scsi_unblock_requests(ha->host);
return buf;
@ -1727,7 +1750,7 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
/* Suspend HBA. */
scsi_block_requests(ha->host);
ha->isp_ops.disable_intrs(ha);
ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with write. */
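The new qla25xx_read_nvram_data() above pulls VPD/NVRAM out of flash one 32-bit word per address and stores each word little-endian via cpu_to_le32(). The loop below is a rough, self-contained illustration of that access shape; read_flash_dword() here is a dummy stand-in, not the driver's qla24xx_read_flash_dword().

#include <stdio.h>
#include <stdint.h>

/* Dummy stand-in for the hardware accessor: one 32-bit word per address. */
static uint32_t read_flash_dword(uint32_t addr)
{
	return 0xA5000000u | addr;
}

/* Fill buf with 'bytes' of data (a multiple of 4), advancing the flash
 * address once per dword and storing each word least significant byte
 * first, which is what cpu_to_le32() guarantees in the driver. */
static void read_nvram_data(uint8_t *buf, uint32_t naddr, uint32_t bytes)
{
	for (uint32_t i = 0; i < bytes >> 2; i++, naddr++) {
		uint32_t v = read_flash_dword(naddr);

		buf[i * 4 + 0] = v & 0xff;
		buf[i * 4 + 1] = (v >> 8) & 0xff;
		buf[i * 4 + 2] = (v >> 16) & 0xff;
		buf[i * 4 + 3] = (v >> 24) & 0xff;
	}
}

int main(void)
{
	uint8_t buf[16];

	read_nvram_data(buf, 0x100, sizeof(buf));
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}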


@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.02.00-k1"
#define QLA2XXX_VERSION "8.02.00-k2"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2


@ -2875,7 +2875,7 @@ static int __init scsi_debug_init(void)
init_all_queued();
sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
sdebug_driver_template.proc_name = sdebug_proc_name;
host_to_add = scsi_debug_add_host;
scsi_debug_add_host = 0;


@ -9,6 +9,7 @@
#include <linux/sysctl.h>
#include "scsi_logging.h"
#include "scsi_priv.h"
static ctl_table scsi_table[] = {


@ -16,6 +16,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@ -714,6 +715,7 @@ static int attr_add(struct device *dev, struct device_attribute *attr)
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
struct request_queue *rq = sdev->request_queue;
if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
return error;
@ -733,6 +735,17 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
/* take a reference for the sdev_classdev; this is
* released by the sdev_class .release */
get_device(&sdev->sdev_gendev);
error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
if (error)
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n", error);
/* we're treating error on bsg register as non-fatal, so pretend
* nothing went wrong */
error = 0;
if (sdev->host->hostt->sdev_attrs) {
for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
error = attr_add(&sdev->sdev_gendev,
@ -779,6 +792,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
return;
bsg_unregister_queue(sdev->request_queue);
class_device_unregister(&sdev->sdev_classdev);
transport_remove_device(dev);
device_del(dev);
@ -803,7 +817,7 @@ void scsi_remove_device(struct scsi_device *sdev)
}
EXPORT_SYMBOL(scsi_remove_device);
void __scsi_remove_target(struct scsi_target *starget)
static void __scsi_remove_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
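With the scsi_sysfs_add_sdev() hunk above, every SCSI device is now registered with bsg, so each device also appears as an SG v4 character node (typically /dev/bsg/<host:channel:id:lun> once udev picks up the class device). The program below is a hedged sketch of driving such a node from user space: the device path is an assumption for the example, it presumes the sg_io_v4 definitions in linux/bsg.h are visible to user space, and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */

int main(void)
{
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 io;
	int fd;

	/* Assumed path: the node is named after the device's h:c:t:l id. */
	fd = open("/dev/bsg/0:0:0:0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	io.request_len = sizeof(cdb);
	io.request = (uintptr_t)cdb;
	io.max_response_len = sizeof(sense);
	io.response = (uintptr_t)sense;
	io.timeout = 20000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");
	else
		printf("device status 0x%x\n", io.device_status);

	close(fd);
	return 0;
}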


@ -2358,7 +2358,7 @@ fc_rport_final_delete(struct work_struct *work)
* Notes:
* This routine assumes no locks are held on entry.
**/
struct fc_rport *
static struct fc_rport *
fc_rport_create(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
{


@ -29,6 +29,8 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/bsg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@ -40,6 +42,7 @@
struct sas_host_attrs {
struct list_head rphy_list;
struct mutex lock;
struct request_queue *q;
u32 next_target_id;
u32 next_expander_id;
int next_port_id;
@ -152,6 +155,106 @@ static struct {
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct request *req;
int ret;
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) {
req = elv_next_request(q);
if (!req)
break;
blkdev_dequeue_request(req);
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
spin_lock_irq(q->queue_lock);
req->end_io(req, ret);
}
}
static void sas_host_smp_request(struct request_queue *q)
{
sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
}
static void sas_non_host_smp_request(struct request_queue *q)
{
struct sas_rphy *rphy = q->queuedata;
sas_smp_request(q, rphy_to_shost(rphy), rphy);
}
static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
{
struct request_queue *q;
int error;
struct device *dev;
char namebuf[BUS_ID_SIZE];
const char *name;
if (!to_sas_internal(shost->transportt)->f->smp_handler) {
printk("%s can't handle SMP requests\n", shost->hostt->name);
return 0;
}
if (rphy) {
q = blk_init_queue(sas_non_host_smp_request, NULL);
dev = &rphy->dev;
name = dev->bus_id;
} else {
q = blk_init_queue(sas_host_smp_request, NULL);
dev = &shost->shost_gendev;
snprintf(namebuf, sizeof(namebuf),
"sas_host%d", shost->host_no);
name = namebuf;
}
if (!q)
return -ENOMEM;
error = bsg_register_queue(q, dev, name);
if (error) {
blk_cleanup_queue(q);
return -ENOMEM;
}
if (rphy)
rphy->q = q;
else
to_sas_host_attrs(shost)->q = q;
if (rphy)
q->queuedata = rphy;
else
q->queuedata = shost;
set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
return 0;
}
static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
{
struct request_queue *q;
if (rphy)
q = rphy->q;
else
q = to_sas_host_attrs(shost)->q;
if (!q)
return;
bsg_unregister_queue(q);
blk_cleanup_queue(q);
}
/*
* SAS host attributes
*/
@ -167,11 +270,26 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
sas_host->next_target_id = 0;
sas_host->next_expander_id = 0;
sas_host->next_port_id = 0;
if (sas_bsg_initialize(shost, NULL))
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
return 0;
}
static int sas_host_remove(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
sas_bsg_remove(shost, NULL);
return 0;
}
static DECLARE_TRANSPORT_CLASS(sas_host_class,
"sas_host", sas_host_setup, NULL, NULL);
"sas_host", sas_host_setup, sas_host_remove, NULL);
static int sas_host_match(struct attribute_container *cont,
struct device *dev)
@ -1287,6 +1405,9 @@ int sas_rphy_add(struct sas_rphy *rphy)
return error;
transport_add_device(&rphy->dev);
transport_configure_device(&rphy->dev);
if (sas_bsg_initialize(shost, rphy))
printk("fail to a bsg device %s\n", rphy->dev.bus_id);
mutex_lock(&sas_host->lock);
list_add_tail(&rphy->list, &sas_host->rphy_list);
@ -1329,6 +1450,8 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list);
mutex_unlock(&sas_host->lock);
sas_bsg_remove(shost, rphy);
transport_destroy_device(dev);
put_device(dev);
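sas_bsg_initialize() above only creates bsg queues when the transport user supplies an smp_handler, and sas_smp_request() then hands each queued struct request to that callback with the queue lock dropped. The fragment below is a hedged sketch of how a SAS transport user might wire this up, reusing the sas_smp_handler() that the libsas.h hunk further down exports; the my_* names are invented and the rest of the template is elided.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>

/* Sketch only: let the generic libsas handler service SMP requests that
 * arrive through the per-host and per-expander bsg nodes. */
static struct sas_function_template my_sas_transport_ops = {
	/* ...phy/rphy callbacks elided... */
	.smp_handler	= sas_smp_handler,
};

static struct scsi_transport_template *my_sas_template;

static int __init my_transport_init(void)
{
	my_sas_template = sas_attach_transport(&my_sas_transport_ops);
	return my_sas_template ? 0 : -ENOMEM;
}
module_init(my_transport_init);

MODULE_LICENSE("GPL");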


@ -420,7 +420,7 @@ static inline void borken_wait (void)
#define ULOOP( i ) for (clock = i*8;;)
#define TIMEOUT (!(clock--))
int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
static int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
{
struct Scsi_Host *instance;
int i, j;


@ -138,6 +138,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
goto out_put_host;
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
@ -155,7 +156,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
static __devexit int
sim710_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_to_shost(dev);
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];


@ -175,7 +175,7 @@ static void scsi_cd_put(struct scsi_cd *cd)
* an inode for that to work, and we do not always have one.
*/
int sr_media_change(struct cdrom_device_info *cdi, int slot)
static int sr_media_change(struct cdrom_device_info *cdi, int slot)
{
struct scsi_cd *cd = cdi->handle;
int retval;


@ -89,6 +89,8 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <asm/irq.h>
#include "wd33c93.h"
#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns
@ -1762,7 +1764,7 @@ static char setup_buffer[SETUP_BUFFER_SIZE];
static char setup_used[MAX_SETUP_ARGS];
static int done_setup = 0;
int
static int
wd33c93_setup(char *str)
{
int i;


@ -130,6 +130,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
goto out_put_host;
}
zorro_set_drvdata(z, host);
scsi_scan_host(host);
return 0;
@ -148,7 +149,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
{
struct Scsi_Host *host = dev_to_shost(&z->dev);
struct Scsi_Host *host = zorro_get_drvdata(z);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
scsi_remove_host(host);


@ -698,11 +698,6 @@ extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
struct sg_io_hdr *, int);
extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
struct bio *);
extern int blk_verify_command(unsigned char *, int);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)


@ -57,10 +57,10 @@ struct bsg_class_device {
struct request_queue *queue;
};
extern int bsg_register_queue(struct request_queue *, const char *);
extern int bsg_register_queue(struct request_queue *, struct device *, const char *);
extern void bsg_unregister_queue(struct request_queue *);
#else
#define bsg_register_queue(disk, name) (0)
#define bsg_register_queue(disk, dev, name) (0)
#define bsg_unregister_queue(disk) do { } while (0)
#endif


@ -412,6 +412,7 @@ struct ata_queued_cmd {
ata_qc_cb_t complete_fn;
void *private_data;
void *lldd_task;
};
struct ata_port_stats {


@ -2019,6 +2019,8 @@
#define PCI_VENDOR_ID_ARIMA 0x161f
#define PCI_VENDOR_ID_BROCADE 0x1657
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
#define PCI_DEVICE_ID_BCM1250_HT 0x0002


@ -30,6 +30,7 @@
#include <linux/timer.h>
#include <linux/pci.h>
#include <scsi/sas.h>
#include <linux/libata.h>
#include <linux/list.h>
#include <asm/semaphore.h>
#include <scsi/scsi_device.h>
@ -165,6 +166,13 @@ struct sata_device {
u8 port_no; /* port number, if this is a PM (Port) */
struct list_head children; /* PM Ports if this is a PM */
struct ata_port *ap;
struct ata_host ata_host;
struct ata_taskfile tf;
u32 sstatus;
u32 serror;
u32 scontrol;
};
/* ---------- Domain device ---------- */
@ -624,6 +632,7 @@ int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates);
int sas_phy_enable(struct sas_phy *phy, int enabled);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_queue_up(struct sas_task *task);
extern int sas_queuecommand(struct scsi_cmnd *,
void (*scsi_done)(struct scsi_cmnd *));
extern int sas_target_alloc(struct scsi_target *);
@ -661,4 +670,10 @@ int __sas_task_abort(struct sas_task *);
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd);
extern void sas_target_destroy(struct scsi_target *);
extern int sas_slave_alloc(struct scsi_device *);
extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req);
#endif /* _SASLIB_H_ */

include/scsi/sas_ata.h (new file, 60 lines)

@ -0,0 +1,60 @@
/*
* Support for SATA devices on Serial Attached SCSI (SAS) controllers
*
* Copyright (C) 2006 IBM Corporation
*
* Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#ifndef _SAS_ATA_H_
#define _SAS_ATA_H_
#include <linux/libata.h>
#include <scsi/libsas.h>
#ifdef CONFIG_SCSI_SAS_ATA
static inline int dev_is_sata(struct domain_device *dev)
{
return (dev->rphy->identify.target_port_protocols & SAS_PROTOCOL_SATA);
}
int sas_ata_init_host_and_port(struct domain_device *found_dev,
struct scsi_target *starget);
void sas_ata_task_abort(struct sas_task *task);
#else
static inline int dev_is_sata(struct domain_device *dev)
{
return 0;
}
int sas_ata_init_host_and_port(struct domain_device *found_dev,
struct scsi_target *starget)
{
return 0;
}
void sas_ata_task_abort(struct sas_task *task)
{
}
#endif
#endif /* _SAS_ATA_H_ */
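The new sas_ata.h follows the usual compile-out idiom: callers include one header, and the CONFIG_SCSI_SAS_ATA=n case supplies do-nothing stubs so the same call sites build either way (such stubs are normally declared static inline, as dev_is_sata() is above, so that including the header from several files cannot create duplicate definitions). Below is a self-contained sketch of the idiom with made-up names (CONFIG_FEATURE_X and feature_x_attach() are invented for this example).

#include <stdio.h>

#ifdef CONFIG_FEATURE_X
static int feature_x_attach(int id)
{
	printf("really attaching device %d\n", id);
	return 0;
}
#else
static inline int feature_x_attach(int id)
{
	(void)id;			/* feature compiled out: succeed as a no-op */
	return 0;
}
#endif

int main(void)
{
	/* The caller looks the same whether or not the feature is built in;
	 * compile with -DCONFIG_FEATURE_X to take the real path. */
	return feature_x_attach(7);
}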


@ -341,7 +341,7 @@ struct scsi_host_template {
/*
* Name of proc directory
*/
char *proc_name;
const char *proc_name;
/*
* Used to store the procfs directory if a driver implements the


@ -7,7 +7,7 @@
struct scsi_transport_template;
struct sas_rphy;
struct request;
enum sas_device_type {
SAS_PHY_UNUSED,
@ -23,6 +23,12 @@ enum sas_protocol {
SAS_PROTOCOL_SSP = 0x08,
};
static inline int sas_protocol_ata(enum sas_protocol proto)
{
return ((proto & SAS_PROTOCOL_SATA) ||
(proto & SAS_PROTOCOL_STP))? 1 : 0;
}
enum sas_linkrate {
/* These Values are defined in the SAS standard */
SAS_LINK_RATE_UNKNOWN = 0,
@ -85,10 +91,12 @@ struct sas_phy {
#define phy_to_shost(phy) \
dev_to_shost((phy)->dev.parent)
struct request_queue;
struct sas_rphy {
struct device dev;
struct sas_identify identify;
struct list_head list;
struct request_queue *q;
u32 scsi_target_id;
};
@ -166,6 +174,7 @@ struct sas_function_template {
int (*phy_reset)(struct sas_phy *, int);
int (*phy_enable)(struct sas_phy *, int);
int (*set_phy_speed)(struct sas_phy *, struct sas_phy_linkrates *);
int (*smp_handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
};