Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (37 commits)
  [SCSI] zfcp: fix double dbf id usage
  [SCSI] zfcp: wait on SCSI work to be finished before proceeding with init dev
  [SCSI] zfcp: fix erp list usage without using locks
  [SCSI] zfcp: prevent fc_remote_port_delete calls for unregistered rport
  [SCSI] zfcp: fix deadlock caused by shared work queue tasks
  [SCSI] zfcp: put threshold data in hba trace
  [SCSI] zfcp: Simplify zfcp data structures
  [SCSI] zfcp: Simplify get_adapter_by_busid
  [SCSI] zfcp: remove all typedefs and replace them with standards
  [SCSI] zfcp: attach and release SAN nameserver port on demand
  [SCSI] zfcp: remove unused references, declarations and flags
  [SCSI] zfcp: Update message with input from review
  [SCSI] zfcp: add queue_full sysfs attribute
  [SCSI] scsi_dh: suppress comparison warning
  [SCSI] scsi_dh: add Dell product information into rdac device handler
  [SCSI] qla2xxx: remove the unused SCSI_QLOGIC_FC_FIRMWARE option
  [SCSI] qla2xxx: fix printk format warnings
  [SCSI] qla2xxx: Update version number to 8.02.01-k8.
  [SCSI] qla2xxx: Ignore payload reserved-bits during RSCN processing.
  [SCSI] qla2xxx: Additional residual-count corrections during UNDERRUN handling.
  ...
Linus Torvalds committed on 2008-10-10 10:53:26 -07:00
Parents: e26feff647 41bfcf9010
Commit: ef5bef357c
50 changed files with 2108 additions and 1113 deletions


@ -436,6 +436,42 @@ Other:
was updated to remove all vports for the fc_host as well.
Transport supplied functions
----------------------------
The following functions are supplied by the FC-transport for use by LLDs.
fc_vport_create - create a vport
fc_vport_terminate - detach and remove a vport
Details:
/**
* fc_vport_create - Admin App or LLDD requests creation of a vport
* @shost: scsi host the virtual port is connected to.
* @ids: The world wide names, FC4 port roles, etc for
* the virtual port.
*
* Notes:
* This routine assumes no locks are held on entry.
*/
struct fc_vport *
fc_vport_create(struct Scsi_Host *shost, struct fc_vport_identifiers *ids)
/**
* fc_vport_terminate - Admin App or LLDD requests termination of a vport
* @vport: fc_vport to be terminated
*
* Calls the LLDD vport_delete() function, then deallocates and removes
* the vport from the shost and object tree.
*
* Notes:
* This routine assumes no locks are held on entry.
*/
int
fc_vport_terminate(struct fc_vport *vport)
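For illustration only (not part of this patch set): a minimal sketch of how a
management application or LLDD glue code might fill in the identifiers and
request an NPIV vport. The fc_vport_identifiers field names and the
FC_PORT_ROLE_FCP_INITIATOR / FC_PORTTYPE_NPIV constants are assumptions based
on scsi_transport_fc.h; the world wide names are placeholders.

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static struct fc_vport *example_npiv_vport(struct Scsi_Host *shost)
    {
            struct fc_vport_identifiers ids = {
                    .node_name  = 0x5005076401020304ULL, /* placeholder WWNN */
                    .port_name  = 0x5005076401020305ULL, /* placeholder WWPN */
                    .roles      = FC_PORT_ROLE_FCP_INITIATOR,
                    .vport_type = FC_PORTTYPE_NPIV,
                    .disable    = false, /* bring the vport up immediately */
            };

            /* No locks may be held on entry (see Notes above). */
            return fc_vport_create(shost, &ids);
    }

A vport created this way is torn down later with fc_vport_terminate(vport),
which calls the LLDD's vport_delete() and removes the vport from the shost.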
Credits
=======
The following people have contributed to this document:


@ -29,6 +29,7 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@ -532,6 +533,8 @@ static int mmc_blk_probe(struct mmc_card *card)
struct mmc_blk_data *md;
int err;
char cap_str[10];
/*
* Check that the card supports the command class(es) we need.
*/
@ -546,10 +549,11 @@ static int mmc_blk_probe(struct mmc_card *card)
if (err)
goto out;
printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
printk(KERN_INFO "%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
(unsigned long long)(get_capacity(md->disk) >> 1),
md->read_only ? "(ro)" : "");
cap_str, md->read_only ? "(ro)" : "");
mmc_set_drvdata(card, md);
add_disk(md->disk);
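For reference (not part of the patch): string_get_size() from
<linux/string_helpers.h> turns a raw byte count into a human-readable capacity
string, which is what replaces the hand-rolled KiB computation above. A minimal
sketch of the call pattern, with an assumed capacity value:

    #include <linux/string_helpers.h>

    char cap_str[10];
    u64 bytes = 4ULL * 1024 * 1024 * 1024;  /* assumed 4 GiB card */

    /* STRING_UNITS_2 selects binary units (KiB, MiB, GiB, ...);
     * STRING_UNITS_10 would give decimal units (kB, MB, GB, ...). */
    string_get_size(bytes, STRING_UNITS_2, cap_str, sizeof(cap_str));
    /* cap_str now holds something along the lines of "4.00 GiB" */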


@ -88,11 +88,13 @@ static int __init zfcp_device_setup(char *devstr)
strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn))
if (!token || strict_strtoull(token, 0,
(unsigned long long *) &zfcp_data.init_wwpn))
goto err_out;
token = strsep(&str, ",");
if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun))
if (!token || strict_strtoull(token, 0,
(unsigned long long *) &zfcp_data.init_fcp_lun))
goto err_out;
kfree(str);
@ -100,24 +102,10 @@ static int __init zfcp_device_setup(char *devstr)
err_out:
kfree(str);
pr_err("zfcp: Parse error for device parameter string %s, "
"device not attached.\n", devstr);
pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
return 0;
}
static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
{
struct zfcp_adapter *adapter;
list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
BUS_ID_SIZE) == 0) &&
!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_REMOVE))
return adapter;
return NULL;
}
static void __init zfcp_init_device_configure(void)
{
struct zfcp_adapter *adapter;
@ -141,7 +129,12 @@ static void __init zfcp_init_device_configure(void)
goto out_unit;
up(&zfcp_data.config_sema);
ccw_device_set_online(adapter->ccw_device);
zfcp_erp_wait(adapter);
wait_event(adapter->erp_done_wqh,
!(atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
down(&zfcp_data.config_sema);
zfcp_unit_put(unit);
out_unit:
@ -180,9 +173,9 @@ static int __init zfcp_module_init(void)
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
sema_init(&zfcp_data.config_sema, 1);
rwlock_init(&zfcp_data.config_lock);
@ -193,13 +186,14 @@ static int __init zfcp_module_init(void)
retval = misc_register(&zfcp_cfdc_misc);
if (retval) {
pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
goto out_misc;
}
retval = zfcp_ccw_register();
if (retval) {
pr_err("zfcp: Registration with common I/O layer failed.\n");
pr_err("zfcp: The zfcp device driver could not register with "
"the common I/O layer\n");
goto out_ccw_register;
}
@ -231,8 +225,7 @@ module_init(zfcp_module_init);
*
* Returns: pointer to zfcp_unit or NULL
*/
struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
fcp_lun_t fcp_lun)
struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
@ -251,7 +244,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
* Returns: pointer to zfcp_port or NULL
*/
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
wwn_t wwpn)
u64 wwpn)
{
struct zfcp_port *port;
@ -276,7 +269,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
*
* Sets up some unit internal structures and creates sysfs entry.
*/
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
@ -290,7 +283,8 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
(unsigned long long) fcp_lun);
unit->sysfs_device.parent = &port->sysfs_device;
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
@ -323,7 +317,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
}
zfcp_unit_get(unit);
unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
write_lock_irq(&zfcp_data.config_lock);
list_add_tail(&unit->list, &port->unit_list_head);
@ -332,7 +325,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
write_unlock_irq(&zfcp_data.config_lock);
port->units++;
zfcp_port_get(port);
return unit;
@ -351,11 +343,10 @@ err_out_free:
*/
void zfcp_unit_dequeue(struct zfcp_unit *unit)
{
zfcp_unit_wait(unit);
wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
write_lock_irq(&zfcp_data.config_lock);
list_del(&unit->list);
write_unlock_irq(&zfcp_data.config_lock);
unit->port->units--;
zfcp_port_put(unit->port);
sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
device_unregister(&unit->sysfs_device);
@ -416,11 +407,6 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.data_gid_pn);
}
static void zfcp_dummy_release(struct device *dev)
{
return;
}
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
@ -450,19 +436,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
stat_work));
}
static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
{
struct zfcp_port *port;
port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
ZFCP_DID_DIRECTORY_SERVICE);
if (IS_ERR(port))
return PTR_ERR(port);
zfcp_port_put(port);
return 0;
}
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
* @ccw_device: pointer to the struct cc_device
@ -508,7 +481,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->port_list_head);
INIT_LIST_HEAD(&adapter->port_remove_lh);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
@ -518,7 +490,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->san_dbf_lock);
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
spin_lock_init(&adapter->req_q.lock);
spin_lock_init(&adapter->req_q_lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@ -537,28 +509,15 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto sysfs_failed;
adapter->generic_services.parent = &adapter->ccw_device->dev;
adapter->generic_services.release = zfcp_dummy_release;
snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
"generic_services");
if (device_register(&adapter->generic_services))
goto generic_services_failed;
write_lock_irq(&zfcp_data.config_lock);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
write_unlock_irq(&zfcp_data.config_lock);
zfcp_data.adapters++;
zfcp_nameserver_enqueue(adapter);
zfcp_fc_nameserver_init(adapter);
return 0;
generic_services_failed:
sysfs_remove_group(&ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
sysfs_failed:
zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
@ -585,7 +544,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
zfcp_adapter_scsi_unregister(adapter);
device_unregister(&adapter->generic_services);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
@ -603,9 +561,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
list_del(&adapter->list);
write_unlock_irq(&zfcp_data.config_lock);
/* decrease number of adapters in list */
zfcp_data.adapters--;
zfcp_qdio_free(adapter);
zfcp_free_low_mem_buffers(adapter);
@ -633,21 +588,19 @@ static void zfcp_sysfs_port_release(struct device *dev)
* d_id is used to enqueue ports with a well known address like the Directory
* Service for nameserver lookup.
*/
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval;
char *bus_id;
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&port->remove_wq);
INIT_LIST_HEAD(&port->unit_list_head);
INIT_LIST_HEAD(&port->unit_remove_lh);
INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
port->adapter = adapter;
port->d_id = d_id;
@ -657,34 +610,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set(&port->refcount, 0);
if (status & ZFCP_STATUS_PORT_WKA) {
switch (d_id) {
case ZFCP_DID_DIRECTORY_SERVICE:
bus_id = "directory";
break;
case ZFCP_DID_MANAGEMENT_SERVICE:
bus_id = "management";
break;
case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
bus_id = "key_distribution";
break;
case ZFCP_DID_ALIAS_SERVICE:
bus_id = "alias";
break;
case ZFCP_DID_TIME_SERVICE:
bus_id = "time";
break;
default:
kfree(port);
return ERR_PTR(-EINVAL);
}
snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
port->sysfs_device.parent = &adapter->generic_services;
} else {
snprintf(port->sysfs_device.bus_id,
BUS_ID_SIZE, "0x%016llx", wwpn);
snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
(unsigned long long) wwpn);
port->sysfs_device.parent = &adapter->ccw_device->dev;
}
port->sysfs_device.release = zfcp_sysfs_port_release;
dev_set_drvdata(&port->sysfs_device, port);
@ -700,10 +628,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
if (device_register(&port->sysfs_device))
goto err_out_free;
if (status & ZFCP_STATUS_PORT_WKA)
retval = sysfs_create_group(&port->sysfs_device.kobj,
&zfcp_sysfs_ns_port_attrs);
else
retval = sysfs_create_group(&port->sysfs_device.kobj,
&zfcp_sysfs_port_attrs);
@ -718,10 +642,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
list_add_tail(&port->list, &adapter->port_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
if (!adapter->nameserver_port)
adapter->nameserver_port = port;
adapter->ports++;
write_unlock_irq(&zfcp_data.config_lock);
@ -740,21 +660,15 @@ err_out:
*/
void zfcp_port_dequeue(struct zfcp_port *port)
{
zfcp_port_wait(port);
wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
write_lock_irq(&zfcp_data.config_lock);
list_del(&port->list);
port->adapter->ports--;
write_unlock_irq(&zfcp_data.config_lock);
if (port->rport)
fc_remote_port_delete(port->rport);
port->rport = NULL;
zfcp_adapter_put(port->adapter);
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
sysfs_remove_group(&port->sysfs_device.kobj,
&zfcp_sysfs_ns_port_attrs);
else
sysfs_remove_group(&port->sysfs_device.kobj,
&zfcp_sysfs_port_attrs);
sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
device_unregister(&port->sysfs_device);
}


@ -25,7 +25,8 @@ static int zfcp_ccw_probe(struct ccw_device *ccw_device)
down(&zfcp_data.config_sema);
if (zfcp_adapter_enqueue(ccw_device)) {
dev_err(&ccw_device->dev,
"Setup of data structures failed.\n");
"Setting up data structures for the "
"FCP adapter failed\n");
retval = -EINVAL;
}
up(&zfcp_data.config_sema);
@ -46,6 +47,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
struct zfcp_unit *unit, *u;
LIST_HEAD(unit_remove_lh);
LIST_HEAD(port_remove_lh);
ccw_device_set_offline(ccw_device);
down(&zfcp_data.config_sema);
@ -54,26 +57,26 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
write_lock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
list_move(&unit->list, &port->unit_remove_lh);
list_move(&unit->list, &unit_remove_lh);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
&unit->status);
}
list_move(&port->list, &adapter->port_remove_lh);
list_move(&port->list, &port_remove_lh);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
}
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
write_unlock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
&unit->status))
list_for_each_entry_safe(port, p, &port_remove_lh, list) {
list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
if (atomic_read(&unit->status) &
ZFCP_STATUS_UNIT_REGISTERED)
scsi_remove_device(unit->device);
zfcp_unit_dequeue(unit);
}
zfcp_port_dequeue(port);
}
zfcp_adapter_wait(adapter);
wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
zfcp_adapter_dequeue(adapter);
up(&zfcp_data.config_sema);
@ -156,15 +159,18 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
switch (event) {
case CIO_GONE:
dev_warn(&adapter->ccw_device->dev, "device gone\n");
dev_warn(&adapter->ccw_device->dev,
"The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
break;
case CIO_NO_PATH:
dev_warn(&adapter->ccw_device->dev, "no path\n");
dev_warn(&adapter->ccw_device->dev,
"The CHPID for the FCP device is offline\n");
zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
break;
case CIO_OPER:
dev_info(&adapter->ccw_device->dev, "operational again\n");
dev_info(&adapter->ccw_device->dev,
"The FCP device is operational again\n");
zfcp_erp_modify_adapter_status(adapter, 11, NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
@ -220,3 +226,20 @@ int __init zfcp_ccw_register(void)
{
return ccw_driver_register(&zfcp_ccw_driver);
}
/**
* zfcp_get_adapter_by_busid - find zfcp_adapter struct
* @busid: bus id string of zfcp adapter to find
*/
struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
{
struct ccw_device *ccw_device;
struct zfcp_adapter *adapter = NULL;
ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
if (ccw_device) {
adapter = dev_get_drvdata(&ccw_device->dev);
put_device(&ccw_device->dev);
}
return adapter;
}


@ -318,6 +318,26 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
}
/**
* zfcp_hba_dbf_event_berr - trace event for bit error threshold
* @adapter: adapter affected by this QDIO related event
* @req: fsf request
*/
void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *req)
{
struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
struct fsf_status_read_buffer *sr_buf = req->data;
struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
unsigned long flags;
spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
}
static void zfcp_hba_dbf_view_response(char **p,
struct zfcp_hba_dbf_record_response *r)
{
@ -399,6 +419,30 @@ static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
}
static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
{
zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
zfcp_dbf_out(p, "prim_seq_err", "%d",
r->primitive_sequence_error_count);
zfcp_dbf_out(p, "inval_trans_word_err", "%d",
r->invalid_transmission_word_error_count);
zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
zfcp_dbf_out(p, "prim_seq_event_to", "%d",
r->primitive_sequence_event_timeout_count);
zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
r->elastic_buffer_overrun_error_count);
zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
r->advertised_receive_b2b_credit);
zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
r->current_receive_b2b_credit);
zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
r->advertised_transmit_b2b_credit);
zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
r->current_transmit_b2b_credit);
}
static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
@ -418,6 +462,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
zfcp_hba_dbf_view_status(&p, &r->u.status);
else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
zfcp_hba_dbf_view_berr(&p, &r->u.berr);
p += sprintf(p, "\n");
return p - out_buf;
@ -519,14 +565,14 @@ static const char *zfcp_rec_dbf_ids[] = {
[75] = "physical port recovery escalation after failed port "
"recovery",
[76] = "port recovery escalation after failed unit recovery",
[77] = "recovery opening nameserver port",
[77] = "",
[78] = "duplicate request id",
[79] = "link down",
[80] = "exclusive read-only unit access unsupported",
[81] = "shared read-write unit access unsupported",
[82] = "incoming rscn",
[83] = "incoming wwpn",
[84] = "",
[84] = "wka port handle not valid close port",
[85] = "online",
[86] = "offline",
[87] = "ccw device gone",
@ -570,7 +616,7 @@ static const char *zfcp_rec_dbf_ids[] = {
[125] = "need newer zfcp",
[126] = "need newer microcode",
[127] = "arbitrated loop not supported",
[128] = "unknown topology",
[128] = "",
[129] = "qtcb size mismatch",
[130] = "unknown fsf status ecd",
[131] = "fcp request too big",
@ -829,9 +875,9 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
struct zfcp_port *port = ct->port;
struct zfcp_adapter *adapter = port->adapter;
struct ct_hdr *hdr = zfcp_sg_to_address(ct->req);
struct zfcp_wka_port *wka_port = ct->wka_port;
struct zfcp_adapter *adapter = wka_port->adapter;
struct ct_hdr *hdr = sg_virt(ct->req);
struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
unsigned long flags;
@ -842,7 +888,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = fc_host_port_id(adapter->scsi_host);
r->d_id = port->d_id;
r->d_id = wka_port->d_id;
oct->cmd_req_code = hdr->cmd_rsp_code;
oct->revision = hdr->revision;
oct->gs_type = hdr->gs_type;
@ -863,9 +909,9 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
struct zfcp_port *port = ct->port;
struct zfcp_adapter *adapter = port->adapter;
struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp);
struct zfcp_wka_port *wka_port = ct->wka_port;
struct zfcp_adapter *adapter = wka_port->adapter;
struct ct_hdr *hdr = sg_virt(ct->resp);
struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
unsigned long flags;
@ -875,7 +921,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = port->d_id;
r->s_id = wka_port->d_id;
r->d_id = fc_host_port_id(adapter->scsi_host);
rct->cmd_rsp_code = hdr->cmd_rsp_code;
rct->revision = hdr->revision;
@ -922,8 +968,8 @@ void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
zfcp_san_dbf_event_els("oels", 2, fsf_req,
fc_host_port_id(els->adapter->scsi_host),
els->d_id, *(u8 *) zfcp_sg_to_address(els->req),
zfcp_sg_to_address(els->req), els->req->length);
els->d_id, *(u8 *) sg_virt(els->req),
sg_virt(els->req), els->req->length);
}
/**
@ -936,8 +982,7 @@ void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
fc_host_port_id(els->adapter->scsi_host),
*(u8 *)zfcp_sg_to_address(els->req),
zfcp_sg_to_address(els->resp),
*(u8 *)sg_virt(els->req), sg_virt(els->resp),
els->resp->length);
}


@ -151,6 +151,7 @@ struct zfcp_hba_dbf_record {
struct zfcp_hba_dbf_record_response response;
struct zfcp_hba_dbf_record_status status;
struct zfcp_hba_dbf_record_qdio qdio;
struct fsf_bit_error_payload berr;
} u;
} __attribute__ ((packed));


@ -39,29 +39,6 @@
/********************* GENERAL DEFINES *********************************/
/**
* zfcp_sg_to_address - determine kernel address from struct scatterlist
* @list: struct scatterlist
* Return: kernel address
*/
static inline void *
zfcp_sg_to_address(struct scatterlist *list)
{
return sg_virt(list);
}
/**
* zfcp_address_to_sg - set up struct scatterlist from kernel address
* @address: kernel address
* @list: struct scatterlist
* @size: buffer size
*/
static inline void
zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
{
sg_set_buf(list, address, size);
}
#define REQUEST_LIST_SIZE 128
/********************* SCSI SPECIFIC DEFINES *********************************/
@ -101,11 +78,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
typedef unsigned long long wwn_t;
typedef unsigned long long fcp_lun_t;
/* data length field may be at variable position in FCP-2 FCP_CMND IU */
typedef unsigned int fcp_dl_t;
/* timeout for name-server lookup (in seconds) */
#define ZFCP_NS_GID_PN_TIMEOUT 10
@ -129,7 +101,7 @@ typedef unsigned int fcp_dl_t;
/* FCP(-2) FCP_CMND IU */
struct fcp_cmnd_iu {
fcp_lun_t fcp_lun; /* FCP logical unit number */
u64 fcp_lun; /* FCP logical unit number */
u8 crn; /* command reference number */
u8 reserved0:5; /* reserved */
u8 task_attribute:3; /* task attribute */
@ -204,7 +176,7 @@ struct fcp_rscn_element {
struct fcp_logo {
u32 command;
u32 nport_did;
wwn_t nport_wwpn;
u64 nport_wwpn;
} __attribute__((packed));
/*
@ -218,13 +190,6 @@ struct fcp_logo {
#define ZFCP_LS_RSCN 0x61
#define ZFCP_LS_RNID 0x78
struct zfcp_ls_rjt_par {
u8 action;
u8 reason_code;
u8 reason_expl;
u8 vendor_unique;
} __attribute__ ((packed));
struct zfcp_ls_adisc {
u8 code;
u8 field[3];
@ -234,20 +199,6 @@ struct zfcp_ls_adisc {
u32 nport_id;
} __attribute__ ((packed));
struct zfcp_ls_adisc_acc {
u8 code;
u8 field[3];
u32 hard_nport_id;
u64 wwpn;
u64 wwnn;
u32 nport_id;
} __attribute__ ((packed));
struct zfcp_rc_entry {
u8 code;
const char *description;
};
/*
* FC-GS-2 stuff
*/
@ -281,9 +232,7 @@ struct zfcp_rc_entry {
#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
#define ZFCP_STATUS_COMMON_OPENING 0x08000000
#define ZFCP_STATUS_COMMON_OPEN 0x04000000
#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
@ -291,16 +240,15 @@ struct zfcp_rc_entry {
/* adapter status */
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
/* FC-PH/FC-GS well-known address identifiers for generic services */
#define ZFCP_DID_WKA 0xFFFFF0
#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
@ -312,29 +260,27 @@ struct zfcp_rc_entry {
#define ZFCP_STATUS_PORT_DID_DID 0x00000002
#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
/* for ports with well known addresses */
#define ZFCP_STATUS_PORT_WKA \
(ZFCP_STATUS_PORT_NO_WWPN | \
ZFCP_STATUS_PORT_NO_SCSI_ID)
/* well known address (WKA) port status*/
enum zfcp_wka_status {
ZFCP_WKA_PORT_OFFLINE,
ZFCP_WKA_PORT_CLOSING,
ZFCP_WKA_PORT_OPENING,
ZFCP_WKA_PORT_ONLINE,
};
/* logical unit status */
#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
/* FSF request status (this does not have a common part) */
#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
@ -379,7 +325,7 @@ struct ct_hdr {
* a port name is required */
struct ct_iu_gid_pn_req {
struct ct_hdr header;
wwn_t wwpn;
u64 wwpn;
} __attribute__ ((packed));
/* FS_ACC IU and data unit for GID_PN nameserver request */
@ -388,11 +334,9 @@ struct ct_iu_gid_pn_resp {
u32 d_id;
} __attribute__ ((packed));
typedef void (*zfcp_send_ct_handler_t)(unsigned long);
/**
* struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
* @port: port where the request is sent to
* @wka_port: port where the request is sent to
* @req: scatter-gather list for request
* @resp: scatter-gather list for response
* @req_count: number of elements in request scatter-gather list
@ -404,12 +348,12 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
* @status: used to pass error status to calling function
*/
struct zfcp_send_ct {
struct zfcp_port *port;
struct zfcp_wka_port *wka_port;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
zfcp_send_ct_handler_t handler;
void (*handler)(unsigned long);
unsigned long handler_data;
int timeout;
struct completion *completion;
@ -426,8 +370,6 @@ struct zfcp_gid_pn_data {
struct zfcp_port *port;
};
typedef void (*zfcp_send_els_handler_t)(unsigned long);
/**
* struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
* @adapter: adapter where request is sent from
@ -451,22 +393,28 @@ struct zfcp_send_els {
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
zfcp_send_els_handler_t handler;
void (*handler)(unsigned long);
unsigned long handler_data;
struct completion *completion;
int ls_code;
int status;
};
struct zfcp_wka_port {
struct zfcp_adapter *adapter;
wait_queue_head_t completion_wq;
enum zfcp_wka_status status;
atomic_t refcount;
u32 d_id;
u32 handle;
struct mutex mutex;
struct delayed_work work;
};
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
u8 first; /* index of next free bfr
in queue (free_count>0) */
atomic_t count; /* number of free buffers
in queue */
spinlock_t lock; /* lock for operations on queue */
int pci_batch; /* SBALs since PCI indication
was last set */
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first; /* index of next free bfr in queue */
atomic_t count; /* number of free buffers in queue */
};
struct zfcp_erp_action {
@ -475,7 +423,7 @@ struct zfcp_erp_action {
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct zfcp_unit *unit;
volatile u32 status; /* recovery status */
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
for this action */
@ -506,8 +454,8 @@ struct zfcp_adapter {
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
refcount drop to zero */
wwn_t peer_wwnn; /* P2P peer WWNN */
wwn_t peer_wwpn; /* P2P peer WWPN */
u64 peer_wwnn; /* P2P peer WWNN */
u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
u32 hydra_version; /* Hydra version */
@ -518,13 +466,13 @@ struct zfcp_adapter {
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list_head; /* remote port list */
struct list_head port_remove_lh; /* head of ports to be
removed */
u32 ports; /* number of remote ports */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue req_q; /* request queue */
spinlock_t req_q_lock; /* for operations on queue */
int req_q_pci_batch; /* SBALs since PCI indication
was last set */
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
more available SBALs */
@ -548,7 +496,7 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
struct zfcp_port *nameserver_port; /* adapter's nameserver */
struct zfcp_wka_port nsp; /* adapter's nameserver */
debug_info_t *rec_dbf;
debug_info_t *hba_dbf;
debug_info_t *san_dbf; /* debug feature areas */
@ -563,11 +511,11 @@ struct zfcp_adapter {
struct zfcp_scsi_dbf_record scsi_dbf_buf;
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct qdio_initialize qdio_init_data; /* for qdio_establish */
struct device generic_services; /* directory for WKA ports */
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
atomic_t qdio_outb_full; /* queue full incidents */
};
struct zfcp_port {
@ -579,18 +527,16 @@ struct zfcp_port {
refcount drop to zero */
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list_head; /* head of logical unit list */
struct list_head unit_remove_lh; /* head of luns to be removed
list */
u32 units; /* # of logical units in list */
atomic_t status; /* status of this remote port */
wwn_t wwnn; /* WWNN if known */
wwn_t wwpn; /* WWPN */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
struct work_struct gid_pn_work;
};
struct zfcp_unit {
@ -601,8 +547,7 @@ struct zfcp_unit {
refcount drop to zero */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
unsigned int scsi_lun; /* own SCSI LUN */
fcp_lun_t fcp_lun; /* own FCP_LUN */
u64 fcp_lun; /* own FCP_LUN */
u32 handle; /* handle assigned by FSF */
struct scsi_device *device; /* scsi device struct pointer */
struct zfcp_erp_action erp_action; /* pending error recovery */
@ -625,7 +570,7 @@ struct zfcp_fsf_req {
u8 sbal_response; /* SBAL used in interrupt */
wait_queue_head_t completion_wq; /* can be used by a routine
to wait for completion */
volatile u32 status; /* status of this request */
u32 status; /* status of this request */
u32 fsf_command; /* FSF Command copy */
struct fsf_qtcb *qtcb; /* address of associated QTCB */
u32 seq_no; /* Sequence number of request */
@ -644,11 +589,7 @@ struct zfcp_fsf_req {
struct zfcp_data {
struct scsi_host_template scsi_host_template;
struct scsi_transport_template *scsi_transport_template;
atomic_t status; /* Module status flags */
struct list_head adapter_list_head; /* head of adapter list */
struct list_head adapter_remove_lh; /* head of adapters to be
removed */
u32 adapters; /* # of adapters in list */
rwlock_t config_lock; /* serialises changes
to adapter/port/unit
lists */
@ -656,11 +597,12 @@ struct zfcp_data {
changes */
atomic_t loglevel; /* current loglevel */
char init_busid[BUS_ID_SIZE];
wwn_t init_wwpn;
fcp_lun_t init_fcp_lun;
u64 init_wwpn;
u64 init_fcp_lun;
struct kmem_cache *fsf_req_qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
struct workqueue_struct *work_queue;
};
/* struct used by memory pools for fsf_requests */
@ -677,14 +619,7 @@ struct zfcp_fsf_req_qtcb {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
#ifndef atomic_test_mask
#define atomic_test_mask(mask, target) \
((atomic_read(target) & mask) == mask)
#endif
#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
/*
* Helper functions for request ID management.
@ -744,12 +679,6 @@ zfcp_unit_put(struct zfcp_unit *unit)
wake_up(&unit->remove_wq);
}
static inline void
zfcp_unit_wait(struct zfcp_unit *unit)
{
wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
}
static inline void
zfcp_port_get(struct zfcp_port *port)
{
@ -763,12 +692,6 @@ zfcp_port_put(struct zfcp_port *port)
wake_up(&port->remove_wq);
}
static inline void
zfcp_port_wait(struct zfcp_port *port)
{
wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
}
static inline void
zfcp_adapter_get(struct zfcp_adapter *adapter)
{
@ -782,10 +705,4 @@ zfcp_adapter_put(struct zfcp_adapter *adapter)
wake_up(&adapter->remove_wq);
}
static inline void
zfcp_adapter_wait(struct zfcp_adapter *adapter)
{
wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
}
#endif /* ZFCP_DEF_H */


@ -23,7 +23,6 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
@ -532,7 +531,6 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list)
if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
_zfcp_erp_port_reopen(port, clear, id, ref);
}
@ -669,8 +667,6 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
int ret;
struct zfcp_adapter *adapter = act->adapter;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(act);
write_unlock_irq(&adapter->erp_lock);
@ -741,8 +737,7 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
failed_qdio:
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_ADAPTER_XPORT_OK,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&act->adapter->status);
return retval;
}
@ -751,15 +746,11 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
{
int retval;
atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
zfcp_erp_adapter_strategy_generic(act, 1); /* close */
atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
if (retval == ZFCP_ERP_FAILED)
ssleep(8);
@ -783,10 +774,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
{
atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
ZFCP_STATUS_COMMON_CLOSING |
ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_PORT_DID_DID |
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_PORT_PHYS_CLOSING |
ZFCP_STATUS_PORT_INVALID_WWPN,
&port->status);
@ -839,73 +827,12 @@ static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
{
unsigned long flags;
struct zfcp_adapter *adapter = ns_act->adapter;
struct zfcp_erp_action *act, *tmp;
int status;
read_lock_irqsave(&adapter->erp_lock, flags);
list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
status = atomic_read(&adapter->nameserver_port->status);
if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
zfcp_erp_port_failed(act->port, 27, NULL);
zfcp_erp_action_ready(act);
}
}
read_unlock_irqrestore(&adapter->erp_lock, flags);
}
static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
{
int retval;
switch (act->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
case ZFCP_ERP_STEP_PORT_CLOSING:
return zfcp_erp_port_strategy_open_port(act);
case ZFCP_ERP_STEP_PORT_OPENING:
if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
retval = ZFCP_ERP_SUCCEEDED;
else
retval = ZFCP_ERP_FAILED;
/* this is needed anyway */
zfcp_erp_port_strategy_open_ns_wake(act);
return retval;
default:
return ZFCP_ERP_FAILED;
}
}
static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
{
int retval;
retval = zfcp_fc_ns_gid_pn_request(act);
if (retval == -ENOMEM)
return ZFCP_ERP_NOMEM;
act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
if (retval)
return ZFCP_ERP_FAILED;
return ZFCP_ERP_CONTINUES;
}
static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
if (port->wwpn != adapter->peer_wwpn) {
dev_err(&adapter->ccw_device->dev,
"Failed to open port 0x%016Lx, "
"Peer WWPN 0x%016Lx does not "
"match.\n", port->wwpn,
adapter->peer_wwpn);
zfcp_erp_port_failed(port, 25, NULL);
return ZFCP_ERP_FAILED;
}
@ -914,11 +841,25 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return zfcp_erp_port_strategy_open_port(act);
}
void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
{
int retval;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
retval = zfcp_fc_ns_gid_pn(&port->erp_action);
if (retval == -ENOMEM)
zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
if (retval)
zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
}
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
struct zfcp_port *ns_port = adapter->nameserver_port;
int p_status = atomic_read(&port->status);
switch (act->step) {
@ -927,28 +868,10 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_CLOSING:
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
if (!ns_port) {
dev_err(&adapter->ccw_device->dev,
"Nameserver port unavailable.\n");
return ZFCP_ERP_FAILED;
}
if (!(atomic_read(&ns_port->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)) {
/* nameserver port may live again */
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
&ns_port->status);
if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
queue_work(zfcp_data.work_queue, &port->gid_pn_work);
return ZFCP_ERP_CONTINUES;
}
return ZFCP_ERP_FAILED;
}
/* else nameserver port is already open, fall through */
case ZFCP_ERP_STEP_NAMESERVER_OPEN:
if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
return ZFCP_ERP_FAILED;
return zfcp_erp_port_strategy_open_lookup(act);
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
@ -961,25 +884,26 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_OPENING:
/* D_ID might have changed during open */
if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
(p_status & ZFCP_STATUS_PORT_DID_DID))
if (p_status & ZFCP_STATUS_COMMON_OPEN) {
if (p_status & ZFCP_STATUS_PORT_DID_DID)
return ZFCP_ERP_SUCCEEDED;
else {
act->step = ZFCP_ERP_STEP_PORT_CLOSING;
return ZFCP_ERP_CONTINUES;
}
/* fall through otherwise */
}
}
return ZFCP_ERP_FAILED;
}
static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
{
if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
return zfcp_erp_port_strategy_open_nameserver(act);
return zfcp_erp_port_strategy_open_common(act);
}
static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
goto close_init_done;
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
zfcp_erp_port_strategy_clearstati(port);
@ -992,19 +916,17 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
break;
}
close_init_done:
if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
else
return zfcp_erp_port_strategy_open(erp_action);
return ZFCP_ERP_FAILED;
return zfcp_erp_port_strategy_open_common(erp_action);
}
static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
{
atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
ZFCP_STATUS_COMMON_CLOSING |
ZFCP_STATUS_COMMON_ACCESS_DENIED |
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_UNIT_SHARED |
ZFCP_STATUS_UNIT_READONLY,
&unit->status);
@ -1065,8 +987,14 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
break;
case ZFCP_ERP_FAILED :
atomic_inc(&unit->erp_counter);
if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&unit->port->adapter->ccw_device->dev,
"ERP failed for unit 0x%016Lx on "
"port 0x%016Lx\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 21, NULL);
}
break;
}
@ -1091,8 +1019,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
result = ZFCP_ERP_EXIT;
}
atomic_inc(&port->erp_counter);
if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&port->adapter->ccw_device->dev,
"ERP failed for remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
zfcp_erp_port_failed(port, 22, NULL);
}
break;
}
@ -1114,8 +1046,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
case ZFCP_ERP_FAILED :
atomic_inc(&adapter->erp_counter);
if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
dev_err(&adapter->ccw_device->dev,
"ERP cannot recover an error "
"on the FCP device\n");
zfcp_erp_adapter_failed(adapter, 23, NULL);
}
break;
}
@ -1250,9 +1186,10 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
struct zfcp_unit *unit = p->unit;
struct fc_rport *rport = unit->port->rport;
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
unit->scsi_lun, 0);
scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
zfcp_unit_put(unit);
wake_up(&unit->port->adapter->erp_done_wqh);
kfree(p);
}
@ -1263,9 +1200,9 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
dev_err(&unit->port->adapter->ccw_device->dev,
"Out of resources. Could not register unit "
"0x%016Lx on port 0x%016Lx with SCSI stack.\n",
unit->fcp_lun, unit->port->wwpn);
"Registering unit 0x%016Lx on port 0x%016Lx failed\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
return;
}
@ -1273,7 +1210,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
schedule_work(&p->work);
queue_work(zfcp_data.work_queue, &p->work);
}
static void zfcp_erp_rport_register(struct zfcp_port *port)
@ -1286,8 +1223,8 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!port->rport) {
dev_err(&port->adapter->ccw_device->dev,
"Failed registration of rport "
"0x%016Lx.\n", port->wwpn);
"Registering port 0x%016Lx failed\n",
(unsigned long long)port->wwpn);
return;
}
@ -1299,9 +1236,9 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
{
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list)
if (port->rport && !(atomic_read(&port->status) &
ZFCP_STATUS_PORT_WKA)) {
list_for_each_entry(port, &adapter->port_list_head, list) {
if (!port->rport)
continue;
fc_remote_port_delete(port->rport);
port->rport = NULL;
}
@ -1459,9 +1396,9 @@ static int zfcp_erp_thread(void *data)
zfcp_erp_wakeup(adapter);
}
zfcp_rec_dbf_event_thread(4, adapter);
zfcp_rec_dbf_event_thread_lock(4, adapter);
down_interruptible(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread(5, adapter);
zfcp_rec_dbf_event_thread_lock(5, adapter);
}
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@ -1484,7 +1421,7 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
if (retval < 0) {
dev_err(&adapter->ccw_device->dev,
"Creation of ERP thread failed.\n");
"Creating an ERP thread for the FCP device failed.\n");
return retval;
}
wait_event(adapter->erp_thread_wqh,
@ -1506,7 +1443,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
up(&adapter->erp_ready_sem);
zfcp_rec_dbf_event_thread_lock(2, adapter);
zfcp_rec_dbf_event_thread_lock(3, adapter);
wait_event(adapter->erp_thread_wqh,
!(atomic_read(&adapter->status) &
@ -1526,7 +1463,6 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
{
zfcp_erp_modify_adapter_status(adapter, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
}
/**
@ -1539,15 +1475,6 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
{
zfcp_erp_modify_port_status(port, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
dev_err(&port->adapter->ccw_device->dev,
"Port ERP failed for WKA port d_id=0x%06x.\n",
port->d_id);
else
dev_err(&port->adapter->ccw_device->dev,
"Port ERP failed for port wwpn=0x%016Lx.\n",
port->wwpn);
}
/**
@ -1560,10 +1487,6 @@ void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
{
zfcp_erp_modify_unit_status(unit, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
dev_err(&unit->port->adapter->ccw_device->dev,
"Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
unit->fcp_lun, unit->port->wwpn);
}
/**
@ -1754,7 +1677,6 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
if (!(status & ZFCP_STATUS_PORT_WKA))
list_for_each_entry(unit, &port->unit_list_head, list)
zfcp_erp_unit_access_changed(unit, id, ref);
return;
@ -1779,10 +1701,7 @@ void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
return;
read_lock_irqsave(&zfcp_data.config_lock, flags);
if (adapter->nameserver_port)
zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
list_for_each_entry(port, &adapter->port_list_head, list)
if (port != adapter->nameserver_port)
zfcp_erp_port_access_changed(port, id, ref);
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}


@ -12,16 +12,14 @@
#include "zfcp_def.h"
/* zfcp_aux.c */
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
fcp_lun_t);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
wwn_t);
extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern int zfcp_adapter_enqueue(struct ccw_device *);
extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern void zfcp_port_dequeue(struct zfcp_port *);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
extern void zfcp_unit_dequeue(struct zfcp_unit *);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
@ -29,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
/* zfcp_ccw.c */
extern int zfcp_ccw_register(void);
extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
/* zfcp_cfdc.c */
extern struct miscdevice zfcp_cfdc_misc;
@ -50,6 +49,8 @@ extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
int);
extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
@ -91,17 +92,21 @@ extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
extern void zfcp_erp_timeout_handler(unsigned long);
extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
/* zfcp_fc.c */
extern int zfcp_scan_ports(struct zfcp_adapter *);
extern void _zfcp_scan_ports_later(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@ -135,10 +140,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
extern int zfcp_qdio_allocate(struct zfcp_adapter *);
extern void zfcp_qdio_free(struct zfcp_adapter *);
extern int zfcp_qdio_send(struct zfcp_fsf_req *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
struct zfcp_fsf_req *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
struct zfcp_fsf_req *);
extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_adapter *);
@ -148,14 +151,12 @@ extern void zfcp_qdio_close(struct zfcp_adapter *);
extern struct zfcp_data zfcp_data;
extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
extern struct fc_function_template zfcp_transport_functions;
/* zfcp_sysfs.c */
extern struct attribute_group zfcp_sysfs_unit_attrs;
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern struct attribute_group zfcp_sysfs_ns_port_attrs;
extern struct attribute_group zfcp_sysfs_port_attrs;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];


@ -39,6 +39,84 @@ struct zfcp_gpn_ft {
struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
};
struct zfcp_fc_ns_handler_data {
struct completion done;
void (*handler)(unsigned long);
unsigned long handler_data;
};
static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
{
if (mutex_lock_interruptible(&wka_port->mutex))
return -ERESTARTSYS;
if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
wka_port->status = ZFCP_WKA_PORT_OPENING;
if (zfcp_fsf_open_wka_port(wka_port))
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
}
mutex_unlock(&wka_port->mutex);
wait_event_timeout(
wka_port->completion_wq,
wka_port->status == ZFCP_WKA_PORT_ONLINE ||
wka_port->status == ZFCP_WKA_PORT_OFFLINE,
HZ >> 1);
if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
atomic_inc(&wka_port->refcount);
return 0;
}
return -EIO;
}
static void zfcp_wka_port_offline(struct work_struct *work)
{
struct delayed_work *dw = container_of(work, struct delayed_work, work);
struct zfcp_wka_port *wka_port =
container_of(dw, struct zfcp_wka_port, work);
wait_event(wka_port->completion_wq,
atomic_read(&wka_port->refcount) == 0);
mutex_lock(&wka_port->mutex);
if ((atomic_read(&wka_port->refcount) != 0) ||
(wka_port->status != ZFCP_WKA_PORT_ONLINE))
goto out;
wka_port->status = ZFCP_WKA_PORT_CLOSING;
if (zfcp_fsf_close_wka_port(wka_port)) {
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
wake_up(&wka_port->completion_wq);
}
out:
mutex_unlock(&wka_port->mutex);
}
static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
{
if (atomic_dec_return(&wka_port->refcount) != 0)
return;
/* wait 10 milliseconds, other reqs might pop in */
schedule_delayed_work(&wka_port->work, HZ / 100);
}
void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
{
struct zfcp_wka_port *wka_port = &adapter->nsp;
init_waitqueue_head(&wka_port->completion_wq);
wka_port->adapter = adapter;
wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
mutex_init(&wka_port->mutex);
INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
}
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
@ -47,10 +125,8 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
continue;
/* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
/* Try to connect to unused ports anyway. */
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
@ -102,7 +178,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
schedule_work(&fsf_req->adapter->scan_work);
}
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
@ -157,7 +233,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
static void zfcp_ns_gid_pn_handler(unsigned long data)
static void zfcp_fc_ns_handler(unsigned long data)
{
struct zfcp_fc_ns_handler_data *compl_rec =
(struct zfcp_fc_ns_handler_data *) data;
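/* run the optional evaluation callback, then wake up the waiting submitter */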
if (compl_rec->handler)
compl_rec->handler(compl_rec->handler_data);
complete(&compl_rec->done);
}
static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
{
struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
struct zfcp_send_ct *ct = &gid_pn->ct;
@ -166,43 +253,31 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
struct zfcp_port *port = gid_pn->port;
if (ct->status)
goto out;
return;
if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
goto out;
return;
}
/* paranoia */
if (ct_iu_req->wwpn != port->wwpn)
goto out;
return;
/* looks like a valid d_id */
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
out:
mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
}
/**
* zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
* @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
static int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
struct zfcp_gid_pn_data *gid_pn)
{
int ret;
struct zfcp_gid_pn_data *gid_pn;
struct zfcp_adapter *adapter = erp_action->adapter;
gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
if (!gid_pn)
return -ENOMEM;
memset(gid_pn, 0, sizeof(*gid_pn));
struct zfcp_fc_ns_handler_data compl_rec;
int ret;
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
gid_pn->ct.port = adapter->nameserver_port;
gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
gid_pn->ct.handler_data = (unsigned long) gid_pn;
gid_pn->ct.wka_port = &adapter->nsp;
gid_pn->ct.handler = zfcp_fc_ns_handler;
gid_pn->ct.handler_data = (unsigned long) &compl_rec;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
gid_pn->ct.req = &gid_pn->req;
gid_pn->ct.resp = &gid_pn->resp;
@ -222,9 +297,41 @@ int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
init_completion(&compl_rec.done);
compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
compl_rec.handler_data = (unsigned long) gid_pn;
ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
erp_action);
if (!ret)
wait_for_completion(&compl_rec.done);
return ret;
}
/**
* zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
* return: 0 on success, or a negative error code (e.g. -ENOMEM) on failure
*/
int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
{
int ret;
struct zfcp_gid_pn_data *gid_pn;
struct zfcp_adapter *adapter = erp_action->adapter;
gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
if (!gid_pn)
return -ENOMEM;
memset(gid_pn, 0, sizeof(*gid_pn));
ret = zfcp_wka_port_get(&adapter->nsp);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
zfcp_wka_port_put(&adapter->nsp);
out:
mempool_free(gid_pn, adapter->pool.data_gid_pn);
return ret;
}
@ -255,14 +362,14 @@ struct zfcp_els_adisc {
struct scatterlist req;
struct scatterlist resp;
struct zfcp_ls_adisc ls_adisc;
struct zfcp_ls_adisc_acc ls_adisc_acc;
struct zfcp_ls_adisc ls_adisc_acc;
};
static void zfcp_fc_adisc_handler(unsigned long data)
{
struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
struct zfcp_port *port = adisc->els.port;
struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
if (adisc->els.status) {
/* request rejected or timed out */
@ -295,7 +402,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
sg_init_one(adisc->els.req, &adisc->ls_adisc,
sizeof(struct zfcp_ls_adisc));
sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
sizeof(struct zfcp_ls_adisc_acc));
sizeof(struct zfcp_ls_adisc));
adisc->els.req_count = 1;
adisc->els.resp_count = 1;
@ -338,30 +445,6 @@ void zfcp_test_link(struct zfcp_port *port)
zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
}
static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
{
int ret;
if (!adapter->nameserver_port)
return -EINTR;
if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->nameserver_port->status)) {
ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
NULL);
if (ret)
return ret;
zfcp_erp_wait(adapter);
}
return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
&adapter->nameserver_port->status);
}
static void zfcp_gpn_ft_handler(unsigned long _done)
{
complete((struct completion *)_done);
}
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
{
struct scatterlist *sg = &gpn_ft->sg_req;
@ -403,7 +486,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
struct completion done;
struct zfcp_fc_ns_handler_data compl_rec;
int ret;
/* prepare CT IU for GPN_FT */
@ -420,19 +503,20 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
req->fc4_type = ZFCP_CT_SCSI_FCP;
/* prepare zfcp_send_ct */
ct->port = adapter->nameserver_port;
ct->handler = zfcp_gpn_ft_handler;
ct->handler_data = (unsigned long)&done;
ct->wka_port = &adapter->nsp;
ct->handler = zfcp_fc_ns_handler;
ct->handler_data = (unsigned long)&compl_rec;
ct->timeout = 10;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
ct->req_count = 1;
ct->resp_count = ZFCP_GPN_FT_BUFFERS;
init_completion(&done);
init_completion(&compl_rec.done);
compl_rec.handler = NULL;
ret = zfcp_fsf_send_ct(ct, NULL, NULL);
if (!ret)
wait_for_completion(&done);
wait_for_completion(&compl_rec.done);
return ret;
}
@ -442,9 +526,8 @@ static void zfcp_validate_port(struct zfcp_port *port)
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
if (port == adapter->nameserver_port)
return;
if ((port->supported_classes != 0) || (port->units != 0)) {
if ((port->supported_classes != 0) ||
!list_empty(&port->unit_list_head)) {
zfcp_port_put(port);
return;
}
@ -460,7 +543,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
struct scatterlist *sg = gpn_ft->sg_resp;
struct ct_hdr *hdr = sg_virt(sg);
struct gpn_ft_resp_acc *acc = sg_virt(sg);
struct zfcp_adapter *adapter = ct->port->adapter;
struct zfcp_adapter *adapter = ct->wka_port->adapter;
struct zfcp_port *port, *tmp;
u32 d_id;
int ret = 0, x, last = 0;
@ -490,6 +573,9 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
acc->port_id[2];
/* don't attach ports with a well known address */
if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
continue;
/* skip the adapter's port and known remote ports */
if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
continue;
@ -528,13 +614,15 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
return 0;
ret = zfcp_scan_get_nameserver(adapter);
ret = zfcp_wka_port_get(&adapter->nsp);
if (ret)
return ret;
gpn_ft = zfcp_alloc_sg_env();
if (!gpn_ft)
return -ENOMEM;
if (!gpn_ft) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < 3; i++) {
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
@ -547,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
}
zfcp_free_sg_env(gpn_ft);
out:
zfcp_wka_port_put(&adapter->nsp);
return ret;
}

The diff for this file is not shown because of its large size.


@ -71,13 +71,6 @@
#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
#define FSF_ELS_COMMAND_REJECTED 0x00000050
#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
#define FSF_AUTHORIZATION_FAILURE 0x00000053
#define FSF_CFDC_ERROR_DETECTED 0x00000054
#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
#define FSF_CONFLICTS_OVERRULED 0x00000058
#define FSF_PORT_BOXED 0x00000059
#define FSF_LUN_BOXED 0x0000005A
#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
@ -85,9 +78,7 @@
#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
#define FSF_SBAL_MISMATCH 0x00000063
#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
@ -102,20 +93,9 @@
#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
#define FSF_SQ_COMMAND_ABORTED 0x06
#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
/* FSF status qualifier for CFDC commands */
#define FSF_SQ_CFDC_HARDENED_ON_SE 0x00000000
#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
/* CFDC subtable codes */
#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
/* FSF status qualifier (most significant 4 bytes), local link down */
#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
@ -145,7 +125,6 @@
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes in status read buffer */
@ -159,20 +138,9 @@
/* status subtypes for unsolicited status notification lost */
#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
#define FSF_STATUS_READ_SUB_SENSE_DATA 0x00000002
#define FSF_STATUS_READ_SUB_LINK_STATUS 0x00000004
#define FSF_STATUS_READ_SUB_PORT_CLOSED 0x00000008
#define FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD 0x00000010
#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
#define FSF_STATUS_READ_SUB_ACT_HARDENED 0x00000040
#define FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT 0x00000080
/* status subtypes for CFDC */
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
/* topology that is detected by the adapter */
#define FSF_TOPO_ERROR 0x00000000
#define FSF_TOPO_P2P 0x00000001
#define FSF_TOPO_FABRIC 0x00000002
#define FSF_TOPO_AL 0x00000003
@ -180,17 +148,13 @@
/* data direction for FCP commands */
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_READ_WRITE 0x00000003
#define FSF_DATADIR_CMND 0x00000004
/* fc service class */
#define FSF_CLASS_1 0x00000001
#define FSF_CLASS_2 0x00000002
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
#define FSF_MAX_SBALS_PER_REQ 36
#define FSF_MAX_SBALS_PER_ELS_REQ 2
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
@ -200,50 +164,16 @@
#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
/* adapter types */
#define FSF_ADAPTER_TYPE_FICON 0x00000001
#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
/* port types */
#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
#define FSF_HBA_PORTTYPE_NPORT 0x00000005
#define FSF_HBA_PORTTYPE_PTP 0x00000021
/* The following are not defined in the FSF spec but are additionally defined by FC-HBA */
#define FSF_HBA_PORTTYPE_OTHER 0x00000002
#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
#define FSF_HBA_PORTTYPE_FPORT 0x00000008
#define FSF_HBA_PORTTYPE_LPORT 0x00000020
/* port states */
#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
#define FSF_HBA_PORTSTATE_ERROR 0x00000007
/* IO states of adapter */
#define FSF_IOSTAT_NPORT_RJT 0x00000004
#define FSF_IOSTAT_FABRIC_RJT 0x00000005
#define FSF_IOSTAT_LS_RJT 0x00000009
/* open LUN access flags*/
#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
@ -265,11 +195,6 @@ struct fsf_queue_designator {
u32 res1;
} __attribute__ ((packed));
struct fsf_port_closed_payload {
struct fsf_queue_designator queue_designator;
u32 port_handle;
} __attribute__ ((packed));
struct fsf_bit_error_payload {
u32 res1;
u32 link_failure_error_count;


@ -28,7 +28,7 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
static volatile struct qdio_buffer_element *
static struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
return &q->sbal[sbal_idx]->element[sbale_idx];
@ -57,7 +57,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
{
dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
@ -145,7 +145,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->resp_q;
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
if (unlikely(qdio_err)) {
@ -174,8 +174,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
dev_warn(&adapter->ccw_device->dev,
"Protocol violation by adapter. "
"Continuing operations.\n");
"A QDIO protocol error occurred, "
"operations continue\n");
}
/*
@ -190,8 +190,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
* @fsf_req: pointer to struct fsf_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
volatile struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}
@ -201,8 +200,7 @@ zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
* @fsf_req: pointer to struct fsf_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
volatile struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
req->sbale_curr);
@ -216,10 +214,10 @@ static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
% QDIO_MAX_BUFFERS_PER_Q;
}
static volatile struct qdio_buffer_element *
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(fsf_req);
@ -250,7 +248,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
return sbale;
}
static volatile struct qdio_buffer_element *
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@ -273,7 +271,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
unsigned long remaining, length;
void *addr;
@ -282,6 +280,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
addr += length, remaining -= length) {
sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
if (!sbale) {
atomic_inc(&fsf_req->adapter->qdio_outb_full);
zfcp_qdio_undo_sbals(fsf_req);
return -EINVAL;
}
@ -307,7 +306,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
struct scatterlist *sg, int max_sbals)
{
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
int retval, bytes = 0;
/* figure out last allowed SBAL */
@ -344,10 +343,10 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
int first = fsf_req->sbal_first;
int count = fsf_req->sbal_number;
int retval, pci, pci_batch;
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
/* acknowledgements for transferred buffers */
pci_batch = req_q->pci_batch + count;
pci_batch = adapter->req_q_pci_batch + count;
if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
pci = first + count - (pci_batch + 1);
@ -367,7 +366,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
atomic_sub(count, &req_q->count);
req_q->first += count;
req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
req_q->pci_batch = pci_batch;
adapter->req_q_pci_batch = pci_batch;
return 0;
}
@ -418,14 +417,14 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
struct zfcp_qdio_queue *req_q;
int first, count;
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
req_q = &adapter->req_q;
spin_lock_bh(&req_q->lock);
spin_lock_bh(&adapter->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_bh(&req_q->lock);
spin_unlock_bh(&adapter->req_q_lock);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
@ -438,7 +437,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
}
req_q->first = 0;
atomic_set(&req_q->count, 0);
req_q->pci_batch = 0;
adapter->req_q_pci_batch = 0;
adapter->resp_q.first = 0;
atomic_set(&adapter->resp_q.count, 0);
}
@ -450,23 +449,17 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
*/
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
volatile struct qdio_buffer_element *sbale;
struct qdio_buffer_element *sbale;
int cc;
if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
if (qdio_establish(&adapter->qdio_init_data)) {
dev_err(&adapter->ccw_device->dev,
"Establish of QDIO queues failed.\n");
return -EIO;
}
if (qdio_establish(&adapter->qdio_init_data))
goto failed_establish;
if (qdio_activate(adapter->ccw_device)) {
dev_err(&adapter->ccw_device->dev,
"Activate of QDIO queues failed.\n");
if (qdio_activate(adapter->ccw_device))
goto failed_qdio;
}
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(adapter->resp_q.sbal[cc]->element[0]);
@ -476,20 +469,20 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q)) {
dev_err(&adapter->ccw_device->dev,
"Init of QDIO response queue failed.\n");
QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
}
/* set index of first available SBAL / number of available SBALs */
adapter->req_q.first = 0;
atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
adapter->req_q.pci_batch = 0;
adapter->req_q_pci_batch = 0;
return 0;
failed_qdio:
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&adapter->ccw_device->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}


@ -21,20 +21,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
return fcp_sns_info_ptr;
}
void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
{
fcp_dl_t *fcp_dl_ptr;
/*
* fcp_dl_addr = start address of fcp_cmnd structure +
* size of fixed part + size of dynamically sized add_dcp_cdb field
* SEE FCP-2 documentation
*/
fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
(fcp_cmd->add_fcp_cdb_length << 2));
*fcp_dl_ptr = fcp_dl;
}
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@ -119,14 +105,18 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
{
struct zfcp_port *port;
struct zfcp_unit *unit;
int scsi_lun;
list_for_each_entry(port, &adapter->port_list_head, list) {
if (!port->rport || (id != port->rport->scsi_target_id))
continue;
list_for_each_entry(unit, &port->unit_list_head, list)
if (lun == unit->scsi_lun)
list_for_each_entry(unit, &port->unit_list_head, list) {
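/* compare against the LUN value as the SCSI midlayer derives it from the FCP LUN */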
scsi_lun = scsilun_to_int(
(struct scsi_lun *)&unit->fcp_lun);
if (lun == scsi_lun)
return unit;
}
}
return NULL;
}
@ -183,7 +173,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
return retval;
}
fsf_req->data = NULL;
fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
@ -294,7 +283,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
"registration with SCSI stack failed.");
"Registering the FCP device with the "
"SCSI stack failed\n");
return -EIO;
}
@ -312,7 +302,6 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
scsi_host_put(adapter->scsi_host);
return -EIO;
}
atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
return 0;
}
@ -336,7 +325,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
return;
}


@ -26,9 +26,9 @@ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
atomic_read(&adapter->status));
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
adapter->peer_wwnn);
(unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
adapter->peer_wwpn);
(unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
adapter->peer_d_id);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
@ -135,8 +135,9 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
{
struct zfcp_adapter *adapter = dev_get_drvdata(dev);
struct zfcp_port *port;
wwn_t wwpn;
u64 wwpn;
int retval = 0;
LIST_HEAD(port_remove_lh);
down(&zfcp_data.config_sema);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
@ -144,7 +145,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
goto out;
}
if (strict_strtoull(buf, 0, &wwpn)) {
if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
retval = -EINVAL;
goto out;
}
@ -154,7 +155,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
if (port && (atomic_read(&port->refcount) == 0)) {
zfcp_port_get(port);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
list_move(&port->list, &adapter->port_remove_lh);
list_move(&port->list, &port_remove_lh);
} else
port = NULL;
write_unlock_irq(&zfcp_data.config_lock);
@ -200,7 +201,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
u64 fcp_lun;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
@ -209,7 +210,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
goto out;
}
if (strict_strtoull(buf, 0, &fcp_lun))
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
unit = zfcp_unit_enqueue(port, fcp_lun);
@ -233,8 +234,9 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
u64 fcp_lun;
int retval = 0;
LIST_HEAD(unit_remove_lh);
down(&zfcp_data.config_sema);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
@ -242,7 +244,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
goto out;
}
if (strict_strtoull(buf, 0, &fcp_lun)) {
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
retval = -EINVAL;
goto out;
}
@ -252,7 +254,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
if (unit && (atomic_read(&unit->refcount) == 0)) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
list_move(&unit->list, &port->unit_remove_lh);
list_move(&unit->list, &unit_remove_lh);
} else
unit = NULL;
@ -273,22 +275,7 @@ out:
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
static struct attribute *zfcp_port_ns_attrs[] = {
&dev_attr_port_failed.attr,
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
NULL
};
/**
* zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
*/
struct attribute_group zfcp_sysfs_ns_port_attrs = {
.attrs = zfcp_port_ns_attrs,
};
static struct attribute *zfcp_port_no_ns_attrs[] = {
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
@ -302,7 +289,7 @@ static struct attribute *zfcp_port_no_ns_attrs[] = {
* zfcp_sysfs_port_attrs - sysfs attributes for all other ports
*/
struct attribute_group zfcp_sysfs_port_attrs = {
.attrs = zfcp_port_no_ns_attrs,
.attrs = zfcp_port_attrs,
};
static struct attribute *zfcp_unit_attrs[] = {
@ -395,8 +382,10 @@ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
unit->port->adapter->ccw_device->dev.bus_id);
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
(unsigned long long) unit->port->wwpn);
ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
(unsigned long long) unit->fcp_lun);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
@ -487,10 +476,23 @@ ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
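/* expose how often the outbound QDIO queue was found full while building requests */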
static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *scsi_host = class_to_shost(dev);
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scsi_host->hostdata[0];
return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
&dev_attr_queue_full,
NULL
};


@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
To compile this driver as a module, choose M here: the
module will be called qlogicfas.
config SCSI_QLOGIC_FC_FIRMWARE
bool "Include loadable firmware in driver"
depends on SCSI_QLOGIC_FC
help
Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
expanded LUN addressing and FcTape (FCP-2) support, in the
qlogicfc driver. This is required on some platforms.
config SCSI_QLOGIC_1280
tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
depends on PCI && SCSI


@ -84,7 +84,7 @@ struct clariion_dh_data {
/*
* I/O buffer for both MODE_SELECT and INQUIRY commands.
*/
char buffer[CLARIION_BUFFER_SIZE];
unsigned char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
err = SCSI_DH_DEV_TEMP_BUSY;
goto out;
}
if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
if (csdev->buffer[4] > 2) {
/* Invalid buffer format */
sdev_printk(KERN_NOTICE, sdev,
"%s: invalid VPD page 0xC0 format\n",
@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
return NULL;
}
memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;


@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
req->sense = h->sense;
@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(START_STOP);
memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
req->timeout = HP_SW_TIMEOUT;


@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->retries = RDAC_RETRIES;
@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_F"},
{"DELL", "MD3000"},
{"DELL", "MD3000i"},
{NULL, NULL},
};


@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = ERR_PTR(-ENXIO);
struct Scsi_Host *shost = NULL;
cdev = class_find_device(&shost_class, NULL, &hostnum,
__scsi_host_match);


@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing in progress sc %p itt 0x%x\n",
task->sc, task->itt);
fail_command(conn, task, DID_BUS_BUSY << 16);
fail_command(conn, task, error << 16);
}
}
}


@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
else if (start == (FA_BOOT_CODE_ADDR*4) ||
start == (FA_RISC_CODE_ADDR*4))
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4))
else if (IS_QLA25XX(ha) &&
start == (ha->flt_region_vpd_nvram * 4))
valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);


@ -25,7 +25,6 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
uint64_t output_bytes;
};
/*
@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
#define FCPORT_UPDATE_NEEDED 27
#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
#define UNLOADING 29
#define NPIV_CONFIG_NEEDED 30
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
uint64_t fce_wr, fce_rd;
struct mutex fce_mutex;
uint32_t hw_event_start;
uint32_t hw_event_ptr;
uint32_t hw_event_pause_errors;
@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
uint32_t flt_region_flt;
uint32_t flt_region_fdt;
uint32_t flt_region_boot;
uint32_t flt_region_fw;
uint32_t flt_region_vpd_nvram;
uint32_t flt_region_hw_event;
uint32_t flt_region_npiv_conf;
/* Needed for BEACON */
uint16_t beacon_blink_led;
uint8_t beacon_color_state;


@ -789,14 +789,23 @@ struct device_reg_24xx {
#define FA_RISC_CODE_ADDR 0x20000
#define FA_RISC_CODE_SEGMENTS 2
#define FA_FLASH_DESCR_ADDR_24 0x11000
#define FA_FLASH_LAYOUT_ADDR_24 0x11400
#define FA_NPIV_CONF0_ADDR_24 0x16000
#define FA_NPIV_CONF1_ADDR_24 0x17000
#define FA_FW_AREA_ADDR 0x40000
#define FA_VPD_NVRAM_ADDR 0x48000
#define FA_FEATURE_ADDR 0x4C000
#define FA_FLASH_DESCR_ADDR 0x50000
#define FA_FLASH_LAYOUT_ADDR 0x50400
#define FA_HW_EVENT0_ADDR 0x54000
#define FA_HW_EVENT1_ADDR 0x54200
#define FA_HW_EVENT1_ADDR 0x54400
#define FA_HW_EVENT_SIZE 0x200
#define FA_HW_EVENT_ENTRY_SIZE 4
#define FA_NPIV_CONF0_ADDR 0x5C000
#define FA_NPIV_CONF1_ADDR 0x5D000
/*
* Flash Error Log Event Codes.
*/
@ -806,10 +815,6 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
#define FA_BOOT_LOG_ADDR 0x58000
#define FA_FW_DUMP0_ADDR 0x60000
#define FA_FW_DUMP1_ADDR 0x70000
uint32_t flash_data; /* Flash/NVRAM BIOS data. */
uint32_t ctrl_status; /* Control/Status. */
@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
uint8_t unused2[65];
};
/* Flash Layout Table ********************************************************/
struct qla_flt_location {
uint8_t sig[4];
uint32_t start_lo;
uint32_t start_hi;
uint16_t unused;
uint16_t checksum;
};
struct qla_flt_header {
uint16_t version;
uint16_t length;
uint16_t checksum;
uint16_t unused;
};
#define FLT_REG_FW 0x01
#define FLT_REG_BOOT_CODE 0x07
#define FLT_REG_VPD_0 0x14
#define FLT_REG_NVRAM_0 0x15
#define FLT_REG_VPD_1 0x16
#define FLT_REG_NVRAM_1 0x17
#define FLT_REG_FDT 0x1a
#define FLT_REG_FLT 0x1c
#define FLT_REG_HW_EVENT_0 0x1d
#define FLT_REG_HW_EVENT_1 0x1f
#define FLT_REG_NPIV_CONF_0 0x29
#define FLT_REG_NPIV_CONF_1 0x2a
struct qla_flt_region {
uint32_t code;
uint32_t size;
uint32_t start;
uint32_t end;
};
/* Flash NPIV Configuration Table ********************************************/
struct qla_npiv_header {
uint8_t sig[2];
uint16_t version;
uint16_t entries;
uint16_t unused[4];
uint16_t checksum;
};
struct qla_npiv_entry {
uint16_t flags;
uint16_t vf_id;
uint16_t qos;
uint16_t unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
};
/* 84XX Support **************************************************************/
#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */


@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
*/


@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->isp_ops->reset_chip(ha);
rval = qla2xxx_get_flash_info(ha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
ha->host_no));
return (rval);
}
ha->isp_ops->get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
rval = qla2x00_setup_chip(ha);
if (rval)
return (rval);
qla2xxx_get_flash_info(ha);
}
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(ha);
@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
DEBUG3(printk("%s: exiting normally\n", __func__));
}
/* Restore state if a resync event occured during processing */
/* Restore state if a resync event occurred during processing */
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
rval = QLA_SUCCESS;
/* Try GID_PT to get device list, else GAN. */
swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC);
swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
if (!swl) {
/*EMPTY*/
DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
faddr = FA_RISC_CODE_ADDR;
faddr = ha->flt_region_fw;
dcode = (uint32_t *)ha->request_ring;
*srisc_addr = 0;


@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
* @ha: HA context
* @ha_locked: is function called with the hardware lock
*
* Returns non-zero if a failure occured, else zero.
* Returns non-zero if a failure occurred, else zero.
*/
static inline int
qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)


@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
* Returns the proper CF_* direction based on CDB.
*/
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
cflags = 0;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE)
if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
sp->fcport->ha->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->ha->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
return (cflags);
}
@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
avail_dsds = 3;
@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
avail_dsds = 2;
@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
* Returns non-zero if a failure occured, else zero.
* Returns non-zero if a failure occurred, else zero.
*/
int
qla2x00_start_scsi(srb_t *sp)
@ -407,7 +412,7 @@ queuing_error:
*
* Can be called from both normal and interrupt context.
*
* Returns non-zero if a failure occured, else zero.
* Returns non-zero if a failure occurred, else zero.
*/
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
ha = sp->ha;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE)
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
sp->fcport->ha->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->ha->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* qla24xx_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
* Returns non-zero if a failure occured, else zero.
* Returns non-zero if a failure occurred, else zero.
*/
int
qla24xx_start_scsi(srb_t *sp)


@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
mb[1]));
qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
ha->host_no, mb[1]));
qla_printk(KERN_INFO, ha,
"LIP reset occured (%x).\n", mb[1]);
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
case MBA_PORT_UPDATE: /* Port database update */
/*
* If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
* If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
* event etc. earlier indicating loop is down) then process
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
ha->host_no, mb[1], mb[2], mb[3]));
rscn_entry = (mb[1] << 16) | mb[2];
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
ha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
}
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
rscn_queue_index = ha->rscn_in_ptr + 1;
if (rscn_queue_index == MAX_RSCN_COUNT)
rscn_queue_index = 0;
@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
resid = resid_len;
/* Use F/W calculated residual length. */
if (IS_FWI2_CAPABLE(ha)) {
if (scsi_status & SS_RESIDUAL_UNDER &&
resid != fw_resid_len) {
if (!(scsi_status & SS_RESIDUAL_UNDER)) {
lscsi_status = 0;
} else if (resid != fw_resid_len) {
scsi_status &= ~SS_RESIDUAL_UNDER;
lscsi_status = 0;
}


@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occured. Scheduling ISP "
"Mailbox command timeout occurred. Scheduling ISP "
"abort.\n");
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
qla2xxx_wake_dpc(ha);
@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout calling "
"abort_isp\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occured. Issuing ISP "
"Mailbox command timeout occurred. Issuing ISP "
"abort.\n");
set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
char *pmap;
dma_addr_t pmap_dma;
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma);
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
__func__, ha->host_no));


@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
set_bit(RSCN_UPDATE, &ha->dpc_flags);
set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
}
static int
@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_25XX;
ha->isp_ops = &qla25xx_isp_ops;
ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
}
host->can_queue = ha->request_q_length + 128;
@ -2433,6 +2432,12 @@ qla2x00_do_dpc(void *data)
ha->host_no));
}
if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
atomic_read(&ha->loop_state) == LOOP_READY) {
clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
qla2xxx_flash_npiv_conf(ha);
}
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);


@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
}
void
qla2xxx_get_flash_info(scsi_qla_host_t *ha)
static int
qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
uint32_t *dcode;
uint8_t *buf, *bcode, last_image;
uint16_t cnt, chksum, *wptr;
struct qla_flt_location *fltl;
/*
* FLT-location structure resides after the last PCI region.
*/
/* Begin with sane defaults. */
loc = locations[0];
*start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
FA_FLASH_LAYOUT_ADDR;
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)ha->request_ring;
dcode = (uint32_t *)ha->request_ring;
pcihdr = 0;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
bcode = buf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
goto end;
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
bcode = buf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R')
goto end;
last_image = bcode[0x15] & BIT_7;
/* Locate next PCI expansion ROM. */
pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
} while (!last_image);
/* Now verify FLT-location structure. */
fltl = (struct qla_flt_location *)ha->request_ring;
qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
sizeof(struct qla_flt_location) >> 2);
if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
goto end;
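/* the FLT-location structure is only trusted if its 16-bit words checksum to zero */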
wptr = (uint16_t *)ha->request_ring;
cnt = sizeof(struct qla_flt_location) >> 1;
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
qla_printk(KERN_ERR, ha,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
return QLA_FUNCTION_FAILED;
}
/* Good data. Use specified location. */
loc = locations[1];
*start = le16_to_cpu(fltl->start_hi) << 16 |
le16_to_cpu(fltl->start_lo);
end:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
return QLA_SUCCESS;
}
static void
qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
{
const char *loc, *locations[] = { "DEF", "FLT" };
uint16_t *wptr;
uint16_t cnt, chksum;
uint32_t start;
struct qla_flt_header *flt;
struct qla_flt_region *region;
ha->flt_region_flt = flt_addr;
wptr = (uint16_t *)ha->request_ring;
flt = (struct qla_flt_header *)ha->request_ring;
region = (struct qla_flt_region *)&flt[1];
ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
flt_addr << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (flt->version != __constant_cpu_to_le16(1)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
le16_to_cpu(flt->checksum)));
goto no_flash_data;
}
cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
chksum));
goto no_flash_data;
}
loc = locations[1];
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
for ( ; cnt; cnt--, region++) {
/* Store addresses as DWORD offsets. */
start = le32_to_cpu(region->start) >> 2;
DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
"end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
switch (le32_to_cpu(region->code)) {
case FLT_REG_FW:
ha->flt_region_fw = start;
break;
case FLT_REG_BOOT_CODE:
ha->flt_region_boot = start;
break;
case FLT_REG_VPD_0:
ha->flt_region_vpd_nvram = start;
break;
case FLT_REG_FDT:
ha->flt_region_fdt = start;
break;
case FLT_REG_HW_EVENT_0:
if (!PCI_FUNC(ha->pdev->devfn))
ha->flt_region_hw_event = start;
break;
case FLT_REG_HW_EVENT_1:
if (PCI_FUNC(ha->pdev->devfn))
ha->flt_region_hw_event = start;
break;
case FLT_REG_NPIV_CONF_0:
if (!PCI_FUNC(ha->pdev->devfn))
ha->flt_region_npiv_conf = start;
break;
case FLT_REG_NPIV_CONF_1:
if (PCI_FUNC(ha->pdev->devfn))
ha->flt_region_npiv_conf = start;
break;
}
}
goto done;
no_flash_data:
/* Use hardcoded defaults. */
loc = locations[0];
ha->flt_region_fw = FA_RISC_CODE_ADDR;
ha->flt_region_boot = FA_BOOT_CODE_ADDR;
ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
FA_FLASH_DESCR_ADDR;
ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
(IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
(IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
"vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
ha->flt_region_npiv_conf));
}
static void
qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
{
#define FLASH_BLK_SIZE_32K 0x8000
#define FLASH_BLK_SIZE_64K 0x10000
const char *loc, *locations[] = { "MID", "FDT" };
uint16_t cnt, chksum;
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return;
uint16_t mid, fid;
wptr = (uint16_t *)ha->request_ring;
fdt = (struct qla_fdt_layout *)ha->request_ring;
ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE);
ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
goto no_flash_data;
}
ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f;
loc = locations[1];
mid = le16_to_cpu(fdt->man_id);
fid = le16_to_cpu(fdt->id);
ha->fdt_odd_index = mid == 0x1f;
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
flash_conf_to_access_addr(0x0336);
}
DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
"pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
return;
goto done;
no_flash_data:
loc = locations[0];
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
mid = man_id;
fid = flash_id;
ha->fdt_wrt_disable = 0x9c;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
switch (man_id) {
@ -625,14 +799,117 @@ no_flash_data:
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
break;
}
DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x "
"pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id,
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
"pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
ha->fdt_block_size));
}
int
qla2xxx_get_flash_info(scsi_qla_host_t *ha)
{
int ret;
uint32_t flt_addr;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(ha, &flt_addr);
if (ret != QLA_SUCCESS)
return ret;
qla2xxx_get_flt_info(ha, flt_addr);
qla2xxx_get_fdt_info(ha);
return QLA_SUCCESS;
}
void
qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
uint16_t *wptr;
uint16_t cnt, chksum;
struct qla_npiv_header hdr;
struct qla_npiv_entry *entry;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return;
ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
if (hdr.version == __constant_cpu_to_le16(0xffff))
return;
if (hdr.version != __constant_cpu_to_le16(1)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
le16_to_cpu(hdr.checksum)));
return;
}
data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
if (!data) {
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
"allocate memory.\n"));
return;
}
ha->isp_ops->read_optrom(ha, (uint8_t *)data,
ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
sizeof(struct qla_npiv_entry)) >> 1;
for (wptr = data, chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
chksum));
goto done;
}
entry = data + sizeof(struct qla_npiv_header);
cnt = le16_to_cpu(hdr.entries);
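/* walk the flash entries and create an NPIV vport for each enabled entry */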
for ( ; cnt; cnt--, entry++) {
uint16_t flags;
struct fc_vport_identifiers vid;
struct fc_vport *vport;
flags = le16_to_cpu(entry->flags);
if (flags == 0xffff)
continue;
if ((flags & BIT_0) == 0)
continue;
memset(&vid, 0, sizeof(vid));
vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
vid.vport_type = FC_PORTTYPE_NPIV;
vid.disable = false;
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name,
le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
vport = fc_vport_create(ha->host, 0, &vid);
if (!vport)
qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
"create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name);
}
done:
kfree(data);
}
static void
qla24xx_unprotect_flash(scsi_qla_host_t *ha)
{
@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
flash_data_to_access_addr(ha->flt_region_vpd_nvram |
naddr)));
return buf;
}
@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dbuf = vmalloc(RMW_BUFFER_SIZE);
if (!dbuf)
return QLA_MEMORY_ALLOC_FAILED;
ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
memcpy(dbuf + (naddr << 2), buf, bytes);
ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
vfree(dbuf);
@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(dbyte, 0, 8);
dcode = (uint16_t *)dbyte;
qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10,
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
__func__, ha->host_no));
@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw revision at "
"%x.\n", __func__, FA_RISC_CODE_ADDR * 4));
"%x.\n", __func__, ha->flt_region_fw * 4));
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
dcode = mbuf;
/* Begin with first PCI expansion ROM header. */
pcihdr = 0;
pcihdr = ha->flt_region_boot;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4);
qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
__func__, FA_RISC_CODE_ADDR));
__func__, ha->flt_region_fw));
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
/* Locate first empty entry. */
for (;;) {
if (ha->hw_event_ptr >=
ha->hw_event_start + FA_HW_EVENT_SIZE) {
ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;
@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
int rval;
uint32_t marker[2], fdata[4];
if (ha->hw_event_start == 0)
if (ha->flt_region_hw_event == 0)
return QLA_FUNCTION_FAILED;
DEBUG2(qla_printk(KERN_WARNING, ha,
@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
/* Locate marker. */
ha->hw_event_ptr = ha->hw_event_start;
ha->hw_event_ptr = ha->flt_region_hw_event;
for (;;) {
qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
4);
@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
break;
ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
if (ha->hw_event_ptr >=
ha->hw_event_start + FA_HW_EVENT_SIZE) {
ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;

Просмотреть файл

@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.02.01-k7"
#define QLA2XXX_VERSION "8.02.01-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2

Просмотреть файл

@ -668,13 +668,14 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
goto out;
}
/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
/* Check to see if the scsi lld has put this device into a blocked state. */
if (unlikely(scsi_device_blocked(cmd->device))) {
/*
* in SDEV_BLOCK, the command is just put back on the device
* queue. The suspend state has already blocked the queue so
* future requests should not occur until the device
* transitions out of the suspend state.
* in blocked state, the command is just put back on
* the device queue. The suspend state has already
* blocked the queue so future requests should not
* occur until the device transitions out of the
* suspend state.
*/
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

Просмотреть файл

@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
break;
case SDEV_QUIESCE:
case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
/*
* If the device is blocked we defer normal commands.
*/
@ -2073,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (state) {
case SDEV_CREATED:
/* There are no legal states that come back to
* created. This is the manually initialised start
* state */
switch (oldstate) {
case SDEV_CREATED_BLOCK:
break;
default:
goto illegal;
}
break;
case SDEV_RUNNING:
switch (oldstate) {
@ -2114,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_BLOCK:
switch (oldstate) {
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
break;
default:
goto illegal;
}
break;
case SDEV_CREATED_BLOCK:
switch (oldstate) {
case SDEV_CREATED:
break;
default:
goto illegal;
@ -2403,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
int err = 0;
err = scsi_device_set_state(sdev, SDEV_BLOCK);
if (err) {
err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
if (err)
return err;
}
/*
* The device has transitioned to SDEV_BLOCK. Stop the
@ -2447,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
* and goose the device queue if successful.
*/
err = scsi_device_set_state(sdev, SDEV_RUNNING);
if (err) {
err = scsi_device_set_state(sdev, SDEV_CREATED);
if (err)
return err;
}
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);

Просмотреть файл

@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/security.h>
#include <linux/delay.h>
#include <net/sock.h>
#include <net/netlink.h>
@ -30,6 +31,39 @@
struct sock *scsi_nl_sock = NULL;
EXPORT_SYMBOL_GPL(scsi_nl_sock);
static DEFINE_SPINLOCK(scsi_nl_lock);
static struct list_head scsi_nl_drivers;
static u32 scsi_nl_state;
#define STATE_EHANDLER_BSY 0x00000001
struct scsi_nl_transport {
int (*msg_handler)(struct sk_buff *);
void (*event_handler)(struct notifier_block *, unsigned long, void *);
unsigned int refcnt;
int flags;
};
/* flags values (bit flags) */
#define HANDLER_DELETING 0x1
static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
{ {NULL, }, };
struct scsi_nl_drvr {
struct list_head next;
int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
u32 len, u32 pid);
void (*devt_handler)(struct notifier_block *nb,
unsigned long event, void *notify_ptr);
struct scsi_host_template *hostt;
u64 vendor_id;
unsigned int refcnt;
int flags;
};
/**
* scsi_nl_rcv_msg - Receive message handler.
@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct scsi_nl_hdr *hdr;
uint32_t rlen;
int err;
unsigned long flags;
u32 rlen;
int err, tport;
while (skb->len >= NLMSG_SPACE(0)) {
err = 0;
@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
err = -EBADMSG;
return;
goto next_msg;
}
hdr = NLMSG_DATA(nlh);
@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
__func__);
return;
goto next_msg;
}
/*
* We currently don't support anyone sending us a message
* Deliver message to the appropriate transport
*/
spin_lock_irqsave(&scsi_nl_lock, flags);
tport = hdr->transport;
if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
!(transports[tport].flags & HANDLER_DELETING) &&
(transports[tport].msg_handler)) {
transports[tport].refcnt++;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = transports[tport].msg_handler(skb);
spin_lock_irqsave(&scsi_nl_lock, flags);
transports[tport].refcnt--;
} else
err = -ENOENT;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@ -110,14 +160,42 @@ static int
scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct scsi_nl_drvr *driver;
unsigned long flags;
int tport;
if (n->protocol != NETLINK_SCSITRANSPORT)
return NOTIFY_DONE;
spin_lock_irqsave(&scsi_nl_lock, flags);
scsi_nl_state |= STATE_EHANDLER_BSY;
/*
* Currently, we are not tracking PID's, etc. There is nothing
* to handle.
* Pass event on to any transports that may be listening
*/
for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
if (!(transports[tport].flags & HANDLER_DELETING) &&
(transports[tport].event_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
transports[tport].event_handler(this, event, ptr);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
}
/*
* Pass event on to any drivers that may be listening
*/
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (!(driver->flags & HANDLER_DELETING) &&
(driver->devt_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
driver->devt_handler(this, event, ptr);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
}
scsi_nl_state &= ~STATE_EHANDLER_BSY;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return NOTIFY_DONE;
}
@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
/**
* scsi_netlink_init - Called by SCSI subsystem to initialize the SCSI transport netlink interface
* GENERIC SCSI transport receive and event handlers
**/
/**
* scsi_generic_msg_handler - receive message handler for GENERIC transport
* messages
*
* @skb: socket receive buffer
*
**/
static int
scsi_generic_msg_handler(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
struct scsi_nl_drvr *driver;
struct Scsi_Host *shost;
unsigned long flags;
int err = 0, match, pid;
pid = NETLINK_CREDS(skb)->pid;
switch (snlh->msgtype) {
case SCSI_NL_SHOST_VENDOR:
{
struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
/* Locate the driver that corresponds to the message */
spin_lock_irqsave(&scsi_nl_lock, flags);
match = 0;
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (driver->vendor_id == msg->vendor_id) {
match = 1;
break;
}
}
if ((!match) || (!driver->dmsg_handler)) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = -ESRCH;
goto rcv_exit;
}
if (driver->flags & HANDLER_DELETING) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
err = -ESHUTDOWN;
goto rcv_exit;
}
driver->refcnt++;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
/* if successful, scsi_host_lookup takes a shost reference */
shost = scsi_host_lookup(msg->host_no);
if (!shost) {
err = -ENODEV;
goto driver_exit;
}
/* is this host owned by the vendor ? */
if (shost->hostt != driver->hostt) {
err = -EINVAL;
goto vendormsg_put;
}
/* pass message on to the driver */
err = driver->dmsg_handler(shost, (void *)&msg[1],
msg->vmsg_datalen, pid);
vendormsg_put:
/* release reference by scsi_host_lookup */
scsi_host_put(shost);
driver_exit:
/* release our own reference on the registration object */
spin_lock_irqsave(&scsi_nl_lock, flags);
driver->refcnt--;
spin_unlock_irqrestore(&scsi_nl_lock, flags);
break;
}
default:
err = -EBADR;
break;
}
rcv_exit:
if (err)
printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
__func__, snlh->msgtype, err);
return err;
}
/**
* scsi_nl_add_transport -
* Registers message and event handlers for a transport. Enables
* the transport to receive netlink messages and events.
*
* @tport: transport registering handlers
* @msg_handler: receive message handler callback
* @event_handler: receive event handler callback
**/
int
scsi_nl_add_transport(u8 tport,
int (*msg_handler)(struct sk_buff *),
void (*event_handler)(struct notifier_block *, unsigned long, void *))
{
unsigned long flags;
int err = 0;
if (tport >= SCSI_NL_MAX_TRANSPORTS)
return -EINVAL;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
if (transports[tport].msg_handler || transports[tport].event_handler) {
err = -EALREADY;
goto register_out;
}
transports[tport].msg_handler = msg_handler;
transports[tport].event_handler = event_handler;
transports[tport].flags = 0;
transports[tport].refcnt = 0;
register_out:
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
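For illustration only (not part of this merge): a minimal sketch of how a transport module might claim a slot in the transports[] table at load time and release it on unload. The function names and the use of the FC slot are assumptions made for the example.

static int example_fc_nl_msg_handler(struct sk_buff *skb)
{
	/* parse the scsi_nl_hdr that follows the nlmsghdr and dispatch it */
	return 0;
}

static int __init example_fc_nl_init(void)
{
	/* claim the FC transport slot; fails with -EALREADY if already taken */
	return scsi_nl_add_transport(SCSI_NL_TRANSPORT_FC,
				     example_fc_nl_msg_handler, NULL);
}

static void __exit example_fc_nl_exit(void)
{
	/* waits for any in-flight handler calls to drop the refcnt */
	scsi_nl_remove_transport(SCSI_NL_TRANSPORT_FC);
}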
/**
* scsi_nl_remove_transport -
* Disable transport reception of messages and events
*
* @tport: transport deregistering handlers
*
**/
void
scsi_nl_remove_transport(u8 tport)
{
unsigned long flags;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
if (tport < SCSI_NL_MAX_TRANSPORTS) {
transports[tport].flags |= HANDLER_DELETING;
while (transports[tport].refcnt != 0) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
schedule_timeout_uninterruptible(HZ/4);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
transports[tport].msg_handler = NULL;
transports[tport].event_handler = NULL;
transports[tport].flags = 0;
}
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
/**
* scsi_nl_add_driver -
* A driver is registering its interfaces for SCSI netlink messages
*
* @vendor_id: A unique identification value for the driver.
* @hostt: address of the driver's host template. Used
* to verify an shost is bound to the driver
* @nlmsg_handler: receive message handler callback
* @nlevt_handler: receive event handler callback
*
* Returns:
* 0 on Success
* error result otherwise
**/
int
scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
u32 len, u32 pid),
void (*nlevt_handler)(struct notifier_block *nb,
unsigned long event, void *notify_ptr))
{
struct scsi_nl_drvr *driver;
unsigned long flags;
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (unlikely(!driver)) {
printk(KERN_ERR "%s: allocation failure\n", __func__);
return -ENOMEM;
}
driver->dmsg_handler = nlmsg_handler;
driver->devt_handler = nlevt_handler;
driver->hostt = hostt;
driver->vendor_id = vendor_id;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_add_tail(&driver->next, &scsi_nl_drivers);
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
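A hypothetical sketch (not part of this merge) of an LLD registering for vendor-unique netlink messages. The vendor id and handler name are invented for the example; a real driver must follow the vendor-id formatting rules noted in scsi_netlink.h.

static int example_lld_vendor_msg(struct Scsi_Host *shost, void *payload,
				  u32 len, u32 pid)
{
	/* payload/len are the vendor-unique bytes after the message header;
	 * pid identifies the sending application for a later reply */
	return 0;
}

static int example_lld_nl_register(struct scsi_host_template *sht)
{
	return scsi_nl_add_driver(0x0020000000000001ULL /* example vendor id */,
				  sht, example_lld_vendor_msg, NULL);
}

/* on unload: scsi_nl_remove_driver(0x0020000000000001ULL); */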
/**
* scsi_nl_remove_driver -
* A driver is unregistering its interfaces for SCSI netlink messages
*
* @vendor_id: The unique identification value for the driver.
**/
void
scsi_nl_remove_driver(u64 vendor_id)
{
struct scsi_nl_drvr *driver;
unsigned long flags;
spin_lock_irqsave(&scsi_nl_lock, flags);
if (scsi_nl_state & STATE_EHANDLER_BSY) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
msleep(1);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_for_each_entry(driver, &scsi_nl_drivers, next) {
if (driver->vendor_id == vendor_id) {
driver->flags |= HANDLER_DELETING;
while (driver->refcnt != 0) {
spin_unlock_irqrestore(&scsi_nl_lock, flags);
schedule_timeout_uninterruptible(HZ/4);
spin_lock_irqsave(&scsi_nl_lock, flags);
}
list_del(&driver->next);
kfree(driver);
spin_unlock_irqrestore(&scsi_nl_lock, flags);
return;
}
}
spin_unlock_irqrestore(&scsi_nl_lock, flags);
printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
__func__, (unsigned long long)vendor_id);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
/**
* scsi_netlink_init - Called by SCSI subsystem to initialize
* the SCSI transport netlink interface
*
**/
void
@ -136,6 +488,8 @@ scsi_netlink_init(void)
{
int error;
INIT_LIST_HEAD(&scsi_nl_drivers);
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
@ -150,8 +504,15 @@ scsi_netlink_init(void)
printk(KERN_ERR "%s: register of recieve handler failed\n",
__func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
return;
}
/* Register the entry points for the generic SCSI transport */
error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
scsi_generic_msg_handler, NULL);
if (error)
printk(KERN_ERR "%s: register of GENERIC transport handler"
" failed - %d\n", __func__, error);
return;
}
@ -163,6 +524,8 @@ scsi_netlink_init(void)
void
scsi_netlink_exit(void)
{
scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
if (scsi_nl_sock) {
netlink_kernel_release(scsi_nl_sock);
netlink_unregister_notifier(&scsi_netlink_notifier);
@ -172,3 +535,147 @@ scsi_netlink_exit(void)
}
/*
* Exported Interfaces
*/
/**
* scsi_nl_send_transport_msg -
* Generic function to send a single message from a SCSI transport to
* a single process
*
* @pid: receiving pid
* @hdr: message payload
*
**/
void
scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
const char *fn;
char *datab;
u32 len, skblen;
int err;
if (!scsi_nl_sock) {
err = -ENOENT;
fn = "netlink socket";
goto msg_fail;
}
len = NLMSG_SPACE(hdr->msglen);
skblen = NLMSG_SPACE(len);
skb = alloc_skb(skblen, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
fn = "alloc_skb";
goto msg_fail;
}
nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
if (!nlh) {
err = -ENOBUFS;
fn = "nlmsg_put";
goto msg_fail_skb;
}
datab = NLMSG_DATA(nlh);
memcpy(datab, hdr, hdr->msglen);
err = nlmsg_unicast(scsi_nl_sock, skb, pid);
if (err < 0) {
fn = "nlmsg_unicast";
/* nlmsg_unicast already kfree_skb'd */
goto msg_fail;
}
return;
msg_fail_skb:
kfree_skb(skb);
msg_fail:
printk(KERN_WARNING
"%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
"msglen %d: %s : err %d\n",
__func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
fn, err);
return;
}
EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
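As a sketch only (not in this merge): a transport-defined message could be sent to a single listener as below. The message structure, msgtype value and function name are assumptions for the example.

struct example_tport_msg {
	struct scsi_nl_hdr snlh;	/* must be the first element */
	uint32_t event_code;
} __attribute__((aligned(sizeof(uint64_t))));

static void example_tport_notify(u32 pid, u32 event_code)
{
	struct example_tport_msg msg;

	/* msglen covers the whole message, header included */
	INIT_SCSI_NL_HDR(&msg.snlh, SCSI_NL_TRANSPORT_FC,
			 0x0100 /* example msgtype */, sizeof(msg));
	msg.event_code = event_code;
	scsi_nl_send_transport_msg(pid, &msg.snlh);
}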
/**
* scsi_nl_send_vendor_msg - called to send a shost vendor unique message
* to a specific process id.
*
* @pid: process id of the receiver
* @host_no: host # sending the message
* @vendor_id: unique identifier for the driver's vendor
* @data_len: amount, in bytes, of vendor unique payload data
* @data_buf: pointer to vendor unique data buffer
*
* Returns:
* 0 on successful return
* otherwise, failing error code
*
* Notes:
* This routine assumes no locks are held on entry.
*/
int
scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
char *data_buf, u32 data_len)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct scsi_nl_host_vendor_msg *msg;
u32 len, skblen;
int err;
if (!scsi_nl_sock) {
err = -ENOENT;
goto send_vendor_fail;
}
len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
skblen = NLMSG_SPACE(len);
skb = alloc_skb(skblen, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto send_vendor_fail;
}
nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
skblen - sizeof(*nlh), 0);
if (!nlh) {
err = -ENOBUFS;
goto send_vendor_fail_skb;
}
msg = NLMSG_DATA(nlh);
INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
SCSI_NL_SHOST_VENDOR, len);
msg->vendor_id = vendor_id;
msg->host_no = host_no;
msg->vmsg_datalen = data_len; /* bytes */
memcpy(&msg[1], data_buf, data_len);
err = nlmsg_unicast(scsi_nl_sock, skb, pid);
if (err)
/* nlmsg_unicast already kfree_skb'd */
goto send_vendor_fail;
return 0;
send_vendor_fail_skb:
kfree_skb(skb);
send_vendor_fail:
printk(KERN_WARNING
"%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
__func__, host_no, err);
return err;
}
EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
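A hypothetical reply path (not part of this merge): a driver answering the application whose pid was passed to its dmsg_handler callback. The wrapper name and buffer handling are assumptions for the example.

static void example_lld_vendor_reply(struct Scsi_Host *shost, u32 app_pid,
				     u64 vendor_id, char *resp, u32 resp_len)
{
	int err;

	err = scsi_nl_send_vendor_msg(app_pid, shost->host_no, vendor_id,
				      resp, resp_len);
	if (err)
		shost_printk(KERN_WARNING, shost,
			     "vendor netlink reply failed: %d\n", err);
}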

Просмотреть файл

@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
if (IS_ERR(shost))
return PTR_ERR(shost);
if (!shost)
return error;
if (shost->transportt->user_scan)
error = shost->transportt->user_scan(shost, channel, id, lun);
@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
if (IS_ERR(shost))
return PTR_ERR(shost);
if (!shost)
return error;
sdev = scsi_device_lookup(shost, channel, id, lun);
if (sdev) {
scsi_remove_device(sdev);

Просмотреть файл

@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
int *bflags, int async)
{
int ret;
/*
* XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev.
@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* set the device running here so that slave configure
* may do I/O */
scsi_device_set_state(sdev, SDEV_RUNNING);
ret = scsi_device_set_state(sdev, SDEV_RUNNING);
if (ret) {
ret = scsi_device_set_state(sdev, SDEV_BLOCK);
if (ret) {
sdev_printk(KERN_ERR, sdev,
"in wrong state %s to complete scan\n",
scsi_device_state_name(sdev->sdev_state));
return SCSI_SCAN_NO_RESPONSE;
}
}
if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
sdev->use_192_bytes_for_3f = 1;
@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
int ret = sdev->host->hostt->slave_configure(sdev);
ret = sdev->host->hostt->slave_configure(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
*/
sdev = scsi_device_lookup_by_target(starget, lun);
if (sdev) {
if (rescan || sdev->sdev_state != SDEV_CREATED) {
if (rescan || !scsi_device_created(sdev)) {
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
"scsi scan: device exists on %s\n",
sdev->sdev_gendev.bus_id));
@ -1467,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
kfree(lun_data);
out:
scsi_device_put(sdev);
if (sdev->sdev_state == SDEV_CREATED)
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/

Просмотреть файл

@ -34,6 +34,7 @@ static const struct {
{ SDEV_QUIESCE, "quiesce" },
{ SDEV_OFFLINE, "offline" },
{ SDEV_BLOCK, "blocked" },
{ SDEV_CREATED_BLOCK, "created-blocked" },
};
const char *scsi_device_state_name(enum scsi_device_state state)

Просмотреть файл

@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
/* TODO: replace with a O(1) alg */
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return -EINVAL;
}
@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}

Просмотреть файл

@ -40,31 +40,7 @@
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
/*
* This is a temporary carrier for creating a vport. It will eventually
* be replaced by a real message definition for sgio or netlink.
*
* fc_vport_identifiers: This set of data contains all elements
* to uniquely identify and instantiate a FC virtual port.
*
* Notes:
* symbolic_name: The driver is to append the symbolic_name string data
* to the symbolic_node_name data that it generates by default.
* The resulting combination should then be registered with the switch.
* It is expected that things like Xen may stuff a VM title into
* this field.
*/
struct fc_vport_identifiers {
u64 node_name;
u64 port_name;
u32 roles;
bool disable;
enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
};
static int fc_vport_create(struct Scsi_Host *shost, int channel,
static int fc_vport_setup(struct Scsi_Host *shost, int channel,
struct device *pdev, struct fc_vport_identifiers *ids,
struct fc_vport **vport);
@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
vid.disable = false; /* always enabled */
/* we only allow support on Channel 0 !!! */
stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
return stat ? stat : count;
}
static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
/**
* fc_vport_create - allocates and creates a FC virtual port.
* fc_vport_setup - allocates and creates a FC virtual port.
* @shost: scsi host the virtual port is connected to.
* @channel: Channel on shost port connected to.
* @pdev: parent device for vport
@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static int
fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@ -3231,6 +3207,28 @@ delete_vport:
return error;
}
/**
* fc_vport_create - Admin App or LLDD requests creation of a vport
* @shost: scsi host the virtual port is connected to.
* @channel: channel on shost port connected to.
* @ids: The world wide names, FC4 port roles, etc for
* the virtual port.
*
* Notes:
* This routine assumes no locks are held on entry.
*/
struct fc_vport *
fc_vport_create(struct Scsi_Host *shost, int channel,
struct fc_vport_identifiers *ids)
{
int stat;
struct fc_vport *vport;
stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
ids, &vport);
return stat ? NULL : vport;
}
EXPORT_SYMBOL(fc_vport_create);
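For illustration (not part of this merge), the same call sequence the qla2xxx NPIV code above uses, reduced to a sketch; the wrapper names are invented and error handling is minimal.

static struct fc_vport *example_npiv_bringup(struct Scsi_Host *shost,
					     u64 wwpn, u64 wwnn)
{
	struct fc_vport_identifiers vid;

	memset(&vid, 0, sizeof(vid));
	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vid.vport_type = FC_PORTTYPE_NPIV;	/* the only type allowed */
	vid.disable = false;
	vid.port_name = wwpn;
	vid.node_name = wwnn;

	/* vports are only supported on channel 0 */
	return fc_vport_create(shost, 0, &vid);
}

static void example_npiv_teardown(struct fc_vport *vport)
{
	if (vport)
		fc_vport_terminate(vport);
}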
/**
* fc_vport_terminate - Admin App or LLDD requests termination of a vport

Просмотреть файл

@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
return -EINVAL;
shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
if (IS_ERR(shost)) {
if (!shost) {
printk(KERN_ERR "target discovery could not find host no %u\n",
ev->u.tgt_dscvr.host_no);
return -ENODEV;
@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENOSYS;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
if (IS_ERR(shost)) {
if (!shost) {
printk(KERN_ERR "set_host_param could not find host no %u\n",
ev->u.set_host_param.host_no);
return -ENODEV;

Просмотреть файл

@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@ -1433,27 +1434,21 @@ got_data:
*/
sector_size = 512;
}
{
/*
* The msdos fs needs to know the hardware sector size.
* So I have created this table. See ll_rw_blk.c
* Jacques Gelinas (Jacques@solucorp.qc.ca)
*/
int hard_sector = sector_size;
sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
struct request_queue *queue = sdp->request_queue;
sector_t mb = sz;
blk_queue_hardsect_size(sdp->request_queue, sector_size);
blk_queue_hardsect_size(queue, hard_sector);
/* avoid 64-bit division on 32-bit platforms */
sector_div(sz, 625);
mb -= sz - 974;
sector_div(mb, 1950);
{
char cap_str_2[10], cap_str_10[10];
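/* ffz(~sector_size) is log2(sector_size) for the power-of-two sizes in use here */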
u64 sz = sdkp->capacity << ffz(~sector_size);
string_get_size(sz, STRING_UNITS_2, cap_str_2,
sizeof(cap_str_2));
string_get_size(sz, STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
sd_printk(KERN_NOTICE, sdkp,
"%llu %d-byte hardware sectors (%llu MB)\n",
"%llu %d-byte hardware sectors: (%s/%s)\n",
(unsigned long long)sdkp->capacity,
hard_sector, (unsigned long long)mb);
sector_size, cap_str_10, cap_str_2);
}
/* Rescale capacity to 512-byte units */

Просмотреть файл

@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
static int __init dc390_module_init(void)
{
if (!disable_clustering)
printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"
"\twith \"disable_clustering=1\" and report to maintainers\n");
printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
if (tmscsim[0] == -1 || tmscsim[0] > 15) {
tmscsim[0] = 7;

Просмотреть файл

@ -0,0 +1,16 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
#include <linux/types.h>
/* Descriptions of the types of units to
* print in */
enum string_size_units {
STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
STRING_UNITS_2, /* use binary powers of 2^10 */
};
int string_get_size(u64 size, enum string_size_units units,
char *buf, int len);
#endif
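A minimal usage sketch (not part of this merge), mirroring what the sd and mmc_block changes do; the function name is invented for the example.

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void example_report_capacity(u64 bytes)
{
	char cap2[10], cap10[10];

	/* format the same byte count in SI and binary units */
	string_get_size(bytes, STRING_UNITS_2, cap2, sizeof(cap2));
	string_get_size(bytes, STRING_UNITS_10, cap10, sizeof(cap10));
	printk(KERN_INFO "capacity: %s (%s)\n", cap10, cap2);
}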

Просмотреть файл

@ -42,9 +42,11 @@ enum scsi_device_state {
* originate in the mid-layer) */
SDEV_OFFLINE, /* Device offlined (by error handling or
* user request */
SDEV_BLOCK, /* Device blocked by scsi lld. No scsi
* commands from user or midlayer should be issued
* to the scsi lld. */
SDEV_BLOCK, /* Device blocked by scsi lld. No
* scsi commands from user or midlayer
* should be issued to the scsi
* lld. */
SDEV_CREATED_BLOCK, /* same as above but for created devices */
};
enum scsi_device_event {
@ -384,10 +386,23 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
/*
* checks for positions of the SCSI state machine
*/
static inline int scsi_device_online(struct scsi_device *sdev)
{
return sdev->sdev_state != SDEV_OFFLINE;
}
static inline int scsi_device_blocked(struct scsi_device *sdev)
{
return sdev->sdev_state == SDEV_BLOCK ||
sdev->sdev_state == SDEV_CREATED_BLOCK;
}
static inline int scsi_device_created(struct scsi_device *sdev)
{
return sdev->sdev_state == SDEV_CREATED ||
sdev->sdev_state == SDEV_CREATED_BLOCK;
}
/* accessor functions for the SCSI parameters */
static inline int scsi_device_sync(struct scsi_device *sdev)

Просмотреть файл

@ -22,6 +22,9 @@
#ifndef SCSI_NETLINK_H
#define SCSI_NETLINK_H
#include <linux/netlink.h>
/*
* This file is intended to be included by both kernel and user space
*/
@ -55,7 +58,41 @@ struct scsi_nl_hdr {
#define SCSI_NL_TRANSPORT_FC 1
#define SCSI_NL_MAX_TRANSPORTS 2
/* scsi_nl_hdr->msgtype values are defined in each transport */
/* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */
/*
* GENERIC SCSI scsi_nl_hdr->msgtype Values
*/
/* kernel -> user */
#define SCSI_NL_SHOST_VENDOR 0x0001
/* user -> kernel */
/* SCSI_NL_SHOST_VENDOR msgtype is kernel->user and user->kernel */
/*
* Message Structures :
*/
/* macro to round up message lengths to an 8-byte boundary */
#define SCSI_NL_MSGALIGN(len) (((len) + 7) & ~7)
/*
* SCSI HOST Vendor Unique messages :
* SCSI_NL_SHOST_VENDOR
*
* Note: The Vendor Unique message payload will begin directly after
* this structure, with the length of the payload per vmsg_datalen.
*
* Note: When specifying vendor_id, be sure to read the Vendor Type and ID
* formatting requirements specified below
*/
struct scsi_nl_host_vendor_msg {
struct scsi_nl_hdr snlh; /* must be 1st element ! */
uint64_t vendor_id;
uint16_t host_no;
uint16_t vmsg_datalen;
} __attribute__((aligned(sizeof(uint64_t))));
/*
@ -83,5 +120,28 @@ struct scsi_nl_hdr {
}
#ifdef __KERNEL__
#include <scsi/scsi_host.h>
/* Exported Kernel Interfaces */
int scsi_nl_add_transport(u8 tport,
int (*msg_handler)(struct sk_buff *),
void (*event_handler)(struct notifier_block *, unsigned long, void *));
void scsi_nl_remove_transport(u8 tport);
int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
u32 len, u32 pid),
void (*nlevt_handler)(struct notifier_block *nb,
unsigned long event, void *notify_ptr));
void scsi_nl_remove_driver(u64 vendor_id);
void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr);
int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
char *data_buf, u32 data_len);
#endif /* __KERNEL__ */
#endif /* SCSI_NETLINK_H */

Просмотреть файл

@ -167,6 +167,26 @@ enum fc_tgtid_binding_type {
struct device_attribute dev_attr_vport_##_name = \
__ATTR(_name,_mode,_show,_store)
/*
* fc_vport_identifiers: This set of data contains all elements
* to uniquely identify and instantiate a FC virtual port.
*
* Notes:
* symbolic_name: The driver is to append the symbolic_name string data
* to the symbolic_node_name data that it generates by default.
* The resulting combination should then be registered with the switch.
* It is expected that things like Xen may stuff a VM title into
* this field.
*/
#define FC_VPORT_SYMBOLIC_NAMELEN 64
struct fc_vport_identifiers {
u64 node_name;
u64 port_name;
u32 roles;
bool disable;
enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
};
/*
* FC Virtual Port Attributes
@ -197,7 +217,6 @@ struct device_attribute dev_attr_vport_##_name = \
* managed by the transport w/o driver interaction.
*/
#define FC_VPORT_SYMBOLIC_NAMELEN 64
struct fc_vport {
/* Fixed Attributes */
@ -732,6 +751,8 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
* be sure to read the Vendor Type and ID formatting requirements
* specified in scsi_netlink.h
*/
struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
struct fc_vport_identifiers *);
int fc_vport_terminate(struct fc_vport *vport);
#endif /* SCSI_TRANSPORT_FC_H */

Просмотреть файл

@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG

64
lib/string_helpers.c Normal file
Просмотреть файл

@ -0,0 +1,64 @@
/*
* Helpers for formatting and printing strings
*
* Copyright 31 August 2008 James Bottomley
*/
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/string_helpers.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted
* @units: units to use (powers of 1000 or 1024)
* @buf: buffer to format to
* @len: length of buffer
*
* This function returns a string formatted to 3 significant figures
* giving the size in the required units. Returns 0 on success or
* error on failure. @buf is always zero terminated.
*
*/
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
"EB", "ZB", "YB", NULL};
const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
"EiB", "ZiB", "YiB", NULL };
const char **units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
const int divisor[] = {
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
int i, j;
u64 remainder = 0, sf_cap;
char tmp[8];
tmp[0] = '\0';
for (i = 0; size > divisor[units] && units_str[units][i]; i++)
remainder = do_div(size, divisor[units]);
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
if (j) {
remainder *= 1000;
do_div(remainder, divisor[units]);
snprintf(tmp, sizeof(tmp), ".%03lld",
(unsigned long long)remainder);
tmp[j+1] = '\0';
}
snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
tmp, units_str[units][i]);
return 0;
}
EXPORT_SYMBOL(string_get_size);