[S390] dasd: do path verification for paths added at runtime
When a new path is added at runtime, the CIO layer will call the
driver's path_event callback. The DASD device driver uses this callback
to trigger a path verification for the new path. The driver will use
only those paths for I/O that have been verified successfully.

Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: ef19298b40
Commit: a4d26c6aec
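Background for reading the diff: every path mask in this patch (lpm, opm, ppm, npm, and the new tbvpm, the "to be verified path mask") is an 8-bit mask with one bit per channel path, where bit 0x80 stands for channel path 0, as in the dasd_generic_path_event() loop further down. A standalone user-space sketch of that bit arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned char opm = 0;   /* operational path mask */
	unsigned char tbvpm = 0; /* paths still to be verified */
	int chp;

	/* path-available events for channel paths 0 and 3:
	 * each path index maps to one mask bit, 0x80 >> chp */
	for (chp = 0; chp < 8; chp++) {
		unsigned char eventlpm = 0x80 >> chp;
		if (chp == 0 || chp == 3)
			tbvpm |= eventlpm; /* queue the path for verification */
	}

	/* once verification succeeds, the bits move into opm and the
	 * paths become usable for I/O */
	opm |= tbvpm;
	tbvpm = 0;
	printf("opm=%#x tbvpm=%#x\n", opm, tbvpm); /* prints opm=0x90 tbvpm=0 */
	return 0;
}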
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -913,6 +913,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->startclk = get_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
@@ -925,35 +930,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
 	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
@@ -1175,12 +1198,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	 */
 	if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 	    cqr->retries > 0) {
-		if (cqr->lpm == LPM_ANYPATH)
+		if (cqr->lpm == device->path_data.opm)
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "default ERP in fastpath "
 				      "(%i retries left)",
 				      cqr->retries);
-		cqr->lpm = LPM_ANYPATH;
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = device->path_data.opm;
 		cqr->status = DASD_CQR_QUEUED;
 		next = cqr;
 	} else
@@ -1364,8 +1388,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* when device is stopped, return request to previous layer */
-	if (device->stopped) {
+	/* when device is stopped, return request to previous layer
+	 * exception: only the disconnect or unresumed bits are set and the
+	 * cqr is a path verification request
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_schedule_device_bh(device);
 		return;
@@ -1381,6 +1411,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
 		dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+			device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1455,6 +1502,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	__dasd_device_check_expire(device);
 	/* find final requests on ccw queue */
 	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
 	__dasd_device_process_final_queue(device, &final_queue);
@@ -2586,10 +2634,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
 	struct dasd_ccw_req *cqr;
 	int ret;
 
 	device = dasd_device_from_cdev_locked(cdev);
@@ -2600,41 +2691,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		/* First of all call extended error reporting. */
-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-		if (device->state < DASD_STATE_BASIC)
-			break;
-		/* Device is active. We want to keep it. */
-		list_for_each_entry(cqr, &device->ccw_queue, devlist)
-			if (cqr->status == DASD_CQR_IN_IO) {
-				cqr->status = DASD_CQR_QUEUED;
-				cqr->retries++;
-			}
-		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		dasd_device_clear_timer(device);
-		dasd_schedule_device_bh(device);
-		ret = 1;
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
-		/* FIXME: add a sanity check. */
-		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		if (device->stopped & DASD_UNRESUMED_PM) {
-			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-			dasd_restore_device(device);
-			ret = 1;
-			break;
-		}
-		dasd_schedule_device_bh(device);
-		if (device->block)
-			dasd_schedule_block_bh(device->block);
 		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
 		break;
 	}
 	dasd_put_device(device);
 	return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm)
+				dasd_generic_last_path_gone(device);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_ccw_req *cqr, *n;
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	//FIXME: start with get_opm ?
 	if (erp->lpm == 0)
-		erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+		erp->lpm = device->path_data.opm &
+			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
 
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 {
 	erp->function = dasd_3990_erp_action_1;
 	dasd_3990_erp_alternate_path(erp);
-	if (erp->status == DASD_CQR_FAILED) {
+	if (erp->status == DASD_CQR_FAILED &&
+	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = LPM_ANYPATH;
+		erp->lpm = erp->startdev->path_data.opm;
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
 static void
 dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 {
-
 	if (sense[25] & DASD_SENSE_BIT_3) {
 		dasd_3990_erp_alternate_path(erp);
 
-		if (erp->status == DASD_CQR_FAILED) {
+		if (erp->status == DASD_CQR_FAILED &&
+		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-
-			erp->lpm = 0;
+			erp->lpm = erp->startdev->path_data.opm;
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -639,6 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
 {
 	wake_up(&dasd_delete_wq);
 }
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
 
 /*
  * Return dasd_device structure associated with cdev.
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -619,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
 	.ebcname = "DIAG",
 	.max_blocks = DIAG_MAX_BLOCKS,
 	.check_device = dasd_diag_check_device,
+	.verify_path = dasd_generic_verify_path,
 	.fill_geometry = dasd_diag_fill_geometry,
 	.start_IO = dasd_start_diag,
 	.term_IO = dasd_diag_term_IO,
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -90,6 +90,18 @@ static struct {
 } *dasd_reserve_req;
 static DEFINE_MUTEX(dasd_reserve_mutex);
 
+/* definitions for the path verification worker */
+struct path_verification_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+	int isglobal;
+	__u8 tbvpm;
+};
+static struct path_verification_work_data *path_verification_worker;
+static DEFINE_MUTEX(dasd_path_verification_mutex);
+
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -755,26 +767,27 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
 	return -EINVAL;
 }
 
-static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
-						    void *rcd_buffer,
-						    struct ciw *ciw, __u8 lpm)
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+				   struct dasd_ccw_req *cqr,
+				   __u8 *rcd_buffer,
+				   __u8 lpm)
 {
-	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
-
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
-				   device);
-
-	if (IS_ERR(cqr)) {
-		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-			      "Could not allocate RCD request");
-		return cqr;
-	}
-
+	/*
+	 * buffer has to start with EBCDIC "V1.0" to show
+	 * support for virtual device SNEQ
+	 */
+	rcd_buffer[0] = 0xE5;
+	rcd_buffer[1] = 0xF1;
+	rcd_buffer[2] = 0x4B;
+	rcd_buffer[3] = 0xF0;
+
 	ccw = cqr->cpaddr;
-	ccw->cmd_code = ciw->cmd;
+	ccw->cmd_code = DASD_ECKD_CCW_RCD;
+	ccw->flags = 0;
 	ccw->cda = (__u32)(addr_t)rcd_buffer;
-	ccw->count = ciw->count;
+	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+	cqr->magic = DASD_ECKD_MAGIC;
 
 	cqr->startdev = device;
 	cqr->memdev = device;
@@ -784,7 +797,29 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
-	return cqr;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+					   struct dasd_ccw_req *cqr,
+					   __u8 *rcd_buffer,
+					   __u8 lpm)
+{
+	struct ciw *ciw;
+	int rc;
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+		return -EOPNOTSUPP;
+
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 5;
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
 }
 
 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
@@ -797,32 +832,29 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 	struct dasd_ccw_req *cqr;
 
 	/*
-	 * scan for RCD command in extended SenseID data
+	 * sanity check: scan for RCD command in extended SenseID data
 	 * some devices do not support RCD
 	 */
 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
-	if (!ciw || ciw->cmd == 0) {
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
 		ret = -EOPNOTSUPP;
 		goto out_error;
 	}
-	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!rcd_buf) {
 		ret = -ENOMEM;
 		goto out_error;
 	}
-	/*
-	 * buffer has to start with EBCDIC "V1.0" to show
-	 * support for virtual device SNEQ
-	 */
-	rcd_buf[0] = 0xE5;
-	rcd_buf[1] = 0xF1;
-	rcd_buf[2] = 0x4B;
-	rcd_buf[3] = 0xF0;
-	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data ara */
+				   device);
 	if (IS_ERR(cqr)) {
-		ret = PTR_ERR(cqr);
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate RCD request");
+		ret = -ENOMEM;
 		goto out_error;
 	}
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
 	ret = dasd_sleep_on(cqr);
 	/*
 	 * on success we update the user input parms
@@ -831,7 +863,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 	if (ret)
 		goto out_error;
 
-	*rcd_buffer_size = ciw->count;
+	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
 	*rcd_buffer = rcd_buf;
 	return 0;
 out_error:
@@ -901,18 +933,18 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	void *conf_data;
 	int conf_len, conf_data_saved;
 	int rc;
-	__u8 lpm;
+	__u8 lpm, opm;
 	struct dasd_eckd_private *private;
-	struct dasd_eckd_path *path_data;
+	struct dasd_path *path_data;
 
 	private = (struct dasd_eckd_private *) device->private;
-	path_data = (struct dasd_eckd_path *) &private->path_data;
-	path_data->opm = ccw_device_get_path_mask(device->cdev);
+	path_data = &device->path_data;
+	opm = ccw_device_get_path_mask(device->cdev);
 	lpm = 0x80;
 	conf_data_saved = 0;
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm >>= 1) {
-		if (lpm & path_data->opm){
+		if (lpm & opm) {
 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
 						     &conf_len, lpm);
 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
@@ -925,6 +957,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 						"No configuration data "
 						"retrieved");
+				/* no further analysis possible */
+				path_data->opm |= lpm;
 				continue;	/* no error */
 			}
 			/* save first valid configuration data */
@@ -948,6 +982,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 				path_data->ppm |= lpm;
 				break;
 			}
+			path_data->opm |= lpm;
 			if (conf_data != private->conf_data)
 				kfree(conf_data);
 		}
@@ -955,6 +990,140 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	return 0;
 }
 
+static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
+{
+	struct dasd_eckd_private *private;
+	int mdc;
+	u32 fcx_max_data;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (private->fcx_max_data) {
+		mdc = ccw_device_get_mdc(device->cdev, lpm);
+		if ((mdc < 0)) {
+			dev_warn(&device->cdev->dev,
+				 "Detecting the maximum data size for zHPF "
+				 "requests failed (rc=%d) for a new path %x\n",
+				 mdc, lpm);
+			return mdc;
+		}
+		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+		if (fcx_max_data < private->fcx_max_data) {
+			dev_warn(&device->cdev->dev,
+				 "The maximum data size for zHPF requests %u "
+				 "on a new path %x is below the active maximum "
+				 "%u\n", fcx_max_data, lpm,
+				 private->fcx_max_data);
+			return -EACCES;
+		}
+	}
+	return 0;
+}
+
+static void do_path_verification_work(struct work_struct *work)
+{
+	struct path_verification_work_data *data;
+	struct dasd_device *device;
+	__u8 lpm, opm, npm, ppm, epm;
+	unsigned long flags;
+	int rc;
+
+	data = container_of(work, struct path_verification_work_data, worker);
+	device = data->device;
+
+	opm = 0;
+	npm = 0;
+	ppm = 0;
+	epm = 0;
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (lpm & data->tbvpm) {
+			memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+			memset(&data->cqr, 0, sizeof(data->cqr));
+			data->cqr.cpaddr = &data->ccw;
+			rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+							     data->rcd_buffer,
+							     lpm);
+			if (!rc) {
+				switch (dasd_eckd_path_access(data->rcd_buffer,
+						      DASD_ECKD_RCD_DATA_SIZE)) {
+				case 0x02:
+					npm |= lpm;
+					break;
+				case 0x03:
+					ppm |= lpm;
+					break;
+				}
+				opm |= lpm;
+			} else if (rc == -EOPNOTSUPP) {
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+						"path verification: No configuration "
+						"data retrieved");
+				opm |= lpm;
+			} else if (rc == -EAGAIN) {
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+						"path verification: device is stopped,"
+						" try again later");
+				epm |= lpm;
+			} else {
+				dev_warn(&device->cdev->dev,
+					 "Reading device feature codes failed "
+					 "(rc=%d) for new path %x\n", rc, lpm);
+				continue;
+			}
+			if (verify_fcx_max_data(device, lpm)) {
+				opm &= ~lpm;
+				npm &= ~lpm;
+				ppm &= ~lpm;
+			}
+		}
+	}
+	/*
+	 * There is a small chance that a path is lost again between
+	 * above path verification and the following modification of
+	 * the device opm mask. We could avoid that race here by using
+	 * yet another path mask, but we rather deal with this unlikely
+	 * situation in dasd_start_IO.
+	 */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (!device->path_data.opm && opm) {
+		device->path_data.opm = opm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= opm;
+	device->path_data.npm |= npm;
+	device->path_data.ppm |= ppm;
+	device->path_data.tbvpm |= epm;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	dasd_put_device(device);
+	if (data->isglobal)
+		mutex_unlock(&dasd_path_verification_mutex);
+	else
+		kfree(data);
+}
+
+static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	struct path_verification_work_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+	if (!data) {
+		if (mutex_trylock(&dasd_path_verification_mutex)) {
+			data = path_verification_worker;
+			data->isglobal = 1;
+		} else
+			return -ENOMEM;
+	} else {
+		memset(data, 0, sizeof(*data));
+		data->isglobal = 0;
+	}
+	INIT_WORK(&data->worker, do_path_verification_work);
+	dasd_get_device(device);
+	data->device = device;
+	data->tbvpm = lpm;
+	schedule_work(&data->worker);
+	return 0;
+}
+
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
 	struct dasd_psf_prssd_data *prssdp;
@@ -1749,6 +1918,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
 		cqr->startdev = cqr->block->base;
+		cqr->lpm = cqr->block->base->path_data.opm;
 	}
 };
 
@@ -2017,7 +2187,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -2194,7 +2364,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -2484,7 +2654,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3624,6 +3794,7 @@ static struct ccw_driver dasd_eckd_driver = {
 	.set_offline = dasd_generic_set_offline,
 	.set_online  = dasd_eckd_set_online,
 	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
 	.freeze      = dasd_generic_pm_freeze,
 	.thaw	     = dasd_generic_restore_device,
 	.restore     = dasd_generic_restore_device,
@@ -3651,6 +3822,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
 	.check_device = dasd_eckd_check_characteristics,
 	.uncheck_device = dasd_eckd_uncheck_device,
 	.do_analysis = dasd_eckd_do_analysis,
+	.verify_path = dasd_eckd_verify_path,
 	.ready_to_online = dasd_eckd_ready_to_online,
 	.online_to_ready = dasd_eckd_online_to_ready,
 	.fill_geometry = dasd_eckd_fill_geometry,
@@ -3683,11 +3855,19 @@ dasd_eckd_init(void)
 					GFP_KERNEL | GFP_DMA);
 	if (!dasd_reserve_req)
 		return -ENOMEM;
+	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
+					   GFP_KERNEL | GFP_DMA);
+	if (!path_verification_worker) {
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
 	ret = ccw_driver_register(&dasd_eckd_driver);
 	if (!ret)
 		wait_for_device_probe();
-	else
+	else {
+		kfree(path_verification_worker);
 		kfree(dasd_reserve_req);
+	}
 	return ret;
 }
 
@@ -3695,6 +3875,7 @@ static void __exit
 dasd_eckd_cleanup(void)
 {
 	ccw_driver_unregister(&dasd_eckd_driver);
+	kfree(path_verification_worker);
 	kfree(dasd_reserve_req);
 }
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -45,6 +45,7 @@
 #define DASD_ECKD_CCW_PFX		 0xE7
 #define DASD_ECKD_CCW_PFX_READ		 0xEA
 #define DASD_ECKD_CCW_RSCK		 0xF9
+#define DASD_ECKD_CCW_RCD		 0xFA
 
 /*
  * Perform Subsystem Function / Sub-Orders
@@ -59,6 +60,7 @@
 
 
 #define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
 
 
 /*****************************************************************************
@@ -335,12 +337,6 @@ struct dasd_gneq {
 	__u8 reserved2[22];
 } __attribute__ ((packed));
 
-struct dasd_eckd_path {
-	__u8 opm;
-	__u8 ppm;
-	__u8 npm;
-};
-
 struct dasd_rssd_features {
 	char feature[256];
 } __attribute__((packed));
@@ -446,7 +442,6 @@ struct dasd_eckd_private {
 	struct vd_sneq *vdsneq;
 	struct dasd_gneq *gneq;
 
-	struct dasd_eckd_path path_data;
 	struct eckd_count count_area[5];
 	int init_cqr_status;
 	int uses_cdl;
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
 		DBF_DEV_EVENT(DBF_DEBUG, device,
 			      "default ERP called (%i retries left)",
 			      cqr->retries);
-		cqr->lpm = LPM_ANYPATH;
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = device->path_data.opm;
 		cqr->status = DASD_CQR_FILLED;
 	} else {
 		pr_err("%s: default ERP has run out of retries and failed\n",
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = {
 	.set_offline = dasd_generic_set_offline,
 	.set_online  = dasd_fba_set_online,
 	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
 	.freeze      = dasd_generic_pm_freeze,
 	.thaw	     = dasd_generic_restore_device,
 	.restore     = dasd_generic_restore_device,
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	}
 
 	device->default_expires = DASD_EXPIRES;
+	device->path_data.opm = LPM_ANYPATH;
 
 	readonly = dasd_device_is_ro(device);
 	if (readonly)
@@ -596,6 +598,7 @@ static struct dasd_discipline dasd_fba_discipline = {
 	.max_blocks = 96,
 	.check_device = dasd_fba_check_characteristics,
 	.do_analysis = dasd_fba_do_analysis,
+	.verify_path = dasd_generic_verify_path,
 	.fill_geometry = dasd_fba_fill_geometry,
 	.start_IO = dasd_start_IO,
 	.term_IO = dasd_term_IO,
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,7 @@ struct dasd_ccw_req {
 /* per dasd_ccw_req flags */
 #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
 #define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
+#define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
 
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -286,6 +287,14 @@ struct dasd_discipline {
 	 */
 	int (*do_analysis) (struct dasd_block *);
 
+	/*
+	 * This function is called, when new paths become available.
+	 * Disciplins may use this callback to do necessary setup work,
+	 * e.g. verify that new path is compatible with the current
+	 * configuration.
+	 */
+	int (*verify_path)(struct dasd_device *, __u8);
+
 	/*
 	 * Last things to do when a device is set online, and first things
 	 * when it is set offline.
@@ -362,6 +371,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
 #define DASD_EER_STATECHANGE 3
 #define DASD_EER_PPRCSUSPEND 4
 
+struct dasd_path {
+	__u8 opm;
+	__u8 tbvpm;
+	__u8 ppm;
+	__u8 npm;
+};
+
 struct dasd_device {
 	/* Block device stuff. */
 	struct dasd_block *block;
@@ -377,6 +393,7 @@ struct dasd_device {
 	struct dasd_discipline *discipline;
 	struct dasd_discipline *base_discipline;
 	char *private;
+	struct dasd_path path_data;
 
 	/* Device state and target state. */
 	int state, target;
@@ -620,10 +637,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
 int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
 int dasd_generic_set_offline (struct ccw_device *cdev);
 int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
+
 void dasd_generic_handle_state_change(struct dasd_device *);
 int dasd_generic_pm_freeze(struct ccw_device *);
 int dasd_generic_restore_device(struct ccw_device *);
 enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
 
 int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
 char *dasd_get_sense(struct irb *);
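As a recap of the flow this patch introduces: the CIO layer reports a new path through the path_event callback, dasd_generic_path_event() records it in tbvpm, and the device tasklet later passes tbvpm to the discipline's verify_path hook, which folds verified bits into opm. The toy sketch below restates that contract with simplified user-space types (it is not the kernel code; in the patch itself FBA and DIAG point verify_path at dasd_generic_verify_path, while ECKD provides a worker-based implementation):

#include <stdio.h>

typedef unsigned char u8;

struct dasd_device;

struct dasd_discipline {
	int (*verify_path)(struct dasd_device *, u8);
};

struct dasd_path { u8 opm, tbvpm, ppm, npm; };

struct dasd_device {
	struct dasd_path path_data;
	struct dasd_discipline *discipline;
};

/* loosely mirrors dasd_generic_verify_path() from the patch: accept the
 * new paths without further checks and fold them into the operational mask */
static int generic_verify_path(struct dasd_device *device, u8 lpm)
{
	if (!device->path_data.opm && lpm)
		printf("first path became operational: %#x\n", lpm);
	device->path_data.opm |= lpm;
	return 0;
}

int main(void)
{
	struct dasd_discipline disc = { .verify_path = generic_verify_path };
	struct dasd_device dev = { .discipline = &disc };

	dev.path_data.tbvpm |= 0x40;	/* path_event: channel path 1 available */

	/* as in __dasd_device_check_path_events(): verify, then clear tbvpm */
	if (!dev.discipline->verify_path(&dev, dev.path_data.tbvpm))
		dev.path_data.tbvpm = 0;

	printf("opm=%#x tbvpm=%#x\n", dev.path_data.opm, dev.path_data.tbvpm);
	return 0;
}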