s390/dasd: Implement block timeout handling

This patch implements generic block layer timeout handling
callbacks for DASDs. When the timeout expires, the respective
cqr is aborted.

With this timeout handler, a time-critical request abort
is guaranteed, as the abort does not depend on the internal
state of the various DASD driver queues.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Acked-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Hannes Reinecke authored 2013-01-30 09:26:14 +00:00, committed by Martin Schwidefsky
Parent: 1fbdb8be9b
Commit: a2ace46632
4 changed files, 88 additions and 2 deletions
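For context: the block layer calls a driver-registered rq_timed_out callback once a started request exceeds its queue timeout, and the hunks below add exactly such a callback for DASD. The registration itself is not part of the hunks shown on this page; the sketch below only illustrates how a handler like dasd_times_out() would be wired up with the legacy (pre-blk-mq) request-queue API of this kernel generation. dasd_setup_timeout() and DASD_TIMEOUT_SECS are made-up names for this illustration.

#include <linux/blkdev.h>

#define DASD_TIMEOUT_SECS	30	/* illustrative value, not from the patch */

/* Sketch only: attach the timeout callback and a per-request timeout
 * to the DASD request queue (e.g. when the queue is allocated). */
static void dasd_setup_timeout(struct request_queue *q)
{
	/* called by the block layer when a started request times out */
	blk_queue_rq_timed_out(q, dasd_times_out);
	/* per-request timeout in jiffies */
	blk_queue_rq_timeout(q, DASD_TIMEOUT_SECS * HZ);
}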

drivers/s390/block/dasd.c

@@ -2573,8 +2573,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		 */
 		cqr->callback_data = (void *) req;
 		cqr->status = DASD_CQR_FILLED;
+		req->completion_data = cqr;
 		blk_start_request(req);
 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		INIT_LIST_HEAD(&cqr->devlist);
 		dasd_profile_start(block, cqr, req);
 	}
 }
@@ -2861,6 +2863,80 @@ static void do_dasd_request(struct request_queue *queue)
 	spin_unlock(&block->queue_lock);
 }
 
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ *		      by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+	struct dasd_ccw_req *cqr = req->completion_data;
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	int rc = 0;
+
+	if (!cqr)
+		return BLK_EH_NOT_HANDLED;
+
+	device = cqr->startdev ? cqr->startdev : block->base;
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		spin_unlock(get_ccwdev_lock(device->cdev));
+		rc = dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				spin_unlock(get_ccwdev_lock(device->cdev));
+				rc = dasd_cancel_req(searchcqr);
+				spin_lock(get_ccwdev_lock(device->cdev));
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of queue
+				 */
+				continue;
+			}
+			break;
+		}
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */

drivers/s390/block/dasd_diag.c

@@ -583,7 +583,10 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 
 static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-	cqr->status = DASD_CQR_FILLED;
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
 };
 
 /* Fill in IOCTL data for device. */

drivers/s390/block/dasd_eckd.c

@@ -2381,6 +2381,10 @@ sleep:
 
 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
+	if (cqr->retries < 0) {
+		cqr->status = DASD_CQR_FAILED;
+		return;
+	}
 	cqr->status = DASD_CQR_FILLED;
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);

drivers/s390/block/dasd_fba.c

@@ -428,7 +428,10 @@ out:
 
 static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-	cqr->status = DASD_CQR_FILLED;
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
 };
 
 static int
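
The DIAG, ECKD and FBA hunks above all apply the same rule once dasd_times_out() has forced the retry counter negative. As a reading aid, here is a consolidated sketch of that shared pattern; the helper name is hypothetical, and the patch itself keeps one copy per discipline:

/* Hypothetical common helper, shown only to summarize the pattern added
 * above: a cqr whose retries counter went negative (e.g. set to -1 by
 * dasd_times_out()) is failed instead of being prepared for another try. */
static void dasd_handle_terminated_request_common(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0)
		cqr->status = DASD_CQR_FAILED;	/* timed out, give up */
	else
		cqr->status = DASD_CQR_FILLED;	/* requeue for a retry */
}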