s390/dasd: simplify locking in dasd_times_out

Provide __dasd_cancel_req that is called with the ccw device lock
held to simplify the locking in dasd_times_out. Also this removes
the following sparse warning:
context imbalance in 'dasd_times_out' - different lock contexts for basic block

Note: with this change dasd_schedule_device_bh is now called (via
dasd_cancel_req) with the ccw device lock held. But this is already
the case for other codepaths.

Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
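
The locking pattern used by the patch, shown below as a minimal stand-alone sketch (plain pthreads and made-up names such as __cancel_request and dev_lock; this is not the dasd code itself): a "__" helper that expects its caller to already hold the lock, plus a public wrapper that acquires the lock and delegates to the helper. A caller that already holds the lock, such as a timeout handler, then calls the helper directly instead of dropping and re-acquiring the lock around the public function.

/*
 * Hypothetical illustration only (NOT the dasd code): __cancel_request()
 * must be called with dev_lock held; cancel_request() is the public
 * wrapper that takes the lock itself.  The "__" prefix mirrors the
 * kernel naming convention for such lock-held variants.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_state;

/* Caller must hold dev_lock. */
static int __cancel_request(int req)
{
	dev_state = req;	/* touch state protected by dev_lock */
	return 0;
}

/* Public entry point: take the lock, delegate to the helper. */
static int cancel_request(int req)
{
	int rc;

	pthread_mutex_lock(&dev_lock);
	rc = __cancel_request(req);
	pthread_mutex_unlock(&dev_lock);
	return rc;
}

/* A timeout path that already holds dev_lock for its own bookkeeping. */
static int timeout_handler(int req)
{
	int rc;

	pthread_mutex_lock(&dev_lock);
	/* ... inspect per-device state under the lock ... */
	rc = __cancel_request(req);	/* no unlock/relock dance needed */
	pthread_mutex_unlock(&dev_lock);
	return rc;
}

int main(void)
{
	cancel_request(1);
	timeout_handler(2);
	printf("final state: %d\n", dev_state);
	return 0;
}

The commit applies this same split to dasd_cancel_req/__dasd_cancel_req, so dasd_times_out, which runs with the ccw device lock held, can cancel requests without the unlock/relock sequences that triggered the sparse warning.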
Sebastian Ott authored 2018-05-16 11:25:21 +02:00, committed by Martin Schwidefsky
Parent a166c368e7
Commit 1bcdb5354a
1 changed file with 17 additions and 14 deletions


@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
  */
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device = cqr->startdev;
-	unsigned long flags;
-	int rc;
+	int rc = 0;
 
-	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	switch (cqr->status) {
 	case DASD_CQR_QUEUED:
 		/* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	default: /* already finished or clear pending - do nothing */
 		break;
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	dasd_schedule_device_bh(device);
 	return rc;
 }
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+	struct dasd_device *device = cqr->startdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	rc = __dasd_cancel_req(cqr);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	return rc;
+}
 
 /*
  * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 	cqr->retries = -1;
 	cqr->intrc = -ETIMEDOUT;
 	if (cqr->status >= DASD_CQR_QUEUED) {
-		spin_unlock(get_ccwdev_lock(device->cdev));
-		rc = dasd_cancel_req(cqr);
+		rc = __dasd_cancel_req(cqr);
 	} else if (cqr->status == DASD_CQR_FILLED ||
 		   cqr->status == DASD_CQR_NEED_ERP) {
 		cqr->status = DASD_CQR_TERMINATED;
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	} else if (cqr->status == DASD_CQR_IN_ERP) {
 		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
 
@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			searchcqr->retries = -1;
 			searchcqr->intrc = -ETIMEDOUT;
 			if (searchcqr->status >= DASD_CQR_QUEUED) {
-				spin_unlock(get_ccwdev_lock(device->cdev));
-				rc = dasd_cancel_req(searchcqr);
-				spin_lock(get_ccwdev_lock(device->cdev));
+				rc = __dasd_cancel_req(searchcqr);
 			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
 				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
 				searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 			}
 			break;
 		}
-		spin_unlock(get_ccwdev_lock(device->cdev));
 	}
+	spin_unlock(get_ccwdev_lock(device->cdev));
 	dasd_schedule_block_bh(block);
 	spin_unlock(&block->queue_lock);
 	spin_unlock_irqrestore(&cqr->dq->lock, flags);