s390/scm_blk: suspend writes
Stop writing to the SCM device after certain error conditions, such as a concurrent firmware upgrade. Resume normal operation once scm_blk_set_available is called (due to an SCM availability notification).

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Parent: 93481c9020
Commit: 4fa3c01964
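The hunks below add a simple two-state write gate to the SCM block driver. As a rough orientation before reading the diff, here is a minimal, self-contained user-space sketch of that state machine; the names SCM_OPER, SCM_WR_PROHIBIT and EQC_WR_PROHIBIT mirror the patch, but the device struct, locking, request and AOB handling are deliberately reduced to plain values for illustration and are not the driver code itself.

/*
 * Minimal sketch of the write-suspend state machine introduced by this
 * patch.  Hypothetical, simplified types; not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define EQC_WR_PROHIBIT 22      /* exception qualifier: writes prohibited */

enum scm_state { SCM_OPER, SCM_WR_PROHIBIT };

struct scm_blk_dev_sketch {
        enum scm_state state;
};

/* Mirrors scm_permit_request(): reads always pass, writes only in SCM_OPER. */
static bool permit_request(struct scm_blk_dev_sketch *bdev, bool is_write)
{
        return !is_write || bdev->state != SCM_WR_PROHIBIT;
}

/* Mirrors the EQC_WR_PROHIBIT branch of scm_blk_handle_error(). */
static void handle_error(struct scm_blk_dev_sketch *bdev, int eqc)
{
        if (eqc == EQC_WR_PROHIBIT && bdev->state != SCM_WR_PROHIBIT) {
                printf("Write access suspended\n");
                bdev->state = SCM_WR_PROHIBIT;
        }
}

/* Mirrors scm_blk_set_available(): an availability notification clears the suspend. */
static void set_available(struct scm_blk_dev_sketch *bdev)
{
        if (bdev->state == SCM_WR_PROHIBIT)
                printf("Write access restored\n");
        bdev->state = SCM_OPER;
}

int main(void)
{
        struct scm_blk_dev_sketch bdev = { .state = SCM_OPER };

        printf("write allowed: %d\n", permit_request(&bdev, true));  /* 1 */
        handle_error(&bdev, EQC_WR_PROHIBIT);                        /* suspend writes */
        printf("write allowed: %d\n", permit_request(&bdev, true));  /* 0 */
        printf("read  allowed: %d\n", permit_request(&bdev, false)); /* 1 */
        set_available(&bdev);                                        /* resume */
        printf("write allowed: %d\n", permit_request(&bdev, true));  /* 1 */
        return 0;
}

In the real driver the state field is protected by bdev->lock and rejected requests are requeued and restarted from the tasklet, which this sketch omits.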
@@ -34,6 +34,8 @@ struct arsb {
         u32 reserved[4];
 } __packed;
 
+#define EQC_WR_PROHIBIT 22
+
 struct msb {
         u8 fmt:4;
         u8 oc:4;
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = {
         .release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+        return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
@@ -222,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq)
                 if (req->cmd_type != REQ_TYPE_FS)
                         continue;
 
+                if (!scm_permit_request(bdev, req)) {
+                        scm_ensure_queue_restart(bdev);
+                        return;
+                }
                 scmrq = scm_request_fetch();
                 if (!scmrq) {
                         SCM_LOG(5, "no request");
@@ -285,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
         tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        unsigned long flags;
+
+        if (scmrq->error != -EIO)
+                goto restart;
+
+        /* For -EIO the response block is valid. */
+        switch (scmrq->aob->response.eqc) {
+        case EQC_WR_PROHIBIT:
+                spin_lock_irqsave(&bdev->lock, flags);
+                if (bdev->state != SCM_WR_PROHIBIT)
+                        pr_info("%lu: Write access to the SCM increment is suspended\n",
+                                (unsigned long) bdev->scmdev->address);
+                bdev->state = SCM_WR_PROHIBIT;
+                spin_unlock_irqrestore(&bdev->lock, flags);
+                goto requeue;
+        default:
+                break;
+        }
+
+restart:
+        if (!scm_start_aob(scmrq->aob))
+                return;
+
+requeue:
+        spin_lock_irqsave(&bdev->rq_lock, flags);
+        scm_request_requeue(scmrq);
+        spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
         struct scm_request *scmrq;
@@ -298,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                 spin_unlock_irqrestore(&bdev->lock, flags);
 
                 if (scmrq->error && scmrq->retries-- > 0) {
-                        if (scm_start_aob(scmrq->aob)) {
-                                spin_lock_irqsave(&bdev->rq_lock, flags);
-                                scm_request_requeue(scmrq);
-                                spin_unlock_irqrestore(&bdev->rq_lock, flags);
-                        }
+                        scm_blk_handle_error(scmrq);
+
                         /* Request restarted or requeued, handle next. */
                         spin_lock_irqsave(&bdev->lock, flags);
                         continue;
@@ -336,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
         }
 
         bdev->scmdev = scmdev;
+        bdev->state = SCM_OPER;
         spin_lock_init(&bdev->rq_lock);
         spin_lock_init(&bdev->lock);
         INIT_LIST_HEAD(&bdev->finished_requests);
@@ -400,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
         put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&bdev->lock, flags);
+        if (bdev->state == SCM_WR_PROHIBIT)
+                pr_info("%lu: Write access to the SCM increment is restored\n",
+                        (unsigned long) bdev->scmdev->address);
+        bdev->state = SCM_OPER;
+        spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
         int ret = -EINVAL;
@@ -21,6 +21,7 @@ struct scm_blk_dev {
         spinlock_t rq_lock;     /* guard the request queue */
         spinlock_t lock;        /* guard the rest of the blockdev */
         atomic_t queued_reqs;
+        enum {SCM_OPER, SCM_WR_PROHIBIT} state;
         struct list_head finished_requests;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
         struct list_head cluster_list;
@@ -48,6 +49,7 @@ struct scm_request {
 
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
 void scm_blk_irq(struct scm_device *, void *, int);
 
 void scm_request_finish(struct scm_request *);