ms_block: convert to blk-mq
Straightforward conversion; there is room for optimization in how everything
is punted to a work queue. The state handling also looks plenty racy all over
the map. I fixed a bunch of the races up while doing the conversion, but there
are surely more.

Cc: Maxim Levitsky <maximlevitsky@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: fa182a1fa9
Commit: db1142a83b
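For orientation before reading the diff, the sketch below shows the general shape of the pattern this conversion follows: a single-hardware-queue blk-mq driver whose queue_rq handler only parks the request and punts the real I/O to a work queue. It is a minimal illustration, not part of the patch; the names my_data, my_queue_rq and my_io_setup are placeholders, and the ms_block specifics live in the diff that follows.

/*
 * Illustrative sketch only: single-hw-queue blk-mq driver that defers the
 * actual I/O to a workqueue. Placeholder names; workqueue allocation and
 * INIT_WORK() setup are omitted for brevity.
 */
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_data {
	spinlock_t		q_lock;
	struct request		*req;		/* the single request in flight */
	struct workqueue_struct	*io_wq;
	struct work_struct	io_work;
	struct blk_mq_tag_set	tag_set;
	struct request_queue	*queue;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_data *d = hctx->queue->queuedata;
	struct request *req = bd->rq;

	spin_lock_irq(&d->q_lock);
	if (d->req) {
		/* Busy: have blk-mq re-run the queue once we free up */
		spin_unlock_irq(&d->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}
	blk_mq_start_request(req);
	d->req = req;
	queue_work(d->io_wq, &d->io_work);	/* punt the real work */
	spin_unlock_irq(&d->q_lock);
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
};

/* One hardware queue, queue depth 2, as in the conversion below. */
static int my_io_setup(struct my_data *d)
{
	d->queue = blk_mq_init_sq_queue(&d->tag_set, &my_mq_ops, 2,
					BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(d->queue))
		return PTR_ERR(d->queue);
	d->queue->queuedata = d;
	return 0;
}

Returning BLK_STS_DEV_RESOURCE while a request is already in flight asks blk-mq to restart the queue later, which is how the one-request-at-a-time model of the old request_fn survives the conversion; the work item then completes the request, much as msb_io_work() does with blk_update_request()/__blk_mq_end_request() in the patch below.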
drivers/memstick/core/ms_block.c
@@ -15,7 +15,7 @@
 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
 
 #include <linux/module.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/memstick.h>
 #include <linux/idr.h>
 #include <linux/hdreg.h>
@@ -1873,69 +1873,65 @@ static void msb_io_work(struct work_struct *work)
 	struct msb_data *msb = container_of(work, struct msb_data, io_work);
 	int page, error, len;
 	sector_t lba;
-	unsigned long flags;
 	struct scatterlist *sg = msb->prealloc_sg;
+	struct request *req;
 
 	dbg_verbose("IO: work started");
 
 	while (1) {
-		spin_lock_irqsave(&msb->q_lock, flags);
+		spin_lock_irq(&msb->q_lock);
 
 		if (msb->need_flush_cache) {
 			msb->need_flush_cache = false;
-			spin_unlock_irqrestore(&msb->q_lock, flags);
+			spin_unlock_irq(&msb->q_lock);
 			msb_cache_flush(msb);
 			continue;
 		}
 
-		if (!msb->req) {
-			msb->req = blk_fetch_request(msb->queue);
-			if (!msb->req) {
-				dbg_verbose("IO: no more requests exiting");
-				spin_unlock_irqrestore(&msb->q_lock, flags);
-				return;
-			}
+		req = msb->req;
+		if (!req) {
+			dbg_verbose("IO: no more requests exiting");
+			spin_unlock_irq(&msb->q_lock);
+			return;
 		}
 
-		spin_unlock_irqrestore(&msb->q_lock, flags);
-
-		/* If card was removed meanwhile */
-		if (!msb->req)
-			return;
+		spin_unlock_irq(&msb->q_lock);
 
 		/* process the request */
 		dbg_verbose("IO: processing new request");
-		blk_rq_map_sg(msb->queue, msb->req, sg);
+		blk_rq_map_sg(msb->queue, req, sg);
 
-		lba = blk_rq_pos(msb->req);
+		lba = blk_rq_pos(req);
 
 		sector_div(lba, msb->page_size / 512);
 		page = sector_div(lba, msb->pages_in_block);
 
 		if (rq_data_dir(msb->req) == READ)
 			error = msb_do_read_request(msb, lba, page, sg,
-				blk_rq_bytes(msb->req), &len);
+				blk_rq_bytes(req), &len);
 		else
 			error = msb_do_write_request(msb, lba, page, sg,
-				blk_rq_bytes(msb->req), &len);
-
-		spin_lock_irqsave(&msb->q_lock, flags);
+				blk_rq_bytes(req), &len);
 
-		if (len)
-			if (!__blk_end_request(msb->req, BLK_STS_OK, len))
-				msb->req = NULL;
+		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
+			__blk_mq_end_request(req, BLK_STS_OK);
+			spin_lock_irq(&msb->q_lock);
+			msb->req = NULL;
+			spin_unlock_irq(&msb->q_lock);
+		}
 
 		if (error && msb->req) {
 			blk_status_t ret = errno_to_blk_status(error);
 
 			dbg_verbose("IO: ending one sector of the request with error");
-			if (!__blk_end_request(msb->req, ret, msb->page_size))
-				msb->req = NULL;
+			blk_mq_end_request(req, ret);
+			spin_lock_irq(&msb->q_lock);
+			msb->req = NULL;
+			spin_unlock_irq(&msb->q_lock);
 		}
 
 		if (msb->req)
 			dbg_verbose("IO: request still pending");
 
-		spin_unlock_irqrestore(&msb->q_lock, flags);
 	}
 }
@@ -2002,29 +1998,40 @@ static int msb_bd_getgeo(struct block_device *bdev,
 	return 0;
 }
 
-static void msb_submit_req(struct request_queue *q)
+static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
+				 const struct blk_mq_queue_data *bd)
 {
-	struct memstick_dev *card = q->queuedata;
+	struct memstick_dev *card = hctx->queue->queuedata;
 	struct msb_data *msb = memstick_get_drvdata(card);
-	struct request *req = NULL;
+	struct request *req = bd->rq;
 
 	dbg_verbose("Submit request");
 
+	spin_lock_irq(&msb->q_lock);
+
 	if (msb->card_dead) {
 		dbg("Refusing requests on removed card");
 
 		WARN_ON(!msb->io_queue_stopped);
 
-		while ((req = blk_fetch_request(q)) != NULL)
-			__blk_end_request_all(req, BLK_STS_IOERR);
-		return;
+		spin_unlock_irq(&msb->q_lock);
+		blk_mq_start_request(req);
+		return BLK_STS_IOERR;
 	}
 
-	if (msb->req)
-		return;
+	if (msb->req) {
+		spin_unlock_irq(&msb->q_lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+
+	blk_mq_start_request(req);
+	msb->req = req;
 
 	if (!msb->io_queue_stopped)
 		queue_work(msb->io_queue, &msb->io_work);
+
+	spin_unlock_irq(&msb->q_lock);
+	return BLK_STS_OK;
 }
 
 static int msb_check_card(struct memstick_dev *card)
@@ -2040,21 +2047,20 @@ static void msb_stop(struct memstick_dev *card)
 
 	dbg("Stopping all msblock IO");
 
+	blk_mq_stop_hw_queues(msb->queue);
 	spin_lock_irqsave(&msb->q_lock, flags);
-	blk_stop_queue(msb->queue);
 	msb->io_queue_stopped = true;
 	spin_unlock_irqrestore(&msb->q_lock, flags);
 
 	del_timer_sync(&msb->cache_flush_timer);
 	flush_workqueue(msb->io_queue);
 
+	spin_lock_irqsave(&msb->q_lock, flags);
 	if (msb->req) {
-		spin_lock_irqsave(&msb->q_lock, flags);
-		blk_requeue_request(msb->queue, msb->req);
+		blk_mq_requeue_request(msb->req, false);
 		msb->req = NULL;
-		spin_unlock_irqrestore(&msb->q_lock, flags);
 	}
-
+	spin_unlock_irqrestore(&msb->q_lock, flags);
 }
 
 static void msb_start(struct memstick_dev *card)
@@ -2077,9 +2083,7 @@ static void msb_start(struct memstick_dev *card)
 	msb->need_flush_cache = true;
 	msb->io_queue_stopped = false;
 
-	spin_lock_irqsave(&msb->q_lock, flags);
-	blk_start_queue(msb->queue);
-	spin_unlock_irqrestore(&msb->q_lock, flags);
+	blk_mq_start_hw_queues(msb->queue);
 
 	queue_work(msb->io_queue, &msb->io_work);
 
@@ -2092,10 +2096,15 @@ static const struct block_device_operations msb_bdops = {
 	.owner = THIS_MODULE
 };
 
+static const struct blk_mq_ops msb_mq_ops = {
+	.queue_rq	= msb_queue_rq,
+};
+
 /* Registers the block device */
 static int msb_init_disk(struct memstick_dev *card)
 {
 	struct msb_data *msb = memstick_get_drvdata(card);
+	struct blk_mq_tag_set *set = NULL;
 	int rc;
 	unsigned long capacity;
 
@@ -2112,9 +2121,11 @@ static int msb_init_disk(struct memstick_dev *card)
 		goto out_release_id;
 	}
 
-	msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
-	if (!msb->queue) {
-		rc = -ENOMEM;
+	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
+					  BLK_MQ_F_SHOULD_MERGE);
+	if (IS_ERR(msb->queue)) {
+		rc = PTR_ERR(msb->queue);
+		msb->queue = NULL;
 		goto out_put_disk;
 	}
 
@@ -2202,12 +2213,13 @@ static void msb_remove(struct memstick_dev *card)
 	/* Take care of unhandled + new requests from now on */
 	spin_lock_irqsave(&msb->q_lock, flags);
 	msb->card_dead = true;
-	blk_start_queue(msb->queue);
 	spin_unlock_irqrestore(&msb->q_lock, flags);
+	blk_mq_start_hw_queues(msb->queue);
 
 	/* Remove the disk */
 	del_gendisk(msb->disk);
 	blk_cleanup_queue(msb->queue);
+	blk_mq_free_tag_set(&msb->tag_set);
 	msb->queue = NULL;
 
 	mutex_lock(&msb_disk_lock);
drivers/memstick/core/ms_block.h
@@ -152,6 +152,7 @@ struct msb_data {
 	struct gendisk			*disk;
 	struct request_queue		*queue;
 	spinlock_t			q_lock;
+	struct blk_mq_tag_set		tag_set;
 	struct hd_geometry		geometry;
 	struct attribute_group		attr_group;
 	struct request			*req;