blk-mq: remove blk_mq_put_ctx()
No code that occurs between blk_mq_get_ctx() and blk_mq_put_ctx() depends
on preemption being disabled for its correctness. Since removing the CPU
preemption calls does not measurably affect performance, simplify the
blk-mq code by removing the blk_mq_put_ctx() function and also by not
disabling preemption in blk_mq_get_ctx().

Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 417232880c
Commit: c05f42206f
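The argument in the commit message is the standard one for lockless
per-CPU lookups: the CPU number is only a locality hint, because each
blk_mq_ctx is protected by its own spinlock (the diff below takes
ctx->lock around the actual work), so nothing breaks if the task
migrates right after the lookup. Below is a minimal userspace sketch of
the same pattern using sched_getcpu(); the names per_cpu_ctx, NR_SLOTS
and get_ctx() are hypothetical stand-ins for illustration, not kernel
APIs:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_SLOTS 64                     /* assumed >= number of CPUs */

struct slot {
        pthread_mutex_t lock;
        unsigned long count;
};

/* One slot per CPU, each protected by its own lock (like ctx->lock). */
static struct slot per_cpu_ctx[NR_SLOTS] = {
        [0 ... NR_SLOTS - 1] = { PTHREAD_MUTEX_INITIALIZER, 0 },
};

static struct slot *get_ctx(void)
{
        int cpu = sched_getcpu();       /* may be stale one instruction later */

        return &per_cpu_ctx[(cpu < 0 ? 0 : cpu) % NR_SLOTS];
}

int main(void)
{
        struct slot *ctx = get_ctx();   /* no pinning, no put_ctx() needed */

        pthread_mutex_lock(&ctx->lock); /* correctness comes from the lock, */
        ctx->count++;                   /* not from staying on one CPU */
        pthread_mutex_unlock(&ctx->lock);
        printf("count=%lu\n", ctx->count);
        return 0;
}

The same reasoning is why blk_mq_get_ctx() in block/blk-mq.h can switch
from get_cpu(), which disables preemption, to raw_smp_processor_id(),
which does not (see the final hunks below).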
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -330,10 +330,8 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	bool ret = false;
 	enum hctx_type type;
 
-	if (e && e->type->ops.bio_merge) {
-		blk_mq_put_ctx(ctx);
+	if (e && e->type->ops.bio_merge)
 		return e->type->ops.bio_merge(hctx, bio, nr_segs);
-	}
 
 	type = hctx->type;
 	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -344,7 +342,6 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		spin_unlock(&ctx->lock);
 	}
 
-	blk_mq_put_ctx(ctx);
 	return ret;
 }
 
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -113,7 +113,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	struct sbq_wait_state *ws;
 	DEFINE_SBQ_WAIT(wait);
 	unsigned int tag_offset;
-	bool drop_ctx;
 	int tag;
 
 	if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -136,7 +135,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		return BLK_MQ_TAG_FAIL;
 
 	ws = bt_wait_ptr(bt, data->hctx);
-	drop_ctx = data->ctx == NULL;
 	do {
 		struct sbitmap_queue *bt_prev;
 
@@ -161,9 +159,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		if (tag != -1)
 			break;
 
-		if (data->ctx)
-			blk_mq_put_ctx(data->ctx);
-
 		bt_prev = bt;
 		io_schedule();
 
@@ -189,9 +184,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
 
-	if (drop_ctx && data->ctx)
-		blk_mq_put_ctx(data->ctx);
-
 	sbitmap_finish_wait(bt, ws, &wait);
 
 found_tag:
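The drop_ctx bookkeeping removed above existed only because
blk_mq_get_ctx() used to return with preemption disabled: blocking in
io_schedule() in that state would be a sleep-in-atomic bug, so the tag
allocator had to put the ctx before sleeping and re-acquire one after
waking. With preemption left enabled, the pointer can simply be held
across the sleep. A condensed view of the wait loop after the patch,
stitched from the surviving lines above (elided code marked by
comments):

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/* ... attempt allocation; on success: ... */
		if (tag != -1)
			break;

		bt_prev = bt;
		io_schedule();	/* sleeping while holding data->ctx is now fine */

		/* ... re-resolve data->hctx and bt after waking ... */
		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);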
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -355,13 +355,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
-	bool put_ctx_on_error = false;
+	bool clear_ctx_on_error = false;
 
 	blk_queue_enter_live(q);
 	data->q = q;
 	if (likely(!data->ctx)) {
 		data->ctx = blk_mq_get_ctx(q);
-		put_ctx_on_error = true;
+		clear_ctx_on_error = true;
 	}
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
@@ -387,10 +387,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
-		if (put_ctx_on_error) {
-			blk_mq_put_ctx(data->ctx);
+		if (clear_ctx_on_error)
 			data->ctx = NULL;
-		}
 		blk_queue_exit(q);
 		return NULL;
 	}
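The rename from put_ctx_on_error to clear_ctx_on_error tracks the same
semantic shift: on tag-allocation failure there is no longer a CPU
reference to put, only the ctx pointer that this function itself
installed to reset. The resulting error path, assembled from the hunk
above:

	if (tag == BLK_MQ_TAG_FAIL) {
		if (clear_ctx_on_error)
			data->ctx = NULL;	/* just undo our own assignment */
		blk_queue_exit(q);
		return NULL;
	}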
@@ -427,8 +425,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
-	blk_mq_put_ctx(alloc_data.ctx);
-
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -1977,7 +1973,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	plug = current->plug;
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio, nr_segs);
 
 		/* bypass scheduler for flush rq */
@@ -1991,7 +1986,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
 
-		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio, nr_segs);
 
 		if (!request_count)
@@ -2025,8 +2019,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_add_rq_to_plug(plug, rq);
 		trace_block_plug(q);
 
-		blk_mq_put_ctx(data.ctx);
-
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
 			trace_block_unplug(q, 1, true);
@@ -2035,11 +2027,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
-		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio, nr_segs);
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
-		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio, nr_segs);
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -151,12 +151,7 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
  */
 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
 {
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
+	return __blk_mq_get_ctx(q, raw_smp_processor_id());
 }
 
 struct blk_mq_alloc_data {
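This header change is the heart of the patch. Note that the new helper
reads the CPU number with raw_smp_processor_id() rather than plain
smp_processor_id(): the latter warns under CONFIG_DEBUG_PREEMPT when
called with preemption enabled, which is exactly the requirement being
dropped here. The resulting helper, assembled from the hunk above:

static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	/* The CPU number is a locality hint only; migrating right after
	 * this read is harmless, so the unchecked variant is fine. */
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}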
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -575,7 +575,6 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
 	spin_lock(&kcq->lock);
 	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
 	spin_unlock(&kcq->lock);
-	blk_mq_put_ctx(ctx);
 
 	return merged;
 }