blk-mq: Record nr_active_requests per queue for when using shared sbitmap
The per-hctx nr_active value can no longer be used to fairly assign a share of tag depth per request queue when using a shared sbitmap, as it does not consider that the tags are shared over all hctx's. For this case, record nr_active_requests per request_queue, and make the judgement based on that value.

Co-developed-with: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Don Brace <don.brace@microsemi.com> #SCSI resv cmds patches used
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: a0235d230f
Commit: bccf5e26d9
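To illustrate the problem described in the commit message (all numbers below are made up, and the sketch is plain userspace C, not kernel code): with a shared sbitmap, one queue's active requests are spread across all of its hctx's, so each hctx->nr_active stays small even while the queue as a whole is holding far more than its fair share of tags. Assuming 64 requests in flight spread evenly over 16 hctx's:

	/* Illustrative only; values are hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_hctx = 16;       /* hctx's sharing one tag set */
		unsigned int queue_active = 64;  /* requests in flight for this queue */

		/* what each per-hctx counter would report with shared tags */
		unsigned int per_hctx = queue_active / nr_hctx;

		printf("per-hctx nr_active: %u, per-queue count: %u\n",
		       per_hctx, queue_active);
		return 0;
	}

Judging fairness against the per-hctx value of 4 rather than the per-queue value of 64 is what the new per-request_queue counter avoids.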
@@ -543,6 +543,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->node = node_id;
 
+	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+
 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
@@ -519,7 +519,7 @@ void blk_mq_free_request(struct request *rq)
 
 	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
-		atomic_dec(&hctx->nr_active);
+		__blk_mq_dec_active_requests(hctx);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->backing_dev_info);
@@ -1127,7 +1127,7 @@ static bool blk_mq_get_driver_tag(struct request *rq)
 	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
 			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
 		rq->rq_flags |= RQF_MQ_INFLIGHT;
-		atomic_inc(&hctx->nr_active);
+		__blk_mq_inc_active_requests(hctx);
 	}
 	hctx->tags->rqs[rq->tag] = rq;
 	return true;
@@ -199,6 +199,28 @@ static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
 	return true;
 }
 
+static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
+	else
+		atomic_inc(&hctx->nr_active);
+}
+
+static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
+	else
+		atomic_dec(&hctx->nr_active);
+}
+
+static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
+{
+	if (blk_mq_is_sbitmap_shared(hctx->flags))
+		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
+	return atomic_read(&hctx->nr_active);
+}
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 					   struct request *rq)
 {
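A rough userspace analogue of the three helpers added above (the struct layout, field names and the shared_tags flag below are invented for illustration; the kernel uses blk_mq_is_sbitmap_shared() on hctx->flags): when the sbitmap is shared, every hctx funnels its accounting into the single per-queue atomic, so the total is visible to the fairness check.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct queue { atomic_int nr_active_shared; };
	struct hctx  { struct queue *q; atomic_int nr_active; bool shared_tags; };

	static void inc_active(struct hctx *h)
	{
		if (h->shared_tags)             /* cf. blk_mq_is_sbitmap_shared() */
			atomic_fetch_add(&h->q->nr_active_shared, 1);
		else
			atomic_fetch_add(&h->nr_active, 1);
	}

	int main(void)
	{
		struct queue q = { 0 };
		struct hctx h0 = { .q = &q, .shared_tags = true };
		struct hctx h1 = { .q = &q, .shared_tags = true };

		inc_active(&h0);
		inc_active(&h1);
		/* both hctx's land in the one per-queue counter */
		printf("queue active: %d\n", atomic_load(&q.nr_active_shared));
		return 0;
	}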
@@ -207,7 +229,7 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-		atomic_dec(&hctx->nr_active);
+		__blk_mq_dec_active_requests(hctx);
 	}
 }
 
@@ -287,7 +309,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	 * Allow at least some tags
 	 */
 	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
+	return __blk_mq_active_requests(hctx) < depth;
 }
 
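The check above divides the shared depth evenly among the active users. As a worked example with assumed numbers (a depth-256 shared sbitmap and 4 busy queues), each queue is allowed max((256 + 3) / 4, 4U) = 64 active requests, and the per-queue counter introduced by this patch is what gets compared against that limit. A standalone sketch of the same arithmetic:

	/* Illustrative only; depth and user counts are example values. */
	#include <stdio.h>

	static unsigned int fair_depth(unsigned int sb_depth, unsigned int users)
	{
		unsigned int depth = (sb_depth + users - 1) / users;  /* round up */

		return depth < 4 ? 4 : depth;   /* "Allow at least some tags" */
	}

	int main(void)
	{
		printf("%u\n", fair_depth(256, 4));   /* prints 64 */
		return 0;
	}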
@@ -485,6 +485,8 @@ struct request_queue {
 	struct timer_list	timeout;
 	struct work_struct	timeout_work;
 
+	atomic_t		nr_active_requests_shared_sbitmap;
+
 	struct list_head	icq_list;
 #ifdef CONFIG_BLK_CGROUP
 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);