blk-mq: avoid code duplication

blk_mq_exit_hw_queues() and blk_mq_free_hw_queues() are introduced to
avoid duplicating the hardware-queue teardown code between the
blk_mq_init_hw_queues() failure path and blk_mq_free_queue().

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Ming Lei 2014-05-27 23:35:13 +08:00, committed by Jens Axboe
Parent 1f9f07e917
Commit 624dbe4754
1 changed file with 37 additions and 24 deletions


@@ -1523,11 +1523,43 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 	return NOTIFY_OK;
 }
 
+static void blk_mq_exit_hw_queues(struct request_queue *q,
+		struct blk_mq_tag_set *set, int nr_queue)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (i == nr_queue)
+			break;
+
+		if (set->ops->exit_hctx)
+			set->ops->exit_hctx(hctx, i);
+
+		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+		kfree(hctx->ctxs);
+		blk_mq_free_bitmap(&hctx->ctx_map);
+	}
+}
+
+static void blk_mq_free_hw_queues(struct request_queue *q,
+		struct blk_mq_tag_set *set)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		free_cpumask_var(hctx->cpumask);
+		set->ops->free_hctx(hctx, i);
+	}
+}
+
 static int blk_mq_init_hw_queues(struct request_queue *q,
 		struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx *hctx;
-	unsigned int i, j;
+	unsigned int i;
 
 	/*
 	 * Initialize hardware queues
@@ -1579,17 +1611,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 	/*
 	 * Init failed
 	 */
-	queue_for_each_hw_ctx(q, hctx, j) {
-		if (i == j)
-			break;
-
-		if (set->ops->exit_hctx)
-			set->ops->exit_hctx(hctx, j);
-
-		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-		kfree(hctx->ctxs);
-		blk_mq_free_bitmap(&hctx->ctx_map);
-	}
+	blk_mq_exit_hw_queues(q, set, i);
 
 	return 1;
 }
@@ -1838,21 +1860,12 @@ EXPORT_SYMBOL(blk_mq_init_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-	int i;
+	struct blk_mq_tag_set *set = q->tag_set;
 
 	blk_mq_del_queue_tag_set(q);
 
-	queue_for_each_hw_ctx(q, hctx, i) {
-		blk_mq_tag_idle(hctx);
-		kfree(hctx->ctxs);
-		blk_mq_free_bitmap(&hctx->ctx_map);
-		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
-		if (q->mq_ops->exit_hctx)
-			q->mq_ops->exit_hctx(hctx, i);
-		free_cpumask_var(hctx->cpumask);
-		q->mq_ops->free_hctx(hctx, i);
-	}
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+	blk_mq_free_hw_queues(q, set);
 
 	free_percpu(q->queue_ctx);
 	kfree(q->queue_hw_ctx);