blkcg: add blkcg_{init|drain|exit}_queue()

Currently block core calls directly into blk-throttle for init, drain
and exit.  This patch adds blkcg_{init|drain|exit}_queue() which wraps
the blk-throttle functions.  This is to give more control and
visibility to blkcg core layer for proper layering.  Further patches
will add logic common to blkcg policies to the functions.

While at it, collapse blk_throtl_release() into blk_throtl_exit().
There's no reason to keep them separate.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Tejun Heo 2012-03-05 13:15:12 -08:00 committed by Jens Axboe
Parent 7ee9c56205
Commit 5efd611351
6 changed files with 55 additions and 10 deletions

View file

@ -20,6 +20,7 @@
#include <linux/genhd.h> #include <linux/genhd.h>
#include <linux/delay.h> #include <linux/delay.h>
#include "blk-cgroup.h" #include "blk-cgroup.h"
#include "blk.h"
#define MAX_KEY_LEN 100 #define MAX_KEY_LEN 100
@ -1459,6 +1460,47 @@ done:
return &blkcg->css; return &blkcg->css;
} }
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	/* this path is allowed to sleep; annotate for debugging */
	might_sleep();

	ret = blk_throtl_init(q);

	return ret;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 *
 * The caller must hold @q->queue_lock.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	/* draining is done under the queue lock; assert the contract */
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	/* blk_throtl_exit() now also frees q->td (release collapsed into exit) */
	blk_throtl_exit(q);
}
/* /*
* We cannot support shared io contexts, as we have no mean to support * We cannot support shared io contexts, as we have no mean to support
* two tasks with the same ioc in two different groups without major rework * two tasks with the same ioc in two different groups without major rework

View file

@ -215,6 +215,10 @@ struct blkio_policy_type {
enum blkio_policy_id plid; enum blkio_policy_id plid;
}; };
extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */ /* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *); extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *); extern void blkio_policy_unregister(struct blkio_policy_type *);
@ -233,6 +237,9 @@ struct blkio_group {
struct blkio_policy_type { struct blkio_policy_type {
}; };
/* no-op stubs for when blk-cgroup support is not configured */
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { } static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { } static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q) { } static inline void blkg_destroy_all(struct request_queue *q) { }

View file

@ -34,6 +34,7 @@
#include <trace/events/block.h> #include <trace/events/block.h>
#include "blk.h" #include "blk.h"
#include "blk-cgroup.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@ -280,7 +281,7 @@ EXPORT_SYMBOL(blk_stop_queue);
* *
* This function does not cancel any asynchronous activity arising * This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevator_exit() * out of elevator or throttling code. That would require elevator_exit()
* and blk_throtl_exit() to be called with queue lock initialized. * and blkcg_exit_queue() to be called with queue lock initialized.
* *
*/ */
void blk_sync_queue(struct request_queue *q) void blk_sync_queue(struct request_queue *q)
@ -372,7 +373,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (q->elevator) if (q->elevator)
elv_drain_elevator(q); elv_drain_elevator(q);
blk_throtl_drain(q); blkcg_drain_queue(q);
/* /*
* This function might be called on a queue which failed * This function might be called on a queue which failed
@ -562,7 +563,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
*/ */
q->queue_lock = &q->__queue_lock; q->queue_lock = &q->__queue_lock;
if (blk_throtl_init(q)) if (blkcg_init_queue(q))
goto fail_id; goto fail_id;
return q; return q;

View file

@ -9,6 +9,7 @@
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include "blk.h" #include "blk.h"
#include "blk-cgroup.h"
struct queue_sysfs_entry { struct queue_sysfs_entry {
struct attribute attr; struct attribute attr;
@ -486,7 +487,7 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator); elevator_exit(q->elevator);
} }
blk_throtl_exit(q); blkcg_exit_queue(q);
if (rl->rq_pool) if (rl->rq_pool)
mempool_destroy(rl->rq_pool); mempool_destroy(rl->rq_pool);
@ -494,7 +495,6 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags) if (q->queue_tags)
__blk_queue_free_tags(q); __blk_queue_free_tags(q);
blk_throtl_release(q);
blk_trace_shutdown(q); blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info); bdi_destroy(&q->backing_dev_info);

View file

@ -1226,10 +1226,7 @@ void blk_throtl_exit(struct request_queue *q)
* it. * it.
*/ */
throtl_shutdown_wq(q); throtl_shutdown_wq(q);
}
void blk_throtl_release(struct request_queue *q)
{
kfree(q->td); kfree(q->td);
} }

View file

@ -236,7 +236,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q); extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q); extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */ #else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{ {
@ -245,7 +244,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { } static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { } static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */ #endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */ #endif /* BLK_INTERNAL_H */