blk-throttle: implement sq_to_tg(), sq_to_td() and throtl_log()

Now that both throtl_data and throtl_grp embed throtl_service_queue,
we can unify throtl_log() and throtl_log_tg().

* sq_to_tg() is added.  This returns the throtl_grp a service_queue is
  embedded in.  If the service_queue is the top-level one embedded in
  throtl_data, NULL is returned.

* sq_to_td() is added.  A service_queue is always associated with a
  throtl_data.  This function finds the associated td and returns it.

* throtl_log() is updated to take throtl_service_queue instead of
  throtl_data.  If the service_queue is one embedded in throtl_grp, it
  prints the same header as throtl_log_tg() did.  If it's one embedded
  in throtl_data, it behaves the same as before.  This renders
  throtl_log_tg() unnecessary.  Removed.

This change is necessary for hierarchy support as we're gonna be using
the same code paths to dispatch bios to intermediate service_queues
embedded in throtl_grps and the top-level service_queue embedded in
throtl_data.

This patch doesn't make any behavior changes.

v2: throtl_log() didn't print a space after blkg path.  Updated so
    that it prints a space after throtl_grp path.  Spotted by Vivek.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
This commit is contained in:
Tejun Heo 2013-05-14 13:52:36 -07:00
Родитель 77216b0484
Коммит fda6f272c7
1 изменённых файлов: 82 добавлений и 30 удалений

Просмотреть файл

@ -151,16 +151,65 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
	return blkg_to_tg(td->queue->root_blkg);
}
/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
* @sq: the throtl_service_queue of interest
*
* Return the throtl_grp @sq belongs to. If @sq is the top-level one
* embedded in throtl_data, %NULL is returned.
*/
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	/*
	 * Only service_queues embedded in a throtl_grp have a parent_sq;
	 * the top-level queue embedded in throtl_data has none (and a
	 * NULL @sq trivially belongs to no group either).
	 */
	if (!sq || !sq->parent_sq)
		return NULL;

	return container_of(sq, struct throtl_grp, service_queue);
}
/**
* sq_to_td - return throtl_data the specified service queue belongs to
* @sq: the throtl_service_queue of interest
*
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
* Determine the associated throtl_data accordingly and return it.
*/
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	/*
	 * A service_queue embedded in a throtl_grp reaches its td through
	 * the group; otherwise @sq is the one embedded in throtl_data
	 * itself, so just step back to the containing structure.
	 */
	return tg ? tg->td : container_of(sq, struct throtl_data, service_queue);
}
/**
* throtl_log - log debug message via blktrace
* @sq: the service_queue being reported
* @fmt: printf format string
* @args: printf args
*
* The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
* throtl_grp; otherwise, just "throtl".
*
* TODO: this should be made a function and name formatting should happen
* after testing whether blktrace is enabled.
*/
/*
 * Log @fmt prefixed with the blkg path when @sq belongs to a throtl_grp,
 * or with plain "throtl" when it is the top-level queue in throtl_data.
 * __td is always valid (sq_to_td() never returns NULL); the (void)__td
 * silences "set but not used" when blktrace is compiled out.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)
/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
@ -402,9 +451,10 @@ static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;
struct throtl_service_queue *sq = &td->service_queue;
mod_delayed_work(kthrotld_workqueue, dwork, delay); mod_delayed_work(kthrotld_workqueue, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies); throtl_log(sq, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
@ -429,7 +479,8 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies; tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + throtl_slice; tg->slice_end[rw] = jiffies + throtl_slice;
throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu", throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw], rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies); tg->slice_end[rw], jiffies);
} }
@ -444,7 +495,8 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
unsigned long jiffy_end) unsigned long jiffy_end)
{ {
tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu", throtl_log(&tg->service_queue,
"[%c] extend slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw], rw == READ ? 'R' : 'W', tg->slice_start[rw],
tg->slice_end[rw], jiffies); tg->slice_end[rw], jiffies);
} }
@ -511,8 +563,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
tg->slice_start[rw] += nr_slices * throtl_slice; tg->slice_start[rw] += nr_slices * throtl_slice;
throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu" throtl_log(&tg->service_queue,
" start=%lu end=%lu jiffies=%lu", "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim, rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
tg->slice_start[rw], tg->slice_end[rw], jiffies); tg->slice_start[rw], tg->slice_end[rw], jiffies);
} }
@ -852,7 +904,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
bio_list_init(&bio_list_on_stack); bio_list_init(&bio_list_on_stack);
throtl_log(td, "dispatch nr_queued=%u read=%u write=%u", throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
td->nr_queued[READ] + td->nr_queued[WRITE], td->nr_queued[READ] + td->nr_queued[WRITE],
td->nr_queued[READ], td->nr_queued[WRITE]); td->nr_queued[READ], td->nr_queued[WRITE]);
@ -863,7 +915,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]); bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
bio_list_init(&sq->bio_lists[rw]); bio_list_init(&sq->bio_lists[rw]);
} }
throtl_log(td, "bios disp=%u", nr_disp); throtl_log(sq, "bios disp=%u", nr_disp);
} }
throtl_schedule_next_dispatch(td); throtl_schedule_next_dispatch(td);
@ -972,7 +1024,8 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
else else
*(unsigned int *)((void *)tg + cft->private) = ctx.v; *(unsigned int *)((void *)tg + cft->private) = ctx.v;
throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u", throtl_log(&tg->service_queue,
"limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
tg->bps[READ], tg->bps[WRITE], tg->bps[READ], tg->bps[WRITE],
tg->iops[READ], tg->iops[WRITE]); tg->iops[READ], tg->iops[WRITE]);
@ -1131,8 +1184,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
} }
queue_bio: queue_bio:
throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu" throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
" iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W', rw == READ ? 'R' : 'W',
tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
tg->io_disp[rw], tg->iops[rw], tg->io_disp[rw], tg->iops[rw],