blk-rq-qos: store a gendisk instead of request_queue in struct rq_qos
This is what about half of the users already want, and it's only going
to grow more.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230203150400.3199230-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 3963d84df7
Commit: ba91c849fa
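The conversion itself is mechanical: struct rq_qos now stores the gendisk, every former rqos->q dereference becomes rqos->disk->queue, and the old rqos->q->disk round trips collapse to a plain rqos->disk. A minimal sketch of the two resulting access patterns follows; these helper functions are illustrative only and are not part of the patch:

	/* illustrative helpers, not in the patch */
	static inline struct request_queue *rqos_queue(struct rq_qos *rqos)
	{
		return rqos->disk->queue;	/* replaces the stored rqos->q */
	}

	static inline struct backing_dev_info *rqos_bdi(struct rq_qos *rqos)
	{
		return rqos->disk->bdi;		/* was: rqos->q->disk->bdi */
	}

Queue-level users (wbt queue depth, the iocost autop heuristics) pay one extra dereference, while disk-level users (throttling, bdi access) lose a round trip — the "half of the users" the commit message refers to.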
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -669,7 +669,7 @@ static struct ioc *q_to_ioc(struct request_queue *q)
 static const char __maybe_unused *ioc_name(struct ioc *ioc)
 {
-	struct gendisk *disk = ioc->rqos.q->disk;
+	struct gendisk *disk = ioc->rqos.disk;
 
 	if (!disk)
 		return "<unknown>";
 
@@ -808,11 +808,11 @@ static int ioc_autop_idx(struct ioc *ioc)
 	u64 now_ns;
 
 	/* rotational? */
-	if (!blk_queue_nonrot(ioc->rqos.q))
+	if (!blk_queue_nonrot(ioc->rqos.disk->queue))
 		return AUTOP_HDD;
 
 	/* handle SATA SSDs w/ broken NCQ */
-	if (blk_queue_depth(ioc->rqos.q) == 1)
+	if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
 		return AUTOP_SSD_QD1;
 
 	/* use one of the normal ssd sets */
@@ -2649,7 +2649,7 @@ retry_lock:
 	if (use_debt) {
 		iocg_incur_debt(iocg, abs_cost, &now);
 		if (iocg_kick_delay(iocg, &now))
-			blkcg_schedule_throttle(rqos->q->disk,
+			blkcg_schedule_throttle(rqos->disk,
 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		iocg_unlock(iocg, ioc_locked, &flags);
 		return;
@@ -2750,7 +2750,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 	if (likely(!list_empty(&iocg->active_list))) {
 		iocg_incur_debt(iocg, abs_cost, &now);
 		if (iocg_kick_delay(iocg, &now))
-			blkcg_schedule_throttle(rqos->q->disk,
+			blkcg_schedule_throttle(rqos->disk,
 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 	} else {
 		iocg_commit_bio(iocg, bio, abs_cost, cost);
@@ -2821,7 +2821,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
 {
 	struct ioc *ioc = rqos_to_ioc(rqos);
 
-	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
+	blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iocost);
 
 	spin_lock_irq(&ioc->lock);
 	ioc->running = IOC_STOP;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
 
 	if (use_delay)
-		blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
+		blkcg_schedule_throttle(rqos->disk, use_memdelay);
 
 	/*
 	 * To avoid priority inversions we want to just take a slot if we are
@@ -330,7 +330,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
 				struct child_latency_info *lat_info,
 				bool up)
 {
-	unsigned long qd = blkiolat->rqos.q->nr_requests;
+	unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
 	unsigned long scale = scale_amount(qd, up);
 	unsigned long old = atomic_read(&lat_info->scale_cookie);
 	unsigned long max_scale = qd << 1;
@@ -372,7 +372,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
  */
 static void scale_change(struct iolatency_grp *iolat, bool up)
 {
-	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
+	unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
 	unsigned long scale = scale_amount(qd, up);
 	unsigned long old = iolat->max_depth;
 
@@ -646,7 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
 
 	timer_shutdown_sync(&blkiolat->timer);
 	flush_work(&blkiolat->enable_work);
-	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
+	blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iolatency);
 	kfree(blkiolat);
 }
 
@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(blkg, pos_css,
-				     blkiolat->rqos.q->root_blkg) {
+				     blkiolat->rqos.disk->queue->root_blkg) {
 		struct iolatency_grp *iolat;
 		struct child_latency_info *lat_info;
 		unsigned long flags;
@@ -749,9 +749,9 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
 	 */
 	enabled = atomic_read(&blkiolat->enable_cnt);
 	if (enabled != blkiolat->enabled) {
-		blk_mq_freeze_queue(blkiolat->rqos.q);
+		blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
 		blkiolat->enabled = enabled;
-		blk_mq_unfreeze_queue(blkiolat->rqos.q);
+		blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
 	}
 }
 
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -813,9 +813,9 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
 
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
-	lockdep_assert_held(&rqos->q->debugfs_mutex);
+	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);
 
-	if (!rqos->q->debugfs_dir)
+	if (!rqos->disk->queue->debugfs_dir)
 		return;
 	debugfs_remove_recursive(rqos->debugfs_dir);
 	rqos->debugfs_dir = NULL;
@@ -823,7 +823,7 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 
 void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 {
-	struct request_queue *q = rqos->q;
+	struct request_queue *q = rqos->disk->queue;
 	const char *dir_name = rq_qos_id_to_name(rqos->id);
 
 	lockdep_assert_held(&q->debugfs_mutex);
 
@@ -835,9 +835,7 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
 							 q->debugfs_dir);
 
-	rqos->debugfs_dir = debugfs_create_dir(dir_name,
-					       rqos->q->rqos_debugfs_dir);
-
+	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
 	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
 }
 
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -300,7 +300,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
 {
 	struct request_queue *q = disk->queue;
 
-	rqos->q = q;
+	rqos->disk = disk;
 	rqos->id = id;
 	rqos->ops = ops;
 
@@ -337,7 +337,7 @@ ebusy:
 
 void rq_qos_del(struct rq_qos *rqos)
 {
-	struct request_queue *q = rqos->q;
+	struct request_queue *q = rqos->disk->queue;
 	struct rq_qos **cur;
 
 	/*
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -26,7 +26,7 @@ struct rq_wait {
 
 struct rq_qos {
 	const struct rq_qos_ops *ops;
-	struct request_queue *q;
+	struct gendisk *disk;
 	enum rq_qos_id id;
 	struct rq_qos *next;
 #ifdef CONFIG_BLK_DEBUG_FS
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -165,7 +165,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-	struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
+	struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
 
 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }
 
@@ -312,7 +312,7 @@ enum {
 
 static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
+	struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
 	struct rq_depth *rqd = &rwb->rq_depth;
 	u64 thislat;
 
@@ -365,7 +365,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
+	struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
 	struct rq_depth *rqd = &rwb->rq_depth;
 
 	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
@@ -435,13 +435,12 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
 	unsigned int inflight = wbt_inflight(rwb);
 	int status;
 
-	if (!rwb->rqos.q->disk)
+	if (!rwb->rqos.disk)
 		return;
 
 	status = latency_exceeded(rwb, cb->stat);
 
-	trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
-			inflight);
+	trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);
 
 	/*
 	 * If we exceeded the latency target, step down. If we did not,
@@ -779,16 +778,15 @@ static int wbt_data_dir(const struct request *rq)
 
 static void wbt_queue_depth_changed(struct rq_qos *rqos)
 {
-	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
+	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
 	wbt_update_limits(RQWB(rqos));
 }
 
 static void wbt_exit(struct rq_qos *rqos)
 {
 	struct rq_wb *rwb = RQWB(rqos);
-	struct request_queue *q = rqos->q;
 
-	blk_stat_remove_callback(q, rwb->cb);
+	blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
 	blk_stat_free_callback(rwb->cb);
 	kfree(rwb);
 }
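For context on the registration side: rq_qos_add() already receives the gendisk (converted earlier in this series, as its signature in the hunk above shows), so the new field is simply captured at add time. A rough sketch of how a policy registers under this API; the policy name, the empty ops table, and the reuse of the RQ_QOS_WBT id are hypothetical, for illustration only:

	static const struct rq_qos_ops example_rqos_ops = {
		/* callbacks elided */
	};

	static int example_init(struct gendisk *disk)
	{
		struct rq_qos *rqos = kzalloc(sizeof(*rqos), GFP_KERNEL);
		int ret;

		if (!rqos)
			return -ENOMEM;
		/*
		 * rq_qos_add() stores the disk in rqos->disk; anything that
		 * later needs the request_queue goes through rqos->disk->queue.
		 */
		ret = rq_qos_add(rqos, disk, RQ_QOS_WBT, &example_rqos_ops);
		if (ret)
			kfree(rqos);
		return ret;
	}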