dm: conditionally enable branching for less used features

Use jump_labels to further reduce cost of unlikely branches for zoned
block devices, dm-stats and swap_bios throttling.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Author: Mike Snitzer
Date:   2022-03-26 14:14:00 -04:00
Parent: 563a225c9f
Commit: 442761fd2b
4 changed files with 53 additions and 23 deletions
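
The mechanism behind this change is the kernel's static key (jump label) API:
each rarely used feature gets a key defined with DEFINE_STATIC_KEY_FALSE(),
hot-path tests are written as static_branch_unlikely(), which compiles to a
no-op until the key is flipped, and the slow configuration path flips the key
with static_branch_enable() the first time the feature is set up. Below is a
minimal sketch of that pattern; the feature_x_enabled key and the
do_feature_x() helper are hypothetical stand-ins, not code from this commit:

#include <linux/jump_label.h>

/* Starts "false": hot-path branches on it are compiled out until enabled. */
DEFINE_STATIC_KEY_FALSE(feature_x_enabled);

/* Hypothetical slow-path work for the rarely used feature. */
static void do_feature_x(void)
{
}

/* Hot path: with the key off this costs a patched-in nop, not a load+test. */
static void hot_path(void)
{
        if (static_branch_unlikely(&feature_x_enabled))
                do_feature_x();
}

/* Setup path: flip the key once, when the feature is first configured. */
static void feature_x_setup(void)
{
        if (!static_key_enabled(&feature_x_enabled.key))
                static_branch_enable(&feature_x_enabled);
}

static_branch_enable() rewrites the branch sites in kernel text, so the cost
of the check is paid once at configuration time instead of on every bio.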

drivers/md/dm-core.h

@@ -13,6 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-crypto-profile.h>
+#include <linux/jump_label.h>

 #include <trace/events/block.h>

@@ -154,6 +155,10 @@ static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
         return &md->stats;
 }

+DECLARE_STATIC_KEY_FALSE(stats_enabled);
+DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
+DECLARE_STATIC_KEY_FALSE(zoned_enabled);
+
 static inline bool dm_emulate_zone_append(struct mapped_device *md)
 {
         if (blk_queue_is_zoned(md->queue))

drivers/md/dm-stats.c

@@ -396,6 +396,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,

         dm_stats_recalc_precise_timestamps(stats);

+        if (!static_key_enabled(&stats_enabled.key))
+                static_branch_enable(&stats_enabled);
+
         mutex_unlock(&stats->mutex);

         resume_callback(md);

drivers/md/dm-table.c

@@ -719,6 +719,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
                 DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                        dm_device_name(t->md), type);

+        if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+                static_branch_enable(&swap_bios_enabled);
+
         return 0;

 bad:

@@ -2040,6 +2043,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                 r = dm_set_zones_restrictions(t, q);
                 if (r)
                         return r;
+                if (!static_key_enabled(&zoned_enabled.key))
+                        static_branch_enable(&zoned_enabled);
         }

         dm_update_crypto_profile(q, t);

drivers/md/dm.c

@@ -71,6 +71,10 @@ void dm_issue_global_event(void)
         wake_up(&dm_global_eventq);
 }

+DEFINE_STATIC_KEY_FALSE(stats_enabled);
+DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
+DEFINE_STATIC_KEY_FALSE(zoned_enabled);
+
 /*
  * One of these is allocated (on-stack) per original bio.
  */
@@ -516,7 +520,8 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
         else
                 bio_end_io_acct(bio, start_time);

-        if (unlikely(dm_stats_used(&md->stats)))
+        if (static_branch_unlikely(&stats_enabled) &&
+            unlikely(dm_stats_used(&md->stats)))
                 dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                     bio->bi_iter.bi_sector, bio_sectors(bio),
                                     end, start_time, stats_aux);
@@ -586,6 +591,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
         io->start_time = jiffies;
         io->flags = 0;

-        dm_stats_record_start(&md->stats, &io->stats_aux);
+        if (static_branch_unlikely(&stats_enabled))
+                dm_stats_record_start(&md->stats, &io->stats_aux);

         return io;
@@ -1012,13 +1018,15 @@ static void clone_endio(struct bio *bio)
                         disable_write_zeroes(md);
         }

-        if (unlikely(blk_queue_is_zoned(q)))
+        if (static_branch_unlikely(&zoned_enabled) &&
+            unlikely(blk_queue_is_zoned(q)))
                 dm_zone_endio(io, bio);

         if (endio) {
                 int r = endio(ti, bio, &error);
                 switch (r) {
                 case DM_ENDIO_REQUEUE:
+                        if (static_branch_unlikely(&zoned_enabled)) {
                         /*
                          * Requeuing writes to a sequential zone of a zoned
                          * target will break the sequential write pattern:

@@ -1028,6 +1036,8 @@ static void clone_endio(struct bio *bio)
                                 error = BLK_STS_IOERR;
                         else
                                 error = BLK_STS_DM_REQUEUE;
+                        } else
+                                error = BLK_STS_DM_REQUEUE;
                         fallthrough;
                 case DM_ENDIO_DONE:
                         break;
@@ -1040,7 +1050,8 @@ static void clone_endio(struct bio *bio)
                 }
         }

-        if (unlikely(swap_bios_limit(ti, bio)))
+        if (static_branch_unlikely(&swap_bios_enabled) &&
+            unlikely(swap_bios_limit(ti, bio)))
                 up(&md->swap_bios_semaphore);

         free_tio(bio);
@@ -1295,22 +1306,26 @@ static void __map_bio(struct bio *clone)
         dm_io_inc_pending(io);
         tio->old_sector = clone->bi_iter.bi_sector;

-        if (unlikely(swap_bios_limit(ti, clone))) {
+        if (static_branch_unlikely(&swap_bios_enabled) &&
+            unlikely(swap_bios_limit(ti, clone))) {
                 int latch = get_swap_bios();
                 if (unlikely(latch != md->swap_bios))
                         __set_swap_bios_limit(md, latch);
                 down(&md->swap_bios_semaphore);
         }

-        /*
-         * Check if the IO needs a special mapping due to zone append emulation
-         * on zoned target. In this case, dm_zone_map_bio() calls the target
-         * map operation.
-         */
-        if (unlikely(dm_emulate_zone_append(md)))
-                r = dm_zone_map_bio(tio);
-        else
-                r = ti->type->map(ti, clone);
+        if (static_branch_unlikely(&zoned_enabled)) {
+                /*
+                 * Check if the IO needs a special mapping due to zone append
+                 * emulation on zoned target. In this case, dm_zone_map_bio()
+                 * calls the target map operation.
+                 */
+                if (unlikely(dm_emulate_zone_append(md)))
+                        r = dm_zone_map_bio(tio);
+                else
+                        r = ti->type->map(ti, clone);
+        } else
+                r = ti->type->map(ti, clone);

         switch (r) {
         case DM_MAPIO_SUBMITTED:
@@ -1329,7 +1344,8 @@ static void __map_bio(struct bio *clone)
                 break;
         case DM_MAPIO_KILL:
         case DM_MAPIO_REQUEUE:
-                if (unlikely(swap_bios_limit(ti, clone)))
+                if (static_branch_unlikely(&swap_bios_enabled) &&
+                    unlikely(swap_bios_limit(ti, clone)))
                         up(&md->swap_bios_semaphore);
                 free_tio(clone);
                 if (r == DM_MAPIO_KILL)
@@ -1565,7 +1581,8 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
         ci->sector_count = bio_sectors(bio);

         /* Shouldn't happen but sector_count was being set to 0 so... */
-        if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+        if (static_branch_unlikely(&zoned_enabled) &&
+            WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
                 ci->sector_count = 0;
 }