dm cache policy smq: change the mutex to a spinlock
We no longer sleep in any of the smq functions, so the policy's main lock
can become a spinlock. Switching from a mutex to a spinlock improves
performance when the fast cache device is a very low latency device
(e.g. NVMe SSD).

The switch to a spinlock also allows removal of the extra tick_lock, which
is no longer needed now that the main lock is a spinlock and so fulfills
the locking requirements of interrupt context.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
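For context, the heart of the patch is a single locking-pattern change,
repeated across the policy methods. The sketch below is illustrative only
and is not code from this commit: 'struct example_policy' and the two
helper functions are hypothetical stand-ins for struct smq_policy and its
methods, showing why spin_lock_irqsave() lets one lock serve both process
and interrupt context.

	/*
	 * Illustrative sketch, not part of the patch. 'example_policy'
	 * is a made-up stand-in for struct smq_policy.
	 */
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct example_policy {
		spinlock_t lock;	/* was: struct mutex lock; */
		unsigned tick;
	};

	/* Before: a mutex may sleep, so it must not be taken from interrupt context. */
	static void tick_before(struct example_policy *ep, struct mutex *m)
	{
		mutex_lock(m);		/* may sleep */
		ep->tick++;
		mutex_unlock(m);
	}

	/*
	 * After: spin_lock_irqsave() disables local interrupts while the
	 * lock is held, so the same lock is safe from both process and
	 * interrupt context and the separate tick_lock becomes redundant.
	 */
	static void tick_after(struct example_policy *ep)
	{
		unsigned long flags;

		spin_lock_irqsave(&ep->lock, flags);
		ep->tick++;
		spin_unlock_irqrestore(&ep->lock, flags);
	}

The trade-off is that nothing inside the spinlock-protected critical
sections may sleep, which the first paragraph above establishes for the
smq functions.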
Parent: 34dd051741
Commit: 4051aab762
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -772,7 +772,7 @@ struct smq_policy {
 	struct dm_cache_policy policy;
 
 	/* protects everything */
-	struct mutex lock;
+	spinlock_t lock;
 	dm_cblock_t cache_size;
 	sector_t cache_block_size;
 
@@ -807,13 +807,7 @@ struct smq_policy {
 	/*
 	 * Keeps track of time, incremented by the core. We use this to
 	 * avoid attributing multiple hits within the same tick.
-	 *
-	 * Access to tick_protected should be done with the spin lock held.
-	 * It's copied to tick at the start of the map function (within the
-	 * mutex).
 	 */
-	spinlock_t tick_lock;
-	unsigned tick_protected;
 	unsigned tick;
 
 	/*
@@ -1296,46 +1290,20 @@ static void smq_destroy(struct dm_cache_policy *p)
 	kfree(mq);
 }
 
-static void copy_tick(struct smq_policy *mq)
-{
-	unsigned long flags, tick;
-
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	tick = mq->tick_protected;
-	if (tick != mq->tick) {
-		update_sentinels(mq);
-		end_hotspot_period(mq);
-		end_cache_period(mq);
-		mq->tick = tick;
-	}
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static bool maybe_lock(struct smq_policy *mq, bool can_block)
-{
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		return true;
-	} else
-		return mutex_trylock(&mq->lock);
-}
-
 static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 		   bool can_block, bool can_migrate, bool fast_promote,
 		   struct bio *bio, struct policy_locker *locker,
 		   struct policy_result *result)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
 	result->op = POLICY_MISS;
 
-	if (!maybe_lock(mq, can_block))
-		return -EWOULDBLOCK;
-
-	copy_tick(mq);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1343,20 +1311,18 @@ static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 	struct entry *e;
 
-	if (!mutex_trylock(&mq->lock))
-		return -EWOULDBLOCK;
-
+	spin_lock_irqsave(&mq->lock, flags);
 	e = h_lookup(&mq->table, oblock);
 	if (e) {
 		*cblock = infer_cblock(mq, e);
 		r = 0;
 	} else
 		r = -ENOENT;
-
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1375,20 +1341,22 @@ static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, boo
 
 static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, true);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, false);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int smq_load_mapping(struct dm_cache_policy *p,
@@ -1433,14 +1401,14 @@ static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
 	struct smq_policy *mq = to_smq_policy(p);
 	int r = 0;
 
-	mutex_lock(&mq->lock);
-
+	/*
+	 * We don't need to lock here since this method is only called once
+	 * the IO has stopped.
+	 */
 	r = smq_save_hints(mq, &mq->clean, fn, context);
 	if (!r)
 		r = smq_save_hints(mq, &mq->dirty, fn, context);
 
-	mutex_unlock(&mq->lock);
-
 	return r;
 }
 
@@ -1458,10 +1426,11 @@ static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
 static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__remove_mapping(mq, oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
@@ -1480,11 +1449,12 @@ static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
 static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __remove_cblock(mq, cblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1537,11 +1507,12 @@ static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
 			      dm_cblock_t *cblock, bool critical_only)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1562,21 +1533,23 @@ static void __force_mapping(struct smq_policy *mq,
 static void smq_force_mapping(struct dm_cache_policy *p,
 			      dm_oblock_t current_oblock, dm_oblock_t new_oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__force_mapping(mq, current_oblock, new_oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 {
 	dm_cblock_t r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = to_cblock(mq->cache_alloc.nr_allocated);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1586,15 +1559,12 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	mq->tick_protected++;
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		copy_tick(mq);
-		mutex_unlock(&mq->lock);
-	}
+	spin_lock_irqsave(&mq->lock, flags);
+	mq->tick++;
+	update_sentinels(mq);
+	end_hotspot_period(mq);
+	end_cache_period(mq);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 /* Init the policy plugin interface function pointers. */
@@ -1694,10 +1664,8 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 	} else
 		mq->cache_hit_bits = NULL;
 
-	mq->tick_protected = 0;
 	mq->tick = 0;
-	mutex_init(&mq->lock);
-	spin_lock_init(&mq->tick_lock);
+	spin_lock_init(&mq->lock);
 
 	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
 	mq->hotspot.nr_top_levels = 8;