map->swap_lock protects map->cleared from concurrent modification. However,
sbitmap_deferred_clear() already drains it atomically, so it is guaranteed
not to lose bits to a concurrent sbitmap_deferred_clear().
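
For illustration, a minimal userspace C11 sketch of the same lock-free drain
pattern (the struct and function names below are hypothetical, not the kernel
code): the atomic exchange hands the accumulated cleared bits to exactly one
caller, so concurrent drainers cannot lose bits even without a lock.

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_word {
	_Atomic unsigned long word;     /* allocated bits */
	_Atomic unsigned long cleared;  /* deferred-freed bits */
};

static bool sketch_deferred_clear(struct sketch_word *map)
{
	unsigned long mask, val;

	/* Cheap early exit, analogous to the READ_ONCE() check. */
	if (!atomic_load_explicit(&map->cleared, memory_order_relaxed))
		return false;

	/* Atomically take ownership of the cleared bits; a racing caller
	 * sees 0 here and simply has nothing left to drain. */
	mask = atomic_exchange(&map->cleared, 0);

	/* Fold the claimed mask into ->word with a CAS loop, mirroring
	 * the cmpxchg() loop in sbitmap_deferred_clear(). */
	val = atomic_load(&map->word);
	while (!atomic_compare_exchange_weak(&map->word, &val, val & ~mask))
		;

	return true;
}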

A single-threaded, tag-heavy test on top of null_blk showed a ~1.5% throughput
increase and a reduction of sbitmap_get() cycles from 3% to 1% according to perf.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2020-11-22 15:35:46 +00:00, committed by Jens Axboe
Parent b78beea038
Commit 661d4f55a7
2 changed files: 3 additions and 16 deletions

include/linux/sbitmap.h

@@ -32,11 +32,6 @@ struct sbitmap_word {
 	 * @cleared: word holding cleared bits
 	 */
 	unsigned long cleared ____cacheline_aligned_in_smp;
-
-	/**
-	 * @swap_lock: Held while swapping word <-> cleared
-	 */
-	spinlock_t swap_lock;
 } ____cacheline_aligned_in_smp;
 
 /**

lib/sbitmap.c

@@ -15,13 +15,9 @@
 static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 {
 	unsigned long mask, val;
-	bool ret = false;
-	unsigned long flags;
 
-	spin_lock_irqsave(&map->swap_lock, flags);
-
-	if (!map->cleared)
-		goto out_unlock;
+	if (!READ_ONCE(map->cleared))
+		return false;
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
@@ -35,10 +31,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 		val = map->word;
 	} while (cmpxchg(&map->word, val, val & ~mask) != val);
 
-	ret = true;
-out_unlock:
-	spin_unlock_irqrestore(&map->swap_lock, flags);
-	return ret;
+	return true;
 }
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -80,7 +73,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 	for (i = 0; i < sb->map_nr; i++) {
 		sb->map[i].depth = min(depth, bits_per_word);
 		depth -= sb->map[i].depth;
-		spin_lock_init(&sb->map[i].swap_lock);
 	}
 	return 0;
 }