rhashtable: avoid unnecessary wakeup for worker queue

Move the checks for whether the hash table size has exceeded its maximum
threshold or fallen to its minimum threshold out of the resizing functions
and into the resizing decision functions, so that the worker queue thread
is not woken unnecessarily for a table that cannot actually be resized.
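The effect is easiest to see from the caller's side: the shift bounds now live inside the predicates, so a fast path that consults rht_grow_above_75()/rht_shrink_below_30() before scheduling the resize worker simply gets false for a table already at its size limit. Below is a minimal standalone userspace model of that pattern; the names model_ht and maybe_schedule_resize are invented for illustration, and the code mirrors the predicates in this patch rather than being the kernel implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Standalone model of the relevant rhashtable state (illustration only). */
struct model_ht {
	size_t nelems;    /* current number of elements */
	size_t shift;     /* log2 of the current table size */
	size_t min_shift; /* lower bound on shift */
	size_t max_shift; /* upper bound on shift; must be nonzero for growth */
};

/* As in this patch: the predicate itself refuses to grow past max_shift. */
static bool grow_above_75(const struct model_ht *ht, size_t new_size)
{
	return ht->nelems > (new_size / 4 * 3) &&
	       (ht->max_shift && ht->shift < ht->max_shift);
}

/* ...and refuses to shrink below min_shift. */
static bool shrink_below_30(const struct model_ht *ht, size_t new_size)
{
	return ht->nelems < (new_size * 3 / 10) &&
	       ht->shift > ht->min_shift;
}

/* Caller side: the worker is only "woken" when a resize can happen. */
static void maybe_schedule_resize(const struct model_ht *ht)
{
	size_t size = (size_t)1 << ht->shift;

	if (grow_above_75(ht, size) || shrink_below_30(ht, size))
		printf("schedule worker\n");
	else
		printf("no wakeup needed\n");
}

int main(void)
{
	/* 16-bucket table already at max_shift: over 75% load, yet no wakeup,
	 * because the grow predicate now checks the bound itself. */
	struct model_ht ht = { .nelems = 13, .shift = 4,
			       .min_shift = 2, .max_shift = 4 };
	maybe_schedule_resize(&ht); /* prints "no wakeup needed" */
	return 0;
}

Keeping the bounds check in the predicate means the decision is made before the workqueue item is ever queued, which is the whole point of the patch.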

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Cc: Thomas Graf <tgraf@suug.ch>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Ying Xue, 2015-01-07 13:41:56 +08:00; committed by David S. Miller
Parent: bd6d4db552
Commit: c0c09bfdc4
2 changed files, 8 insertions(+), 12 deletions(-)

--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h

@@ -113,7 +113,7 @@ struct rhashtable {
 	struct bucket_table __rcu	*tbl;
 	struct bucket_table __rcu	*future_tbl;
 	atomic_t			nelems;
-	size_t				shift;
+	atomic_t			shift;
 	struct rhashtable_params	p;
 	struct delayed_work		run_work;
 	struct mutex			mutex;
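The type change above follows from moving the checks: shift used to be read and written only under ht->mutex inside the resize functions, but the decision functions run on insert/remove paths that hold no mutex, so shift is now read concurrently with the worker's updates. atomic_t makes those unlocked reads and the worker's increments/decrements tear-free. A rough userspace analogy using C11 atomics (an illustration only; the kernel's atomic_t API differs):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int shift; /* plays the role of ht->shift */

/* Worker side (kernel: atomic_inc(&ht->shift), under ht->mutex). */
static void expand_one_step(void)
{
	atomic_fetch_add(&shift, 1);
}

/* Fast-path side (kernel: atomic_read(&ht->shift), no mutex held). */
static int read_shift(void)
{
	return atomic_load(&shift);
}

int main(void)
{
	atomic_store(&shift, 4); /* kernel: atomic_set(&ht->shift, ilog2(size)) */
	expand_one_step();
	printf("shift = %d\n", read_shift()); /* prints "shift = 5" */
	return 0;
}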

--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c

@@ -199,7 +199,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
-	return atomic_read(&ht->nelems) > (new_size / 4 * 3);
+	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
+	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
 }
 EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
@@ -211,7 +212,8 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75);
 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
-	return atomic_read(&ht->nelems) < (new_size * 3 / 10);
+	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
+	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
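For a concrete sense of the two load factors, the integer arithmetic in the predicates puts the grow threshold at new_size / 4 * 3 (75%) and the shrink threshold at new_size * 3 / 10 (30%). A standalone check (illustration only):

#include <stdio.h>

int main(void)
{
	/* Same integer arithmetic as the predicates above. */
	for (unsigned size = 16; size <= 256; size *= 4)
		printf("size %3u: grow above %3u, shrink below %2u\n",
		       size, size / 4 * 3, size * 3 / 10);
	return 0;
}

For example, a 64-bucket table grows once it holds more than 48 entries and shrinks once it drops below 19. The gap between 30% and 75% provides hysteresis, so a table hovering near one threshold does not bounce between expand and shrink.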
@@ -318,14 +320,11 @@ int rhashtable_expand(struct rhashtable *ht)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
-		return 0;
-
 	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
-	ht->shift++;
+	atomic_inc(&ht->shift);
 
 	/* Make insertions go into the new, empty table right away. Deletions
 	 * and lookups will be attempted in both tables until we synchronize.
@@ -421,9 +420,6 @@ int rhashtable_shrink(struct rhashtable *ht)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	if (ht->shift <= ht->p.min_shift)
-		return 0;
-
 	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
@@ -462,7 +458,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 	/* Publish the new, valid hash table */
 	rcu_assign_pointer(ht->tbl, new_tbl);
 
-	ht->shift--;
+	atomic_dec(&ht->shift);
 
 	/* Wait for readers. No new readers will have references to the
 	 * old hash table.
@@ -851,7 +847,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (tbl == NULL)
 		return -ENOMEM;
 
-	ht->shift = ilog2(tbl->size);
+	atomic_set(&ht->shift, ilog2(tbl->size));
 
 	RCU_INIT_POINTER(ht->tbl, tbl);
 	RCU_INIT_POINTER(ht->future_tbl, tbl);