Since the route hash key is a triple, use jhash_3words rather than doing the mixing
directly. This should be as fast and give better distribution.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stephen Hemminger 2008-04-10 01:54:01 -07:00, committed by David S. Miller
Parent: 5969f71d57
Commit: 1294fc4a48
1 changed file with 4 additions and 6 deletions


@@ -259,16 +259,14 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
 		(__raw_get_cpu_var(rt_cache_stat).field++)
 
-static unsigned int rt_hash_code(u32 daddr, u32 saddr)
+static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx)
 {
-	return jhash_2words(daddr, saddr, atomic_read(&rt_genid))
+	return jhash_3words((__force u32)(__be32)(daddr),
+			    (__force u32)(__be32)(saddr),
+			    idx, atomic_read(&rt_genid))
 		& rt_hash_mask;
 }
 
-#define rt_hash(daddr, saddr, idx) \
-	rt_hash_code((__force u32)(__be32)(daddr),\
-		     (__force u32)(__be32)(saddr) ^ ((idx) << 5))
-
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
 	struct seq_net_private p;
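
For illustration only, not part of the patch: a minimal sketch of the old versus new mixing, assuming the jhash_2words()/jhash_3words() signatures from <linux/jhash.h>. The helper names old_style_hash/new_style_hash and the genid/mask parameters are hypothetical stand-ins for the rt_genid and rt_hash_mask globals in the routing code.

/*
 * Illustration only -- not part of the patch.  Assumes:
 *   u32 jhash_2words(u32 a, u32 b, u32 initval);
 *   u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval);
 * from <linux/jhash.h>.  genid/mask stand in for rt_genid/rt_hash_mask.
 */
#include <linux/jhash.h>
#include <linux/types.h>

/* Old scheme: fold the index into saddr by hand, then hash two words.
 * Distinct (saddr, idx) pairs can collapse to the same input word
 * before hashing, e.g. when saddr values differ by (idx delta << 5). */
static inline unsigned int old_style_hash(__be32 daddr, __be32 saddr,
					  int idx, u32 genid,
					  unsigned int mask)
{
	return jhash_2words((__force u32)daddr,
			    (__force u32)saddr ^ ((u32)idx << 5),
			    genid) & mask;
}

/* New scheme: hand all three words of the (daddr, saddr, idx) triple to
 * jhash_3words, which mixes idx through the full avalanche at the same
 * cost. */
static inline unsigned int new_style_hash(__be32 daddr, __be32 saddr,
					  int idx, u32 genid,
					  unsigned int mask)
{
	return jhash_3words((__force u32)daddr,
			    (__force u32)saddr,
			    (u32)idx, genid) & mask;
}

Hashing idx as a separate word avoids those pre-hash collisions and, per the commit message, should be as fast while giving better distribution.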