author     Michal Kubecek <mkubecek@suse.cz>               2018-10-12 14:24:44 +0200
committer  Steffen Klassert <steffen.klassert@secunet.com> 2018-10-15 10:09:18 +0200
commit     8d4b6bce2559755cf2db6513a267fccdfbf7c3ab (patch)
tree       67e95350ad40c9474764892ee84af863bd805f07
parent     f1193e915748291fb205a908db33bd3debece6e2 (diff)
xfrm: use complete IPv6 addresses for hash
In some environments it is common for many hosts to share the same lower
half of their IPv6 addresses (in particular ::1). As __xfrm6_addr_hash()
and __xfrm6_daddr_saddr_hash() calculate the hash only from the lower
halves, as many as 1/3 of the hosts can end up in one hashtable chain,
which harms performance.

Use complete IPv6 addresses when calculating the hashes. Rather than just
adding two more words to the xor, use jhash2() for consistency with
__xfrm6_pref_hash() and __xfrm6_dpref_spref_hash().

Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
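For illustration only, the standalone userspace sketch below (not kernel code;
the helper names and example addresses are made up) shows why xor'ing just the
two low words collides for hosts that share a ::1 suffix, and why mixing all
four 32-bit words does not. The real patch uses the kernel's jhash2(); the
sketch substitutes a simple toy mixer to stay self-contained.

/*
 * Userspace illustration of the hashing problem fixed by this patch.
 * hash_lower_half() mimics the old behaviour (only words 2 and 3 feed
 * the hash); hash_full_addr() mixes all four words, standing in for
 * the jhash2()-based variant.  Build with: cc -o xfrm6_hash_demo demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* old behaviour: only the lower 64 bits contribute */
static unsigned int hash_lower_half(const struct in6_addr *a)
{
	uint32_t w[4];

	memcpy(w, a->s6_addr, sizeof(w));
	return ntohl(w[2] ^ w[3]);
}

/* new behaviour (stand-in): every 32-bit word contributes */
static unsigned int hash_full_addr(const struct in6_addr *a)
{
	uint32_t w[4];
	unsigned int h = 0;
	int i;

	memcpy(w, a->s6_addr, sizeof(w));
	for (i = 0; i < 4; i++)
		h = h * 2654435761u + ntohl(w[i]);	/* toy mixer, not jhash2() */
	return h;
}

int main(void)
{
	/* two hosts on different prefixes that both use the ::1 suffix */
	const char *hosts[] = { "2001:db8:1::1", "2001:db8:2::1" };
	struct in6_addr a;
	int i;

	for (i = 0; i < 2; i++) {
		if (inet_pton(AF_INET6, hosts[i], &a) != 1)
			return 1;
		printf("%-14s  lower-half hash %08x  full hash %08x\n",
		       hosts[i], hash_lower_half(&a), hash_full_addr(&a));
	}
	/* the lower-half hashes are both 0x00000001; the full hashes differ */
	return 0;
}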
-rw-r--r--  net/xfrm/xfrm_hash.h  |  5
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 61be810389d8..ce66323102f9 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -13,7 +13,7 @@ static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
 
 static inline unsigned int __xfrm6_addr_hash(const xfrm_address_t *addr)
 {
-	return ntohl(addr->a6[2] ^ addr->a6[3]);
+	return jhash2((__force u32 *)addr->a6, 4, 0);
 }
 
 static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
@@ -26,8 +26,7 @@ static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
 static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
 						    const xfrm_address_t *saddr)
 {
-	return ntohl(daddr->a6[2] ^ daddr->a6[3] ^
-		     saddr->a6[2] ^ saddr->a6[3]);
+	return __xfrm6_addr_hash(daddr) ^ __xfrm6_addr_hash(saddr);
 }
 
 static inline u32 __bits2mask32(__u8 bits)
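As a rough sketch of why chain length matters here (the helper name bucket_of
and the exact fold step are assumptions for illustration, not code taken from
xfrm_hash.h): the xfrm hash tables are sized as a power of two and the hash
value is masked down to a bucket index, so any two flows whose hash values
are equal, as in the ::1 case above, necessarily land on the same chain.

/* minimal sketch of reducing a hash to a chain index in a
 * power-of-two sized table; names are illustrative assumptions */
#include <stdio.h>

static unsigned int bucket_of(unsigned int hash, unsigned int hmask)
{
	/* fold the high bits in, then mask to table_size - 1 */
	return (hash ^ (hash >> 16)) & hmask;
}

int main(void)
{
	unsigned int hmask = 1024 - 1;	/* 1024-bucket table */

	/* identical hashes map to the same bucket, whatever the table size */
	printf("%u %u\n",
	       bucket_of(0x00000001, hmask), bucket_of(0x00000001, hmask));
	return 0;
}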