author    Eric Dumazet <eric.dumazet@gmail.com>  2012-07-10 19:05:57 +0000
committer David S. Miller <davem@davemloft.net>  2012-07-10 23:13:46 -0700
commit    1a203cb33a7dc791b6c0aedf701e70ac00c50cdb (patch)
tree      e34337ceadbad1aef902ce621e5d81f66bb7a0f5 /include/net/ipv6.h
parent    1aa8b471e09f227455c11d55c4bc94a655ee8497 (diff)
ipv6: optimize IPv6 address compares
On 64-bit arches with efficient unaligned accesses (e.g. x86_64), we can
use long words to reduce the number of instructions, for free.

Joe Perches suggested changing ipv6_masked_addr_cmp() to return a bool
instead of an 'int', to make sure ipv6_masked_addr_cmp() cannot be used
in a sorting function.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
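To see why the two forms agree, here is a minimal standalone sketch (userspace, not part of the patch; in6_like, equal32() and equal64() are hypothetical stand-ins for struct in6_addr and ipv6_addr_equal()). It assumes a 64-bit platform where unsigned long is 8 bytes, mirroring the patch's BITS_PER_LONG == 64 guard:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct in6_addr: 16 bytes as four 32-bit words. */
struct in6_like {
	uint32_t s6_addr32[4];
};

/* Generic path: four 32-bit XORs folded together with OR. */
static bool equal32(const struct in6_like *a1, const struct in6_like *a2)
{
	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
}

/* Optimized path: two 64-bit XORs covering the same 16 bytes.
 * The pointer cast is a demo shortcut; the kernel builds with
 * -fno-strict-aliasing, so the equivalent cast is safe there. */
static bool equal64(const struct in6_like *a1, const struct in6_like *a2)
{
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
}

int main(void)
{
	struct in6_like a, b;

	memset(&a, 0xab, sizeof(a));
	b = a;
	printf("identical:   %d %d\n", equal32(&a, &b), equal64(&a, &b));

	b.s6_addr32[3] ^= 1;	/* flip one bit in the last word */
	printf("one bit off: %d %d\n", equal32(&a, &b), equal64(&a, &b));
	return 0;
}

Both calls print the same result in each case; the 64-bit variant simply does the folding in two steps instead of four.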
Diffstat (limited to 'include/net/ipv6.h')
-rw-r--r--  include/net/ipv6.h  24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index aecf88436abf..d4261d4d6c47 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -298,14 +298,23 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr
 	return memcmp(a1, a2, sizeof(struct in6_addr));
 }
 
-static inline int
+static inline bool
 ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
 		     const struct in6_addr *a2)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul1 = (const unsigned long *)a1;
+	const unsigned long *ulm = (const unsigned long *)m;
+	const unsigned long *ul2 = (const unsigned long *)a2;
+
+	return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
+		  ((ul1[1] ^ ul2[1]) & ulm[1]));
+#else
 	return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
 		  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
 		  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
 		  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
+#endif
 }
 
 static inline void ipv6_addr_prefix(struct in6_addr *pfx,
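For context on the bool change above: ipv6_masked_addr_cmp() answers "do these addresses differ under this mask?", returning true on mismatch rather than a memcmp-style ordering, which is why bool is the honest return type. A hypothetical caller (not from the patch; addr_in_net is an illustrative name) doing a prefix match might look like:

/* Hypothetical caller: true if addr falls inside net/mask.
 * ipv6_masked_addr_cmp() returns true when the masked addresses
 * DIFFER, so a false result means "match". */
static bool addr_in_net(const struct in6_addr *addr,
			const struct in6_addr *net,
			const struct in6_addr *mask)
{
	return !ipv6_masked_addr_cmp(addr, mask, net);
}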
@@ -335,10 +344,17 @@ static inline void ipv6_addr_set(struct in6_addr *addr,
 static inline bool ipv6_addr_equal(const struct in6_addr *a1,
 				   const struct in6_addr *a2)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul1 = (const unsigned long *)a1;
+	const unsigned long *ul2 = (const unsigned long *)a2;
+
+	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
 	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
 		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
 		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
 		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
+#endif
 }
 
 static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
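Equality checks like the one above sit on hot lookup paths, which is what makes shaving instructions worthwhile. A hypothetical illustration (not from the patch; find_addr is an illustrative helper) of the kind of loop that benefits:

/* Hypothetical lookup: index of target in a small address table,
 * or -1 if absent. Each iteration calls ipv6_addr_equal(). */
static int find_addr(const struct in6_addr *table, int n,
		     const struct in6_addr *target)
{
	int i;

	for (i = 0; i < n; i++)
		if (ipv6_addr_equal(&table[i], target))
			return i;
	return -1;
}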
@@ -391,8 +407,14 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul = (const unsigned long *)a;
+
+	return (ul[0] | ul[1]) == 0UL;
+#else
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
 		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
+#endif
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
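ipv6_addr_any() follows the same pattern, folding the whole address with OR and comparing against zero to detect the unspecified address (::). A hypothetical usage sketch (not from the patch; use_wildcard_binding is an illustrative helper, IN6ADDR_ANY_INIT is the standard all-zero initializer):

/* Hypothetical check: was a specific source address requested? */
struct in6_addr bound = IN6ADDR_ANY_INIT;	/* all zeroes, i.e. :: */

if (ipv6_addr_any(&bound))
	use_wildcard_binding();		/* hypothetical helper */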