@@ -558,24 +558,29 @@ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-	const unsigned long *ul = (const unsigned long *)a;
+	const __be64 *be = (const __be64 *)a;
 
-	return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL;
+	return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
 #else
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
-		a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
+		a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
 #endif
 }
 
+/*
+ * Note that we must __force cast these to unsigned long to make sparse happy,
+ * since all of the endian-annotated types are fixed size regardless of arch.
+ */
 static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 {
 	return (
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-		*(__be64 *)a |
+		*(unsigned long *)a |
 #else
-		(a->s6_addr32[0] | a->s6_addr32[1]) |
+		(__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
 #endif
-		(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
+		(__force unsigned long)(a->s6_addr32[2] ^
+					cpu_to_be32(0x0000ffff))) == 0UL;
 }
 
 /*
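As background for the annotations in this hunk: sparse treats __be32/__be64 as "bitwise" restricted types that must not silently mix with plain integers, so an expression compared against 0UL needs __force casts, while a chain that stays entirely within one annotated type (as in the #else branch of ipv6_addr_loopback()) does not. Below is a minimal, standalone userspace sketch of the same pattern, not kernel code: be32, cpu_to_be32(), struct in6_addr_demo, is_loopback() and is_v4mapped() are invented stand-ins for the kernel's __be32, cpu_to_be32(), struct in6_addr and the two helpers patched above.

/*
 * sparse_endian_demo.c - userspace sketch of the sparse endianness
 * annotations used in the hunk above.  Illustrative only.
 *
 * Build normally:     cc -o demo sparse_endian_demo.c
 * Check with sparse:  sparse sparse_endian_demo.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl(), for building test addresses */

/*
 * Under sparse (which predefines __CHECKER__), bitwise-annotated types may
 * not silently mix with ordinary integers; a plain compiler sees the
 * annotations as no-ops.  This mirrors how the kernel defines __be32.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise be32;	/* stand-in for the kernel's __be32 */
#define cpu_to_be32(x)	((__force be32)htonl(x))

struct in6_addr_demo {
	be32 s6_addr32[4];
};

/*
 * Mirrors the generic (#else) branch of the patched ipv6_addr_loopback():
 * every operand in the OR chain is a be32, so the expression never leaves
 * the annotated type, and comparing a bitwise type against 0 is allowed.
 * No __force cast is needed.
 */
static bool is_loopback(const struct in6_addr_demo *a)
{
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
}

/*
 * Mirrors the patched ipv6_addr_v4mapped(): here the result is compared
 * against 0UL, so the annotated values must be __force cast to unsigned
 * long first.  Dropping the casts makes sparse warn that the restricted
 * type degrades to an integer.
 */
static bool is_v4mapped(const struct in6_addr_demo *a)
{
	return ((__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
		(__force unsigned long)(a->s6_addr32[2] ^
					cpu_to_be32(0x0000ffff))) == 0UL;
}

int main(void)
{
	/* ::ffff:192.0.2.1 (v4-mapped) and ::1 (loopback) */
	struct in6_addr_demo mapped = {
		.s6_addr32 = { 0, 0, cpu_to_be32(0x0000ffff),
			       cpu_to_be32(0xc0000201) },
	};
	struct in6_addr_demo lo = {
		.s6_addr32 = { 0, 0, 0, cpu_to_be32(1) },
	};

	printf("mapped:   v4mapped=%d loopback=%d\n",
	       is_v4mapped(&mapped), is_loopback(&mapped));	/* 1 0 */
	printf("loopback: v4mapped=%d loopback=%d\n",
	       is_v4mapped(&lo), is_loopback(&lo));		/* 0 1 */
	return 0;
}

Removing the __force casts from is_v4mapped() and re-running sparse should reproduce the "restricted type degrades to integer" class of warning that the patched annotations are meant to keep out of the kernel build.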