@@ -962,34 +962,66 @@ void crypto_inc(u8 *a, unsigned int size)
 	__be32 *b = (__be32 *)(a + size);
 	u32 c;
 
-	for (; size >= 4; size -= 4) {
-		c = be32_to_cpu(*--b) + 1;
-		*b = cpu_to_be32(c);
-		if (c)
-			return;
-	}
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    !((unsigned long)b & (__alignof__(*b) - 1)))
+		for (; size >= 4; size -= 4) {
+			c = be32_to_cpu(*--b) + 1;
+			*b = cpu_to_be32(c);
+			if (c)
+				return;
+		}
 
 	crypto_inc_byte(a, size);
 }
 EXPORT_SYMBOL_GPL(crypto_inc);
 
-static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
+void __crypto_xor(u8 *dst, const u8 *src, unsigned int len)
 {
-	for (; size; size--)
-		*a++ ^= *b++;
-}
+	int relalign = 0;
+
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+		int size = sizeof(unsigned long);
+		int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1);
+
+		relalign = d ? 1 << __ffs(d) : size;
+
+		/*
+		 * If we care about alignment, process as many bytes as
+		 * needed to advance dst and src to values whose alignments
+		 * equal their relative alignment. This will allow us to
+		 * process the remainder of the input using optimal strides.
+		 */
+		while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+			*dst++ ^= *src++;
+			len--;
+		}
+	}
 
-void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
-{
-	u32 *a = (u32 *)dst;
-	u32 *b = (u32 *)src;
+	while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+		*(u64 *)dst ^= *(u64 *)src;
+		dst += 8;
+		src += 8;
+		len -= 8;
+	}
 
-	for (; size >= 4; size -= 4)
-		*a++ ^= *b++;
+	while (len >= 4 && !(relalign & 3)) {
+		*(u32 *)dst ^= *(u32 *)src;
+		dst += 4;
+		src += 4;
+		len -= 4;
+	}
+
+	while (len >= 2 && !(relalign & 1)) {
+		*(u16 *)dst ^= *(u16 *)src;
+		dst += 2;
+		src += 2;
+		len -= 2;
+	}
 
-	crypto_xor_byte((u8 *)a, (u8 *)b, size);
+	while (len--)
+		*dst++ ^= *src++;
 }
-EXPORT_SYMBOL_GPL(crypto_xor);
+EXPORT_SYMBOL_GPL(__crypto_xor);
 
 unsigned int crypto_alg_extsize(struct crypto_alg *alg)
 {
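
As a quick illustration of the relative-alignment calculation in
__crypto_xor() above: d keeps only the low-order bits in which dst and
src differ, and the lowest set bit of d is the largest power-of-two
stride at which both pointers can be brought to the same alignment.
A minimal userspace sketch of that calculation (the pointer values are
made up, and __builtin_ctzl() stands in for the kernel's __ffs()):

#include <stdio.h>

int main(void)
{
	/* dst is 4096-byte aligned; src sits 2 bytes past an 8-byte boundary. */
	unsigned long dst = 0x1000, src = 0x2002;
	int size = sizeof(unsigned long);

	/* Bits in which the two addresses disagree, below the word size. */
	int d = (int)((dst ^ src) & (size - 1));

	/* Largest safe stride: lowest set bit of d, or a full word if d == 0. */
	int relalign = d ? 1 << __builtin_ctzl((unsigned long)d) : size;

	printf("relalign = %d\n", relalign);	/* prints 2 for these values */
	return 0;
}

With relalign == 2, the u64 and u32 loops are skipped (relalign & 7 and
relalign & 3 are nonzero) and the bulk of the buffer is handled by the
u16 loop, which matches how the strides are gated in the patch.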