checksum.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2001 Thiemo Seufer.
 * Copyright (C) 2002 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H

#include <linux/in6.h>

#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

__wsum __csum_partial_copy_kernel(const void *src, void *dst,
                                  int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_from_user(const void *src, void *dst,
                                     int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_to_user(const void *src, void *dst,
                                   int len, __wsum sum, int *err_ptr);

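/*
 * Illustrative sketch (not part of this header): as described above, the
 * running 32-bit sum returned by csum_partial() can be fed back in when a
 * payload is split across fragments, and then folded or handed to
 * csum_tcpudp_magic(). frag1, frag2, len1 and len2 are hypothetical.
 *
 *	__wsum sum = csum_partial(frag1, len1, 0);
 *
 *	sum = csum_partial(frag2, len2, sum);	// only the last fragment may be odd-sized
 *	__sum16 folded = csum_fold(sum);	// or pass sum to csum_tcpudp_magic()
 */
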
/*
 * This is a version of the above that records errors it finds in *err_ptr,
 * but continues and zeros the rest of the buffer.
 */
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                                   __wsum sum, int *err_ptr)
{
        might_fault();
        if (segment_eq(get_fs(), get_ds()))
                return __csum_partial_copy_kernel((__force void *)src, dst,
                                                  len, sum, err_ptr);
        else
                return __csum_partial_copy_from_user((__force void *)src, dst,
                                                     len, sum, err_ptr);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
                               int len, __wsum sum, int *err_ptr)
{
        if (access_ok(VERIFY_READ, src, len))
                return csum_partial_copy_from_user(src, dst, len, sum,
                                                   err_ptr);
        if (len)
                *err_ptr = -EFAULT;

        return sum;
}

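/*
 * Illustrative sketch (not part of this header): a typical caller copies a
 * user buffer into a kernel buffer while checksumming it, then checks the
 * error flag. ubuf, kbuf and len are hypothetical.
 *
 *	int err = 0;
 *	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len, 0, &err);
 *
 *	if (err)
 *		return err;	// -EFAULT; the rest of kbuf was zeroed
 */
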
/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
                             __wsum sum, int *err_ptr)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, dst, len)) {
                if (segment_eq(get_fs(), get_ds()))
                        return __csum_partial_copy_kernel(src,
                                                          (__force void *)dst,
                                                          len, sum, err_ptr);
                else
                        return __csum_partial_copy_to_user(src,
                                                           (__force void *)dst,
                                                           len, sum, err_ptr);
        }
        if (len)
                *err_ptr = -EFAULT;

        return (__force __wsum)-1; /* invalid checksum */
}

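/*
 * Illustrative sketch (not part of this header): the outbound direction is
 * symmetric, checksumming while copying out to user space. kbuf, ubuf and
 * len are hypothetical.
 *
 *	int err = 0;
 *	__wsum sum = csum_and_copy_to_user(kbuf, ubuf, len, 0, &err);
 *
 *	if (err)
 *		return err;	// sum is the invalid value (__wsum)-1 on failure
 */
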
/*
 * the same as csum_partial, but copies from user space (but on MIPS
 * we have just one address space, so this is identical to the above)
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                 int len, __wsum sum);

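/*
 * Illustrative sketch (not part of this header): when the source is already
 * known to be safe to access, the copy and the checksum can be combined in
 * one pass. src, dst and len are hypothetical.
 *
 *	__wsum sum = csum_partial_copy_nocheck(src, dst, len, 0);
 */
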
/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
        __asm__(
        "       .set    push            # csum_fold\n"
        "       .set    noat            \n"
        "       sll     $1, %0, 16      \n"
        "       addu    %0, $1          \n"
        "       sltu    $1, %0, $1      \n"
        "       srl     %0, %0, 16      \n"
        "       addu    %0, $1          \n"
        "       xori    %0, 0xffff      \n"
        "       .set    pop"
        : "=r" (sum)
        : "0" (sum));

        return (__force __sum16)sum;
}

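/*
 * Rough C equivalent of the assembly above (illustrative sketch only, not
 * from the kernel sources): add the two 16-bit halves with an end-around
 * carry, then take the one's complement.
 *
 *	u32 s = (__force u32)sum;
 *	u32 t = s << 16;
 *
 *	s += t;				// high half += low half
 *	s = (s >> 16) + (s < t);	// keep the end-around carry
 *	return (__force __sum16)(s ^ 0xffff);
 */
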
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        const unsigned int *word = iph;
        const unsigned int *stop = word + ihl;
        unsigned int csum;
        int carry;

        csum = word[0];
        csum += word[1];
        carry = (csum < word[1]);
        csum += carry;

        csum += word[2];
        carry = (csum < word[2]);
        csum += carry;

        csum += word[3];
        carry = (csum < word[3]);
        csum += carry;

        word += 4;
        do {
                csum += *word;
                carry = (csum < *word);
                csum += carry;
                word++;
        } while (word != stop);

        return csum_fold(csum);
}

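/*
 * Illustrative sketch (not part of this header): verifying the header
 * checksum of a received IPv4 packet, where skb is a hypothetical socket
 * buffer. ihl is in 32-bit words, and a result of 0 means the checksum
 * (which covers the stored checksum field) is valid.
 *
 *	struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum((void *)iph, iph->ihl))
 *		goto drop;	// bad header checksum
 */
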
static inline __wsum csum_tcpudp_nofold(__be32 saddr,
        __be32 daddr, unsigned short len, unsigned short proto,
        __wsum sum)
{
        __asm__(
        "       .set    push            # csum_tcpudp_nofold\n"
        "       .set    noat            \n"
#ifdef CONFIG_32BIT
        "       addu    %0, %2          \n"
        "       sltu    $1, %0, %2      \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %3          \n"
        "       sltu    $1, %0, %3      \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %4          \n"
        "       sltu    $1, %0, %4      \n"
        "       addu    %0, $1          \n"
#endif
#ifdef CONFIG_64BIT
        "       daddu   %0, %2          \n"
        "       daddu   %0, %3          \n"
        "       daddu   %0, %4          \n"
        "       dsll32  $1, %0, 0       \n"
        "       daddu   %0, $1          \n"
        "       dsra32  %0, %0, 0       \n"
#endif
        "       .set    pop"
        : "=r" (sum)
        : "0" ((__force unsigned long)daddr),
          "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
          "r" ((proto + len) << 8),
#else
          "r" (proto + len),
#endif
          "r" ((__force unsigned long)sum));

        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                        unsigned short len,
                                        unsigned short proto,
                                        __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

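/*
 * Illustrative sketch (not part of this header): filling in a UDP checksum
 * by checksumming the UDP header plus payload with csum_partial() and the
 * pseudo-header here. uh, saddr, daddr and ulen are hypothetical.
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 *	if (!uh->check)
 *		uh->check = CSUM_MANGLED_0;	// UDP uses 0 to mean "no checksum"
 */
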
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

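/*
 * Illustrative sketch (not part of this header): ICMP has no pseudo-header,
 * so the message is checksummed directly with the checksum field zeroed
 * first. icmph and len are hypothetical.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */
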
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                          const struct in6_addr *daddr,
                                          __u32 len, unsigned short proto,
                                          __wsum sum)
{
        __asm__(
        "       .set    push            # csum_ipv6_magic\n"
        "       .set    noreorder       \n"
        "       .set    noat            \n"
        "       addu    %0, %5          # proto (long in network byte order)\n"
        "       sltu    $1, %0, %5      \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %6          # csum\n"
        "       sltu    $1, %0, %6      \n"
        "       lw      %1, 0(%2)       # four words source address\n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 4(%2)       \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 8(%2)       \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 12(%2)      \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 0(%3)       \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 4(%3)       \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 8(%3)       \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       lw      %1, 12(%3)      \n"
        "       addu    %0, $1          \n"
        "       addu    %0, %1          \n"
        "       sltu    $1, %0, %1      \n"
        "       addu    %0, $1          # Add final carry\n"
        "       .set    pop"
        : "=r" (sum), "=r" (proto)
        : "r" (saddr), "r" (daddr),
          "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));

        return csum_fold(sum);
}

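/*
 * Illustrative sketch (not part of this header): folding a TCP segment's
 * checksum over the IPv6 pseudo-header. skb, saddr, daddr and len are
 * hypothetical; csum_ipv6_magic() already folds and complements the result.
 *
 *	__wsum csum = csum_partial(tcp_hdr(skb), len, 0);
 *
 *	tcp_hdr(skb)->check = csum_ipv6_magic(saddr, daddr, len,
 *					      IPPROTO_TCP, csum);
 */
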
#endif /* _ASM_CHECKSUM_H */