/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the register definitions
 * from the n64 ABI register naming to the o32 ABI register naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15

#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD   ld
#define LOAD32 lwu
#define ADD    daddu
#define NBYTES 8

#else

#define LOAD   lw
#define LOAD32 lw
#define ADD    addu
#define NBYTES 4

#endif /* USE_DOUBLE */

#define UNIT(unit) ((unit)*NBYTES)

#define ADDC(sum,reg)                   \
        .set push;                      \
        .set noat;                      \
        ADD sum, reg;                   \
        sltu v1, sum, reg;              \
        ADD sum, v1;                    \
        .set pop

#define ADDC32(sum,reg)                 \
        .set push;                      \
        .set noat;                      \
        addu sum, reg;                  \
        sltu v1, sum, reg;              \
        addu sum, v1;                   \
        .set pop
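
/*
 * ADDC is the ones'-complement accumulation step: if the add wraps, sum
 * ends up (unsigned) less than reg, so "sltu v1, sum, reg" captures the
 * carry and it is added back in (end-around carry).  For example,
 * 0xffffffff + 0x00000002 wraps to 0x00000001 and the carry brings it to
 * 0x00000002, the ones'-complement result.  ADDC32 is the 32-bit-only
 * variant used for folding in the caller-supplied partial checksum.
 */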
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)    \
        LOAD _t0, (offset + UNIT(0))(src);                      \
        LOAD _t1, (offset + UNIT(1))(src);                      \
        LOAD _t2, (offset + UNIT(2))(src);                      \
        LOAD _t3, (offset + UNIT(3))(src);                      \
        ADDC(sum, _t0);                                         \
        ADDC(sum, _t1);                                         \
        ADDC(sum, _t2);                                         \
        ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)     \
        CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)     \
        CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);   \
        CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif
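
/*
 * Either way CSUM_BIGCHUNK consumes 32 bytes per invocation: one pass of
 * four 8-byte loads when USE_DOUBLE is defined, or two CSUM_BIGCHUNK1
 * passes of four 4-byte loads on 32-bit kernels.
 */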
/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */
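/*
 * This roughly corresponds to the C prototype
 *     __wsum csum_partial(const void *buff, int len, __wsum sum);
 * declared in the architecture's asm/checksum.h.
 */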
#define src a0
#define sum v0

        .text
        .set noreorder
        .align 5
LEAF(csum_partial)
        move sum, zero
        move t7, zero

        sltiu t8, a1, 0x8
        bnez t8, .Lsmall_csumcpy        /* < 8 bytes to copy */
        move t2, a1

        andi t7, src, 0x1               /* odd buffer? */

.Lhword_align:
        beqz t7, .Lword_align
        andi t8, src, 0x2

        lbu t0, (src)
        LONG_SUBU a1, a1, 0x1
#ifdef __MIPSEL__
        sll t0, t0, 8
#endif
        ADDC(sum, t0)
        PTR_ADDU src, src, 0x1
        andi t8, src, 0x2

.Lword_align:
        beqz t8, .Ldword_align
        sltiu t8, a1, 56

        lhu t0, (src)
        LONG_SUBU a1, a1, 0x2
        ADDC(sum, t0)
        sltiu t8, a1, 56
        PTR_ADDU src, src, 0x2

.Ldword_align:
        bnez t8, .Ldo_end_words
        move t8, a1

        andi t8, src, 0x4
        beqz t8, .Lqword_align
        andi t8, src, 0x8

        LOAD32 t0, 0x00(src)
        LONG_SUBU a1, a1, 0x4
        ADDC(sum, t0)
        PTR_ADDU src, src, 0x4
        andi t8, src, 0x8

.Lqword_align:
        beqz t8, .Loword_align
        andi t8, src, 0x10

#ifdef USE_DOUBLE
        ld t0, 0x00(src)
        LONG_SUBU a1, a1, 0x8
        ADDC(sum, t0)
#else
        lw t0, 0x00(src)
        lw t1, 0x04(src)
        LONG_SUBU a1, a1, 0x8
        ADDC(sum, t0)
        ADDC(sum, t1)
#endif
        PTR_ADDU src, src, 0x8
        andi t8, src, 0x10

.Loword_align:
        beqz t8, .Lbegin_movement
        LONG_SRL t8, a1, 0x7

#ifdef USE_DOUBLE
        ld t0, 0x00(src)
        ld t1, 0x08(src)
        ADDC(sum, t0)
        ADDC(sum, t1)
#else
        CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
        LONG_SUBU a1, a1, 0x10
        PTR_ADDU src, src, 0x10
        LONG_SRL t8, a1, 0x7

.Lbegin_movement:
        beqz t8, 1f
        andi t2, a1, 0x40

.Lmove_128bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
        LONG_SUBU t8, t8, 0x01
        .set reorder                    /* DADDI_WAR */
        PTR_ADDU src, src, 0x80
        bnez t8, .Lmove_128bytes
        .set noreorder

1:
        beqz t2, 1f
        andi t2, a1, 0x20

.Lmove_64bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
        PTR_ADDU src, src, 0x40

1:
        beqz t2, .Ldo_end_words
        andi t8, a1, 0x1c

.Lmove_32bytes:
        CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
        andi t8, a1, 0x1c
        PTR_ADDU src, src, 0x20

.Ldo_end_words:
        beqz t8, .Lsmall_csumcpy
        andi t2, a1, 0x3
        LONG_SRL t8, t8, 0x2

.Lend_words:
        LOAD32 t0, (src)
        LONG_SUBU t8, t8, 0x1
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        PTR_ADDU src, src, 0x4
        bnez t8, .Lend_words
        .set noreorder

/* unknown src alignment and < 8 bytes to go */
.Lsmall_csumcpy:
        move a1, t2

        andi t0, a1, 4
        beqz t0, 1f
        andi t0, a1, 2

        /* Still a full word to go */
        ulw t1, (src)
        PTR_ADDIU src, 4
#ifdef USE_DOUBLE
        dsll t1, t1, 32                 /* clear lower 32bit */
#endif
        ADDC(sum, t1)

1:      move t1, zero
        beqz t0, 1f
        andi t0, a1, 1

        /* Still a halfword to go */
        ulhu t1, (src)
        PTR_ADDIU src, 2

1:      beqz t0, 1f
        sll t1, t1, 16

        lbu t2, (src)
        nop

#ifdef __MIPSEB__
        sll t2, t2, 8
#endif
        or t1, t2

1:      ADDC(sum, t1)

        /* fold checksum */
#ifdef USE_DOUBLE
        dsll32 v1, sum, 0
        daddu sum, v1
        sltu v1, sum, v1
        dsra32 sum, sum, 0
        addu sum, v1
#endif
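        /*
         * The fold above adds the upper and lower 32-bit halves of the
         * 64-bit accumulator, folds any carry out of bit 63 back in, and
         * leaves a 32-bit partial checksum in sum.
         */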
        /* odd buffer alignment? */
#ifdef CONFIG_CPU_MIPSR2
        wsbh v1, sum
        movn sum, v1, t7
#else
        beqz t7, 1f                     /* odd buffer alignment? */
        lui v1, 0x00ff
        addu v1, 0x00ff
        and t0, sum, v1
        sll t0, t0, 8
        srl sum, sum, 8
        and sum, sum, v1
        or sum, sum, t0
1:
#endif
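        /*
         * If the buffer started on an odd address, every byte was
         * accumulated one lane off, so the result needs its bytes swapped
         * within each 16-bit word: wsbh plus a conditional move on the odd
         * flag (t7) on MIPS R2, or the equivalent shift/mask sequence with
         * the 0x00ff00ff constant otherwise.
         */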
        .set reorder
        /* Add the passed partial csum. */
        ADDC32(sum, a2)
        jr ra
        .set noreorder
END(csum_partial)

/*
 * checksum and copy routines based on memcpy.S
 *
 *      csum_partial_copy_nocheck(src, dst, len, sum)
 *      __csum_partial_copy_kernel(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */
#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9
/*
 * The exception handler for loads requires that:
 *  1- AT contains the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *      not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */
/* Instruction type */
#define LD_INSN 1
#define ST_INSN 2
#define LEGACY_MODE 1
#define EVA_MODE    2
#define USEROP   1
#define KERNELOP 2

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 * Arguments:
 * insn    : Load/store instruction
 * type    : Instruction type
 * reg     : Register
 * addr    : Address
 * handler : Exception handler
 */
#define EXC(insn, type, reg, addr, handler)                     \
        .if \mode == LEGACY_MODE;                               \
9:              insn reg, addr;                                 \
                .section __ex_table,"a";                        \
                PTR 9b, handler;                                \
                .previous;                                      \
        /* This is enabled in EVA mode */                       \
        .else;                                                  \
                /* If loading from user or storing to user */   \
                .if ((\from == USEROP) && (type == LD_INSN)) || \
                    ((\to == USEROP) && (type == ST_INSN));     \
9:                      __BUILD_EVA_INSN(insn##e, reg, addr);   \
                        .section __ex_table,"a";                \
                        PTR 9b, handler;                        \
                        .previous;                              \
                .else;                                          \
                        /* EVA without exception */             \
                        insn reg, addr;                         \
                .endif;                                         \
        .endif
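
/*
 * In LEGACY_MODE every wrapped access gets an __ex_table entry pairing the
 * instruction's address (local label 9:) with its fault handler.  In
 * EVA_MODE only accesses that actually touch user memory are rewritten to
 * their EVA variants (insn##e) and get an exception entry; kernel-side
 * accesses are emitted as plain loads/stores.
 */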
#undef LOAD

#ifdef USE_DOUBLE

#define LOADK ld /* No exception */
#define LOAD(reg, addr, handler)        EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)      EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)       EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)       EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)      EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)      EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)      EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)       EXC(sd, ST_INSN, reg, addr, handler)
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

#else

#define LOADK lw /* No exception */
#define LOAD(reg, addr, handler)        EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)      EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)       EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)       EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)      EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)      EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)      EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)       EXC(sw, ST_INSN, reg, addr, handler)
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif
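
/*
 * LDFIRST/LDREST (and STFIRST/STREST) are the lwl/lwr-style pairs used to
 * load or store an unaligned word; which member of a pair touches the
 * lower address depends on endianness.  SHIFT_DISCARD shifts bytes lying
 * beyond the requested length out of a loaded word before it is summed,
 * and SHIFT_DISCARD_REVERT puts the kept bytes back in their original
 * lanes so they are added with the right weight.
 */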
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define ADDRMASK (NBYTES-1)

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        .set noat
#else
        .set at=v1
#endif
        .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck

        PTR_ADDU AT, src, len           /* See (1) above. */
        /* initialize __nocheck if this is the first time we execute this
         * macro
         */
#ifdef CONFIG_64BIT
        move errptr, a4
#else
        lw errptr, 16(sp)
#endif
        .if \__nocheck == 1
        FEXPORT(csum_partial_copy_nocheck)
        .endif
        move sum, zero
        move odd, zero
        /*
         * Note: dst & src may be unaligned, len may be 0
         * Temps
         */
        /*
         * The "issue break"s below are very approximate.
         * Issue delays for dcache fills will perturb the schedule, as will
         * load queue full replay traps, etc.
         *
         * If len < NBYTES use byte operations.
         */
        sltu t2, len, NBYTES
        and t1, dst, ADDRMASK
        bnez t2, .Lcopy_bytes_checklen\@
        and t0, src, ADDRMASK
        andi odd, dst, 0x1              /* odd buffer? */
        bnez t1, .Ldst_unaligned\@
        nop
        bnez t0, .Lsrc_unaligned_dst_aligned\@
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
         */
.Lboth_aligned\@:
        SRL t0, len, LOG_NBYTES+3       # +3 for 8 units/iter
        beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
        nop
        SUB len, 8*NBYTES               # subtract here for bgez loop
        .align 4
1:
        LOAD(t0, UNIT(0)(src), .Ll_exc\@)
        LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
        LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
        LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
        LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
        LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
        LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
        LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
        SUB len, len, 8*NBYTES
        ADD src, src, 8*NBYTES
        STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
        STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
        STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
        STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
        STORE(t4, UNIT(4)(dst), .Ls_exc\@)
        ADDC(sum, t4)
        STORE(t5, UNIT(5)(dst), .Ls_exc\@)
        ADDC(sum, t5)
        STORE(t6, UNIT(6)(dst), .Ls_exc\@)
        ADDC(sum, t6)
        STORE(t7, UNIT(7)(dst), .Ls_exc\@)
        ADDC(sum, t7)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 8*NBYTES
        bgez len, 1b
        .set noreorder
        ADD len, 8*NBYTES               # revert len (see above)

        /*
         * len == the number of bytes left to copy < 8*NBYTES
         */
.Lcleanup_both_aligned\@:
#define rem t7
        beqz len, .Ldone\@
        sltu t0, len, 4*NBYTES
        bnez t0, .Lless_than_4units\@
        and rem, len, (NBYTES-1)        # rem = len % NBYTES
        /*
         * len >= 4*NBYTES
         */
        LOAD(t0, UNIT(0)(src), .Ll_exc\@)
        LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
        LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
        LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
        SUB len, len, 4*NBYTES
        ADD src, src, 4*NBYTES
        STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
        STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
        STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
        STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 4*NBYTES
        beqz len, .Ldone\@
        .set noreorder
.Lless_than_4units\@:
        /*
         * rem = len % NBYTES
         */
        beq rem, len, .Lcopy_bytes\@
        nop
1:
        LOAD(t0, 0(src), .Ll_exc\@)
        ADD src, src, NBYTES
        SUB len, len, NBYTES
        STORE(t0, 0(dst), .Ls_exc\@)
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, NBYTES
        bne rem, len, 1b
        .set noreorder

        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
         * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst.  Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
         * This code should perform better than a simple loop on modern,
         * wide-issue mips processors because the code has fewer branches and
         * more instruction-level parallelism.
         */
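        /*
         * rem below is the number of bits to keep (8 * bytes remaining) and
         * bits the number to discard.  SHIFT_DISCARD drops the bytes past
         * the end of the buffer, STREST writes only the valid bytes ending
         * at dst + len - 1, and SHIFT_DISCARD_REVERT restores the kept
         * bytes to their original lanes before they are added to sum.
         */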
#define bits t2
        beqz len, .Ldone\@
        ADD t1, dst, len                # t1 is just past last byte of dst
        li bits, 8*NBYTES
        SLL rem, len, 3                 # rem = number of bits to keep
        LOAD(t0, 0(src), .Ll_exc\@)
        SUB bits, bits, rem             # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
        STREST(t0, -1(t1), .Ls_exc\@)
        SHIFT_DISCARD_REVERT t0, t0, bits
        .set reorder
        ADDC(sum, t0)
        b .Ldone\@
        .set noreorder
.Ldst_unaligned\@:
        /*
         * dst is unaligned
         * t0 = src & ADDRMASK
         * t1 = dst & ADDRMASK; t1 > 0
         * len >= NBYTES
         *
         * Copy enough bytes to align dst
         * Set match = (src and dst have same alignment)
         */
#define match rem
        LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
        ADD t2, zero, NBYTES
        LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
        SUB t2, t2, t1                  # t2 = number of bytes copied
        xor match, t0, t1
        STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
        SLL t4, t1, 3                   # t4 = number of bits to discard
        SHIFT_DISCARD t3, t3, t4
        /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
        ADDC(sum, t3)
        beq len, t2, .Ldone\@
        SUB len, len, t2
        ADD dst, dst, t2
        beqz match, .Lboth_aligned\@
        ADD src, src, t2

.Lsrc_unaligned_dst_aligned\@:
        SRL t0, len, LOG_NBYTES+2       # +2 for 4 units/iter
        beqz t0, .Lcleanup_src_unaligned\@
        and rem, len, (4*NBYTES-1)      # rem = len % 4*NBYTES
1:
        /*
         * Avoid consecutive LD*'s to the same register since some mips
         * implementations can't issue them in the same cycle.
         * It's OK to load FIRST(N+1) before REST(N) because the two addresses
         * are to the same unit (unless src is aligned, but it's not).
         */
        LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
        LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
        SUB len, len, 4*NBYTES
        LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
        LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
        LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
        LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
        LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
        ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
        nop                             # improves slotting
#endif
        STORE(t0, UNIT(0)(dst), .Ls_exc\@)
        ADDC(sum, t0)
        STORE(t1, UNIT(1)(dst), .Ls_exc\@)
        ADDC(sum, t1)
        STORE(t2, UNIT(2)(dst), .Ls_exc\@)
        ADDC(sum, t2)
        STORE(t3, UNIT(3)(dst), .Ls_exc\@)
        ADDC(sum, t3)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 4*NBYTES
        bne len, rem, 1b
        .set noreorder

.Lcleanup_src_unaligned\@:
        beqz len, .Ldone\@
        and rem, len, NBYTES-1          # rem = len % NBYTES
        beq rem, len, .Lcopy_bytes\@
        nop
1:
        LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
        LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
        ADD src, src, NBYTES
        SUB len, len, NBYTES
        STORE(t0, 0(dst), .Ls_exc\@)
        ADDC(sum, t0)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, NBYTES
        bne len, rem, 1b
        .set noreorder

.Lcopy_bytes_checklen\@:
        beqz len, .Ldone\@
        nop
.Lcopy_bytes\@:
        /* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
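        /*
         * Each remaining byte is copied individually and also shifted into
         * its lane of the partial word t2 (SHIFT_START/SHIFT_INC pick the
         * lane order for the endianness), so the whole tail is added to the
         * checksum as a single word at .Lcopy_bytes_done.
         */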
        move t2, zero                   # partial word
        li t3, SHIFT_START              # shift
        /* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)                            \
        LOADBU(t0, N(src), .Ll_exc_copy\@);     \
        SUB len, len, 1;                        \
        STOREB(t0, N(dst), .Ls_exc\@);          \
        SLLV t0, t0, t3;                        \
        addu t3, SHIFT_INC;                     \
        beqz len, .Lcopy_bytes_done\@;          \
        or t2, t0

        COPY_BYTE(0)
        COPY_BYTE(1)
#ifdef USE_DOUBLE
        COPY_BYTE(2)
        COPY_BYTE(3)
        COPY_BYTE(4)
        COPY_BYTE(5)
#endif
        LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
        SUB len, len, 1
        STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
        SLLV t0, t0, t3
        or t2, t0
.Lcopy_bytes_done\@:
        ADDC(sum, t2)
.Ldone\@:
        /* fold checksum */
        .set push
        .set noat
#ifdef USE_DOUBLE
        dsll32 v1, sum, 0
        daddu sum, v1
        sltu v1, sum, v1
        dsra32 sum, sum, 0
        addu sum, v1
#endif

#ifdef CONFIG_CPU_MIPSR2
        wsbh v1, sum
        movn sum, v1, odd
#else
        beqz odd, 1f                    /* odd buffer alignment? */
        lui v1, 0x00ff
        addu v1, 0x00ff
        and t0, sum, v1
        sll t0, t0, 8
        srl sum, sum, 8
        and sum, sum, v1
        or sum, sum, t0
1:
#endif
        .set pop
        .set reorder
        ADDC32(sum, psum)
        jr ra
        .set noreorder

.Ll_exc_copy\@:
        /*
         * Copy bytes from src until faulting load address (or until a
         * lb faults)
         *
         * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
         * may be more than a byte beyond the last address.
         * Hence, the lb below may get an exception.
         *
         * Assumes src < THREAD_BUADDR($28)
         */
        LOADK t0, TI_TASK($28)
        li t2, SHIFT_START
        LOADK t0, THREAD_BUADDR(t0)
1:
        LOADBU(t1, 0(src), .Ll_exc\@)
        ADD src, src, 1
        sb t1, 0(dst)                   # can't fault -- we're copy_from_user
        SLLV t1, t1, t2
        addu t2, SHIFT_INC
        ADDC(sum, t1)
        .set reorder                    /* DADDI_WAR */
        ADD dst, dst, 1
        bne src, t0, 1b
        .set noreorder
.Ll_exc\@:
        LOADK t0, TI_TASK($28)
        nop
        LOADK t0, THREAD_BUADDR(t0)     # t0 is just past last good address
        nop
        SUB len, AT, t0                 # len number of uncopied bytes
        /*
         * Here's where we rely on src and dst being incremented in tandem,
         * See (3) above.
         * dst += (fault addr - src) to put dst at first byte to clear
         */
        ADD dst, t0                     # compute start address in a1
        SUB dst, src
        /*
         * Clear len bytes starting at dst.  Can't call __bzero because it
         * might modify len.  An inefficient loop for these rare times...
         */
        .set reorder                    /* DADDI_WAR */
        SUB src, len, 1
        beqz len, .Ldone\@
        .set noreorder
1:      sb zero, 0(dst)
        ADD dst, dst, 1
        .set push
        .set noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        bnez src, 1b
        SUB src, src, 1
#else
        li v1, 1
        bnez src, 1b
        SUB src, src, v1
#endif
        li v1, -EFAULT
        b .Ldone\@
        sw v1, (errptr)

.Ls_exc\@:
        li v0, -1                       /* invalid checksum */
        li v1, -EFAULT
        jr ra
        sw v1, (errptr)
        .set pop
        .endm

LEAF(__csum_partial_copy_kernel)
#ifndef CONFIG_EVA
FEXPORT(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
#endif
        __BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)

#ifdef CONFIG_EVA
LEAF(__csum_partial_copy_to_user)
        __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
END(__csum_partial_copy_to_user)

LEAF(__csum_partial_copy_from_user)
        __BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
END(__csum_partial_copy_from_user)
#endif