/*
 * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
 *
 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>

#define AES_ENTRY(func)		ENTRY(neon_ ## func)
#define AES_ENDPROC(func)	ENDPROC(neon_ ## func)

	/* multiply by polynomial 'x' in GF(2^8) */
	.macro		mul_by_x, out, in, temp, const
	sshr		\temp, \in, #7
	add		\out, \in, \in
	and		\temp, \temp, \const
	eor		\out, \out, \temp
	.endm
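
	/*
	 * Reference for mul_by_x (a C-style sketch, for illustration only,
	 * not part of the build): per byte b this is the classic AES
	 * xtime() step,
	 *
	 *	b = (b << 1) ^ ((b & 0x80) ? 0x1b : 0);
	 *
	 * The arithmetic shift right by 7 spreads the top bit into an
	 * all-ones or all-zeroes mask, which selects the 0x1b reduction
	 * constant in \const without a branch.
	 */
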
	/* preload the entire Sbox */
	.macro		prepare, sbox, shiftrows, temp
	adr		\temp, \sbox
	movi		v12.16b, #0x40
	ldr		q13, \shiftrows
	movi		v14.16b, #0x1b
	ld1		{v16.16b-v19.16b}, [\temp], #64
	ld1		{v20.16b-v23.16b}, [\temp], #64
	ld1		{v24.16b-v27.16b}, [\temp], #64
	ld1		{v28.16b-v31.16b}, [\temp]
	.endm
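
	/*
	 * Register allocation after 'prepare': v16-v31 hold the full
	 * 256-byte Sbox, v12 the #0x40 index offset used to step through
	 * it, v13 the ShiftRows permutation vector and v14 the #0x1b
	 * reduction constant. v8-v11 serve as scratch and v15 carries the
	 * current round key in the macros below.
	 */
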
	/* do preload for encryption */
	.macro		enc_prepare, ignore0, ignore1, temp
	prepare		.LForward_Sbox, .LForward_ShiftRows, \temp
	.endm

	.macro		enc_switch_key, ignore0, ignore1, temp
	/* do nothing */
	.endm

	/* do preload for decryption */
	.macro		dec_prepare, ignore0, ignore1, temp
	prepare		.LReverse_Sbox, .LReverse_ShiftRows, \temp
	.endm

	/* apply SubBytes transformation using the preloaded Sbox */
	.macro		sub_bytes, in
	sub		v9.16b, \in\().16b, v12.16b
	tbl		\in\().16b, {v16.16b-v19.16b}, \in\().16b
	sub		v10.16b, v9.16b, v12.16b
	tbx		\in\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v11.16b, v10.16b, v12.16b
	tbx		\in\().16b, {v24.16b-v27.16b}, v10.16b
	tbx		\in\().16b, {v28.16b-v31.16b}, v11.16b
	.endm
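
	/*
	 * The lookup works because tbl/tbx index at most 64 table bytes at
	 * a time: tbl writes zero for an out-of-range index, while tbx
	 * leaves the destination byte untouched. Subtracting #0x40 before
	 * each step brings the next quarter of the Sbox into range, so
	 * exactly one of the four lookups hits for every byte. Net effect,
	 * per byte: in[i] = Sbox[in[i]].
	 */
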
	/* apply MixColumns transformation */
	.macro		mix_columns, in
	mul_by_x	v10.16b, \in\().16b, v9.16b, v14.16b
	rev32		v8.8h, \in\().8h
	eor		\in\().16b, v10.16b, \in\().16b
	shl		v9.4s, v8.4s, #24
	shl		v11.4s, \in\().4s, #24
	sri		v9.4s, v8.4s, #8
	sri		v11.4s, \in\().4s, #8
	eor		v9.16b, v9.16b, v8.16b
	eor		v10.16b, v10.16b, v9.16b
	eor		\in\().16b, v10.16b, v11.16b
	.endm
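
	/*
	 * MixColumns multiplies each state column by the circulant matrix
	 * { 02 03 01 01 } over GF(2^8). The macro exploits the rotation
	 * form of that product: with c a column, rotN a rotation of the
	 * column by N bytes and xtime the mul_by_x step,
	 *
	 *	c' = xtime(c) ^ rot1(c ^ xtime(c)) ^ rot2(c) ^ rot3(c)
	 *
	 * rev32 on .8h lanes swaps the 16-bit halves of each word (rot2),
	 * and each shl #24 / sri #8 pair assembles a one-byte rotation.
	 */
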
	/* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
	.macro		inv_mix_columns, in
	mul_by_x	v11.16b, \in\().16b, v10.16b, v14.16b
	mul_by_x	v11.16b, v11.16b, v10.16b, v14.16b
	eor		\in\().16b, \in\().16b, v11.16b
	rev32		v11.8h, v11.8h
	eor		\in\().16b, \in\().16b, v11.16b
	mix_columns	\in
	.endm
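
	/*
	 * This works because the inverse MixColumns matrix factors over
	 * GF(2^8) as
	 *
	 *	{ 0e 0b 0d 09 } = { 02 03 01 01 } x { 05 00 04 00 }
	 *
	 * The two mul_by_x passes set v11 = 4*in; the first eor gives
	 * 5*a_i per byte, and after rotating v11 by two bytes (rev32) the
	 * second eor adds 4*a_(i+2), completing the { 05 00 04 00 }
	 * product. The trailing mix_columns supplies the { 02 03 01 01 }
	 * factor.
	 */
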
	.macro		do_block, enc, in, rounds, rk, rkp, i
	ld1		{v15.16b}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
	tbl		\in\().16b, {\in\().16b}, v13.16b	/* ShiftRows */
	sub_bytes	\in
	ld1		{v15.16b}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns	\in
	.else
	inv_mix_columns	\in
	.endif
	b		1111b
2222:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
	.endm

	.macro		encrypt_block, in, rounds, rk, rkp, i
	do_block	1, \in, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block, in, rounds, rk, rkp, i
	do_block	0, \in, \rounds, \rk, \rkp, \i
	.endm
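
	/*
	 * Round structure, as a C-style sketch (for illustration only):
	 *
	 *	for (i = rounds; ; i--) {
	 *		state ^= *rk++;			// AddRoundKey
	 *		state = shift_rows(state);
	 *		state = sub_bytes(state);
	 *		if (i == 1)
	 *			break;			// last round: no mix
	 *		state = mix_columns(state);	// or the inverse
	 *	}
	 *	state ^= *rk;				// final AddRoundKey
	 *
	 * ShiftRows runs before SubBytes, the reverse of the textbook
	 * order; the two commute, since one permutes bytes and the other
	 * substitutes each byte independently.
	 */
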
	/*
	 * Interleaved versions: functionally equivalent to the
	 * ones above, but applied to 2 or 4 AES states in parallel.
	 */
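
	/*
	 * Processing several states at once lets the independent tbl/tbx
	 * and eor chains of different blocks overlap in the pipeline,
	 * hiding instruction latency that a single dependent AES state
	 * would expose (presumably the motivation for the interleaving;
	 * the actual speedup depends on the microarchitecture).
	 */
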
	.macro		sub_bytes_2x, in0, in1
	sub		v8.16b, \in0\().16b, v12.16b
	sub		v9.16b, \in1\().16b, v12.16b
	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
	sub		v10.16b, v8.16b, v12.16b
	sub		v11.16b, v9.16b, v12.16b
	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v8.16b, v10.16b, v12.16b
	sub		v9.16b, v11.16b, v12.16b
	tbx		\in0\().16b, {v24.16b-v27.16b}, v10.16b
	tbx		\in1\().16b, {v24.16b-v27.16b}, v11.16b
	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
	.endm

	.macro		sub_bytes_4x, in0, in1, in2, in3
	sub		v8.16b, \in0\().16b, v12.16b
	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
	sub		v9.16b, \in1\().16b, v12.16b
	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
	sub		v10.16b, \in2\().16b, v12.16b
	tbl		\in2\().16b, {v16.16b-v19.16b}, \in2\().16b
	sub		v11.16b, \in3\().16b, v12.16b
	tbl		\in3\().16b, {v16.16b-v19.16b}, \in3\().16b
	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
	sub		v8.16b, v8.16b, v12.16b
	tbx		\in2\().16b, {v20.16b-v23.16b}, v10.16b
	sub		v9.16b, v9.16b, v12.16b
	tbx		\in3\().16b, {v20.16b-v23.16b}, v11.16b
	sub		v10.16b, v10.16b, v12.16b
	tbx		\in0\().16b, {v24.16b-v27.16b}, v8.16b
	sub		v11.16b, v11.16b, v12.16b
	tbx		\in1\().16b, {v24.16b-v27.16b}, v9.16b
	sub		v8.16b, v8.16b, v12.16b
	tbx		\in2\().16b, {v24.16b-v27.16b}, v10.16b
	sub		v9.16b, v9.16b, v12.16b
	tbx		\in3\().16b, {v24.16b-v27.16b}, v11.16b
	sub		v10.16b, v10.16b, v12.16b
	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
	sub		v11.16b, v11.16b, v12.16b
	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
	tbx		\in2\().16b, {v28.16b-v31.16b}, v10.16b
	tbx		\in3\().16b, {v28.16b-v31.16b}, v11.16b
	.endm

	.macro		mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
	sshr		\tmp0\().16b, \in0\().16b, #7
	add		\out0\().16b, \in0\().16b, \in0\().16b
	sshr		\tmp1\().16b, \in1\().16b, #7
	and		\tmp0\().16b, \tmp0\().16b, \const\().16b
	add		\out1\().16b, \in1\().16b, \in1\().16b
	and		\tmp1\().16b, \tmp1\().16b, \const\().16b
	eor		\out0\().16b, \out0\().16b, \tmp0\().16b
	eor		\out1\().16b, \out1\().16b, \tmp1\().16b
	.endm

	.macro		mix_columns_2x, in0, in1
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	rev32		v10.8h, \in0\().8h
	rev32		v11.8h, \in1\().8h
	eor		\in0\().16b, v8.16b, \in0\().16b
	eor		\in1\().16b, v9.16b, \in1\().16b
	shl		v12.4s, v10.4s, #24
	shl		v13.4s, v11.4s, #24
	eor		v8.16b, v8.16b, v10.16b
	sri		v12.4s, v10.4s, #8
	shl		v10.4s, \in0\().4s, #24
	eor		v9.16b, v9.16b, v11.16b
	sri		v13.4s, v11.4s, #8
	shl		v11.4s, \in1\().4s, #24
	sri		v10.4s, \in0\().4s, #8
	eor		\in0\().16b, v8.16b, v12.16b
	sri		v11.4s, \in1\().4s, #8
	eor		\in1\().16b, v9.16b, v13.16b
	eor		\in0\().16b, v10.16b, \in0\().16b
	eor		\in1\().16b, v11.16b, \in1\().16b
	.endm

	.macro		inv_mix_cols_2x, in0, in1
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	mul_by_x_2x	v8, v9, v8, v9, v10, v11, v14
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	rev32		v8.8h, v8.8h
	rev32		v9.8h, v9.8h
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	mix_columns_2x	\in0, \in1
	.endm

	.macro		inv_mix_cols_4x, in0, in1, in2, in3
	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
	mul_by_x_2x	v10, v11, \in2, \in3, v12, v13, v14
	mul_by_x_2x	v8, v9, v8, v9, v12, v13, v14
	mul_by_x_2x	v10, v11, v10, v11, v12, v13, v14
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	eor		\in2\().16b, \in2\().16b, v10.16b
	eor		\in3\().16b, \in3\().16b, v11.16b
	rev32		v8.8h, v8.8h
	rev32		v9.8h, v9.8h
	rev32		v10.8h, v10.8h
	rev32		v11.8h, v11.8h
	eor		\in0\().16b, \in0\().16b, v8.16b
	eor		\in1\().16b, \in1\().16b, v9.16b
	eor		\in2\().16b, \in2\().16b, v10.16b
	eor		\in3\().16b, \in3\().16b, v11.16b
	mix_columns_2x	\in0, \in1
	mix_columns_2x	\in2, \in3
	.endm

	.macro		do_block_2x, enc, in0, in1, rounds, rk, rkp, i
	ld1		{v15.16b}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	sub_bytes_2x	\in0, \in1
	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
	ld1		{v15.16b}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns_2x	\in0, \in1
	ldr		q13, .LForward_ShiftRows
	.else
	inv_mix_cols_2x	\in0, \in1
	ldr		q13, .LReverse_ShiftRows
	.endif
	movi		v12.16b, #0x40
	b		1111b
2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	.endm
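
	/*
	 * mix_columns_2x (and hence inv_mix_cols_2x/_4x) uses v12 and v13
	 * as scratch, clobbering the #0x40 constant and the ShiftRows
	 * vector; that is why the interleaved round loops reload both on
	 * every iteration.
	 */
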
	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
	ld1		{v15.16b}, [\rk]
	add		\rkp, \rk, #16
	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
	sub_bytes_4x	\in0, \in1, \in2, \in3
	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
	tbl		\in2\().16b, {\in2\().16b}, v13.16b	/* ShiftRows */
	tbl		\in3\().16b, {\in3\().16b}, v13.16b	/* ShiftRows */
	ld1		{v15.16b}, [\rkp], #16
	subs		\i, \i, #1
	beq		2222f
	.if		\enc == 1
	mix_columns_2x	\in0, \in1
	mix_columns_2x	\in2, \in3
	ldr		q13, .LForward_ShiftRows
	.else
	inv_mix_cols_4x	\in0, \in1, \in2, \in3
	ldr		q13, .LReverse_ShiftRows
	.endif
	movi		v12.16b, #0x40
	b		1111b
2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
	.endm

	.macro		encrypt_block2x, in0, in1, rounds, rk, rkp, i
	do_block_2x	1, \in0, \in1, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block2x, in0, in1, rounds, rk, rkp, i
	do_block_2x	0, \in0, \in1, \rounds, \rk, \rkp, \i
	.endm

	.macro		encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

	.macro		decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
	do_block_4x	0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
	.endm

#include "aes-modes.S"

	.text
	.align		4
.LForward_ShiftRows:
	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb

.LReverse_ShiftRows:
	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
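
	/*
	 * The two vectors above encode (Inv)ShiftRows as tbl permutations:
	 * output byte j of the state is taken from input byte table[j].
	 * E.g. in the forward table, state bytes 1, 5, 9, 13 (row 1 in
	 * column-major order) are sourced from bytes 5, 9, 13, 1, i.e.
	 * row 1 rotated by one position, as the spec prescribes.
	 */
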
.LForward_Sbox:
	.byte		0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
	.byte		0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
	.byte		0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
	.byte		0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
	.byte		0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
	.byte		0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
	.byte		0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
	.byte		0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
	.byte		0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
	.byte		0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
	.byte		0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
	.byte		0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
	.byte		0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
	.byte		0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
	.byte		0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
	.byte		0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
	.byte		0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
	.byte		0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
	.byte		0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
	.byte		0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
	.byte		0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
	.byte		0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
	.byte		0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
	.byte		0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
	.byte		0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
	.byte		0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
	.byte		0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
	.byte		0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
	.byte		0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
	.byte		0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
	.byte		0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
	.byte		0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16

.LReverse_Sbox:
	.byte		0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
	.byte		0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
	.byte		0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
	.byte		0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
	.byte		0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
	.byte		0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
	.byte		0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
	.byte		0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
	.byte		0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
	.byte		0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
	.byte		0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
	.byte		0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
	.byte		0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
	.byte		0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
	.byte		0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
	.byte		0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
	.byte		0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
	.byte		0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
	.byte		0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
	.byte		0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
	.byte		0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
	.byte		0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
	.byte		0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
	.byte		0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
	.byte		0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
	.byte		0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
	.byte		0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
	.byte		0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
	.byte		0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
	.byte		0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
	.byte		0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
	.byte		0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
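
	/*
	 * The reverse table is the functional inverse of the forward one:
	 * for every byte x, Reverse_Sbox[Forward_Sbox[x]] == x.
	 */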