caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3 |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
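
/*
 * Worked example of the sizing above (a sketch, assuming CAAM_CMD_SZ ==
 * sizeof(u32), AES_MAX_KEY_SIZE == 32 and SHA512_DIGEST_SIZE == 64, as in
 * the kernel headers): DESC_AEAD_GIVENC_LEN = (4 + 16 + 7) * 4 = 108 bytes
 * and CAAM_MAX_KEY_SIZE = 32 + 2 * 64 = 160 bytes, so each sh_desc_*
 * buffer in struct caam_ctx reserves (108 + 160) / 4 = 67 descriptor words.
 */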

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
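
/*
 * Command flow generated above (sketch): when the descriptor runs shared
 * (JUMP_COND_SHRD true), the first jump lands on the OP_ALG_AAI_DK variant;
 * otherwise the plain decrypt operation runs and the unconditional jump
 * skips over the DK variant:
 *
 *	JUMP if SHRD ------------.
 *	OPERATION decrypt        |
 *	JUMP always ---------.   |
 *	OPERATION decrypt|DK <---'
 *	(next command) <-----'
 */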

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
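
/*
 * Note (an assumption about DECO math register state, not stated in this
 * file): REG0 is taken to hold zero in the MATH ADDs above, since the
 * driver never writes it; both variable sequence lengths are therefore
 * effectively set to the remaining SEQINLEN, and the VLF fifo load/store
 * pair then streams the entire remaining payload through class 1.
 */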

/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
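
/*
 * Layout of ctx->key as consumed by append_key_aead() below (sketch):
 *
 *	+--------------------------------+----------------------+
 *	| MDHA split authentication key  | encryption key       |
 *	| (padded to split_key_pad_len)  | (enckeylen bytes)    |
 *	+--------------------------------+----------------------+
 */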

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
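
/*
 * Prologue produced by init_sh_desc_key_aead() (sketch): the key commands
 * run only on the first, non-shared execution of the descriptor; once the
 * context is shared, the JUMP_COND_SHRD test skips straight past them to
 * the SET_OK_NO_PROP_ERRORS load.
 */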

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
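
/*
 * Note on the key blob parsed above: for authenc() algorithms the crypto
 * API hands setkey() a single buffer in the standard authenc format (an
 * rtattr header carrying enckeylen, followed by the authentication key and
 * then the encryption key); crypto_authenc_extractkeys() splits it into
 * keys.authkey/keys.enckey for us.
 */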

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
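
/*
 * Each edesc is one kmalloc'd buffer (see aead_edesc_alloc() below):
 *
 *	+-------------+--------------------+------------------------+
 *	| aead_edesc  | hw_desc[]          | sec4_sg link table     |
 *	| bookkeeping | (desc_bytes bytes) | (sec4_sg_bytes bytes)  |
 *	+-------------+--------------------+------------------------+
 */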

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify the hw auth check passed; otherwise return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
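
/*
 * Sequence pointers built by init_aead_job() (sketch):
 *
 *	SEQ IN PTR  -> assoc | iv | payload  (assoclen + ivsize + cryptlen)
 *	SEQ OUT PTR -> payload [ | ICV ]     (cryptlen + authsize on encrypt,
 *	                                      cryptlen - authsize on decrypt)
 */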

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
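
/*
 * For the ablkcipher job built above, SEQ IN covers the iv followed by the
 * payload (req->nbytes + ivsize total), while SEQ OUT covers only the
 * req->nbytes of ciphertext/plaintext written back.
 */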

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
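
/*
 * sec4_sg link table built above when !all_contig (sketch):
 *
 *	[ assoc entries ][ iv entry ][ src entries (last) ]
 *	[ dst entries (last) ]   <- only when req->dst != req->src
 */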

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	return edesc;
}
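
/*
 * sec4_sg layout built above (sketch): when the src side is not
 * contiguous, the [ assoc ][ iv ][ src (last) ] entries come first; when
 * req->dst != req->src and the dst side is not contiguous, a second
 * [ iv ][ dst (last) ] group follows for the output pointer.
 */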

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
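
	/* Map src/dst the same way as in the aead paths above */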
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if the IV is contiguous with (i.e. immediately precedes)
	 * the single-segment source.  If so, the hardware can read IV and
	 * payload in one pass; if not, both go through the link table.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
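
	/*
	 * Unlike the aead paths above, the link table here is mapped only
	 * after it has been fully written, so the mapping itself performs
	 * any sync the platform needs
	 */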
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
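
/*
 * ablkcipher encrypt entry point: allocate the extended descriptor,
 * build the job around the shared encrypt descriptor and enqueue it on
 * the job ring; ablkcipher_encrypt_done runs on completion
 */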
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
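
/*
 * ablkcipher decrypt entry point: identical to the encrypt path except
 * that the shared decrypt descriptor and completion callback are used
 */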
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
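
/*
 * Registration templates: one entry per algorithm exposed to the crypto
 * API, carrying the generic crypto_alg parameters alongside the
 * CAAM-specific OP_ALG_* selectors used when building descriptors
 */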
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
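
/*
 * Illustrative usage note (not part of the driver): once registered,
 * these algorithms are reached through the generic crypto API by name,
 * e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * The driver_name ("cbc-aes-caam") can be passed instead to request
 * this implementation explicitly.
 */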

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
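
/*
 * Per-tfm init: bind the transform to a job ring and latch the
 * descriptor header template values from its registration entry
 */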
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
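
/*
 * Per-tfm teardown: unmap whichever shared descriptors were created and
 * release the job ring
 */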
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
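
/* Module unload: unregister and free every algorithm still on alg_list */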
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
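
/* Build a crypto_alg from a template entry; the caller registers it */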
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
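
/*
 * Module init: a registration failure is logged and the loop moves on,
 * so a partial set of algorithms may end up registered; the return
 * value reflects only the last attempt
 */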
static int __init caam_algapi_init(void)
{
	int i = 0, err = 0;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}

	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");