caamalg.c

  1. /*
  2. * caam - Freescale FSL CAAM support for crypto API
  3. *
  4. * Copyright 2008-2011 Freescale Semiconductor, Inc.
  5. *
  6. * Based on talitos crypto API driver.
  7. *
  8. * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
  9. *
  10. * ---------------                     ---------------
  11. * | JobDesc #1  |-------------------->|  ShareDesc  |
  12. * | *(packet 1) |                     |    (PDB)    |
  13. * ---------------      |------------->|  (hashKey)  |
  14. *       .              |              | (cipherKey) |
  15. *       .              |    |-------->| (operation) |
  16. * ---------------      |    |         ---------------
  17. * | JobDesc #2  |------|    |
  18. * | *(packet 2) |           |
  19. * ---------------           |
  20. *       .                   |
  21. *       .                   |
  22. * ---------------           |
  23. * | JobDesc #3  |------------
  24. * | *(packet 3) |
  25. * ---------------
  26. *
  27. * The SharedDesc never changes for a connection unless rekeyed, but
  28. * each packet will likely be in a different place. So all we need
  29. * to know to process the packet is where the input is, where the
  30. * output goes, and what context we want to process with. Context is
  31. * in the SharedDesc, packet references in the JobDesc.
  32. *
  33. * So, a job desc looks like:
  34. *
  35. * ---------------------
  36. * | Header |
  37. * | ShareDesc Pointer |
  38. * | SEQ_OUT_PTR |
  39. * | (output buffer) |
  40. * | (output length) |
  41. * | SEQ_IN_PTR |
  42. * | (input buffer) |
  43. * | (input length) |
  44. * ---------------------
  45. */
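/*
 * Illustrative sketch only (not part of the driver): roughly how a job
 * descriptor with the layout shown above is emitted with the desc_constr.h
 * helpers this file uses; see init_aead_job() below for the real
 * construction. The variable names (shdesc_dma, src_dma, dst_dma, in_len,
 * out_len) are hypothetical placeholders:
 *
 *	init_job_desc_shared(desc, shdesc_dma, shdesc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */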
  46. #include "compat.h"
  47. #include "regs.h"
  48. #include "intern.h"
  49. #include "desc_constr.h"
  50. #include "jr.h"
  51. #include "error.h"
  52. #include "sg_sw_sec4.h"
  53. #include "key_gen.h"
  54. /*
  55. * crypto alg
  56. */
  57. #define CAAM_CRA_PRIORITY 3000
  58. /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
  59. #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
  60. SHA512_DIGEST_SIZE * 2)
  61. /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
  62. #define CAAM_MAX_IV_LENGTH 16
  63. /* length of descriptors text */
  64. #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
  65. #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
  66. #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
  67. #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
  68. #define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
  69. #define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
  70. #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
  71. #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
  72. #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
  73. 20 * CAAM_CMD_SZ)
  74. #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
  75. 15 * CAAM_CMD_SZ)
  76. #define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
  77. CAAM_MAX_KEY_SIZE)
  78. #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
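/*
 * Illustrative arithmetic (not used by the code): with, say,
 * authenc(hmac(sha256),cbc(aes)) the split key is 2 * SHA256_DIGEST_SIZE =
 * 64 bytes (already 16-byte aligned) and a 32-byte AES key follows, so the
 * inline-key test in aead_set_sh_desc() below becomes roughly
 *
 *	DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + 64 + 32 <= CAAM_DESC_BYTES_MAX
 *
 * When the test fails, the keys are referenced by DMA address instead of
 * being copied into the shared descriptor (see append_key_aead()).
 */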
  79. #ifdef DEBUG
  80. /* for print_hex_dumps with line references */
  81. #define debug(format, arg...) printk(format, arg)
  82. #else
  83. #define debug(format, arg...)
  84. #endif
  85. static struct list_head alg_list;
  86. /* Set DK bit in class 1 operation if shared */
  87. static inline void append_dec_op1(u32 *desc, u32 type)
  88. {
  89. u32 *jump_cmd, *uncond_jump_cmd;
  90. /* DK bit is valid only for AES */
  91. if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
  92. append_operation(desc, type | OP_ALG_AS_INITFINAL |
  93. OP_ALG_DECRYPT);
  94. return;
  95. }
  96. jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
  97. append_operation(desc, type | OP_ALG_AS_INITFINAL |
  98. OP_ALG_DECRYPT);
  99. uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
  100. set_jump_tgt_here(desc, jump_cmd);
  101. append_operation(desc, type | OP_ALG_AS_INITFINAL |
  102. OP_ALG_DECRYPT | OP_ALG_AAI_DK);
  103. set_jump_tgt_here(desc, uncond_jump_cmd);
  104. }
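/*
 * Descriptor fragment emitted above for the AES case (sketch):
 *
 *	JUMP  (if SHRD)  -> [dk]
 *	OPERATION decrypt
 *	JUMP  (always)   -> [done]
 * [dk]	OPERATION decrypt | OP_ALG_AAI_DK
 * [done]
 *
 * i.e. the DK (Decrypt Key) form of the operation is only selected when
 * the JUMP_COND_SHRD condition holds, as the comment above the function
 * states.
 */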
  105. /*
  106. * For aead functions, read payload and write payload,
  107. * both of which are specified in req->src and req->dst
  108. */
  109. static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
  110. {
  111. append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
  112. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
  113. KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
  114. }
  115. /*
  116. * For aead encrypt and decrypt, read iv for both classes
  117. */
  118. static inline void aead_append_ld_iv(u32 *desc, int ivsize)
  119. {
  120. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  121. LDST_CLASS_1_CCB | ivsize);
  122. append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
  123. }
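/*
 * Note: the sequence above both loads the IV into the class 1 (cipher)
 * context and moves a copy into the class 2 input FIFO, so the IV is
 * covered by the authentication (ICV) as well as used for encryption.
 */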
  124. /*
  125. * For ablkcipher encrypt and decrypt, read from req->src and
  126. * write to req->dst
  127. */
  128. static inline void ablkcipher_append_src_dst(u32 *desc)
  129. {
  130. append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  131. append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  132. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
  133. KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
  134. append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
  135. }
  136. /*
  137. * If all data, including src (with assoc and iv) or dst (with iv only) are
  138. * contiguous
  139. */
  140. #define GIV_SRC_CONTIG 1
  141. #define GIV_DST_CONTIG (1 << 1)
  142. /*
  143. * per-session context
  144. */
  145. struct caam_ctx {
  146. struct device *jrdev;
  147. u32 sh_desc_enc[DESC_MAX_USED_LEN];
  148. u32 sh_desc_dec[DESC_MAX_USED_LEN];
  149. u32 sh_desc_givenc[DESC_MAX_USED_LEN];
  150. dma_addr_t sh_desc_enc_dma;
  151. dma_addr_t sh_desc_dec_dma;
  152. dma_addr_t sh_desc_givenc_dma;
  153. u32 class1_alg_type;
  154. u32 class2_alg_type;
  155. u32 alg_op;
  156. u8 key[CAAM_MAX_KEY_SIZE];
  157. dma_addr_t key_dma;
  158. unsigned int enckeylen;
  159. unsigned int split_key_len;
  160. unsigned int split_key_pad_len;
  161. unsigned int authsize;
  162. };
  163. static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
  164. int keys_fit_inline)
  165. {
  166. if (keys_fit_inline) {
  167. append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
  168. ctx->split_key_len, CLASS_2 |
  169. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  170. append_key_as_imm(desc, (void *)ctx->key +
  171. ctx->split_key_pad_len, ctx->enckeylen,
  172. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  173. } else {
  174. append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
  175. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  176. append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
  177. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  178. }
  179. }
  180. static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
  181. int keys_fit_inline)
  182. {
  183. u32 *key_jump_cmd;
  184. init_sh_desc(desc, HDR_SHARE_SERIAL);
  185. /* Skip if already shared */
  186. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  187. JUMP_COND_SHRD);
  188. append_key_aead(desc, ctx, keys_fit_inline);
  189. set_jump_tgt_here(desc, key_jump_cmd);
  190. }
  191. static int aead_null_set_sh_desc(struct crypto_aead *aead)
  192. {
  193. struct aead_tfm *tfm = &aead->base.crt_aead;
  194. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  195. struct device *jrdev = ctx->jrdev;
  196. bool keys_fit_inline = false;
  197. u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
  198. u32 *desc;
  199. /*
  200. * Job Descriptor and Shared Descriptors
  201. * must all fit into the 64-word Descriptor h/w Buffer
  202. */
  203. if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
  204. ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
  205. keys_fit_inline = true;
  206. /* aead_encrypt shared descriptor */
  207. desc = ctx->sh_desc_enc;
  208. init_sh_desc(desc, HDR_SHARE_SERIAL);
  209. /* Skip if already shared */
  210. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  211. JUMP_COND_SHRD);
  212. if (keys_fit_inline)
  213. append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
  214. ctx->split_key_len, CLASS_2 |
  215. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  216. else
  217. append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
  218. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  219. set_jump_tgt_here(desc, key_jump_cmd);
  220. /* cryptlen = seqoutlen - authsize */
  221. append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
  222. /*
  223. * NULL encryption; IV is zero
  224. * assoclen = (assoclen + cryptlen) - cryptlen
  225. */
  226. append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
  227. /* read assoc before reading payload */
  228. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
  229. KEY_VLF);
  230. /* Prepare to read and write cryptlen bytes */
  231. append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  232. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  233. /*
  234. * MOVE_LEN opcode is not available in all SEC HW revisions,
  235. * thus need to do some magic, i.e. self-patch the descriptor
  236. * buffer.
  237. */
  238. read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
  239. MOVE_DEST_MATH3 |
  240. (0x6 << MOVE_LEN_SHIFT));
  241. write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
  242. MOVE_DEST_DESCBUF |
  243. MOVE_WAITCOMP |
  244. (0x8 << MOVE_LEN_SHIFT));
  245. /* Class 2 operation */
  246. append_operation(desc, ctx->class2_alg_type |
  247. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  248. /* Read and write cryptlen bytes */
  249. aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
  250. set_move_tgt_here(desc, read_move_cmd);
  251. set_move_tgt_here(desc, write_move_cmd);
  252. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  253. append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
  254. MOVE_AUX_LS);
  255. /* Write ICV */
  256. append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
  257. LDST_SRCDST_BYTE_CONTEXT);
  258. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  259. desc_bytes(desc),
  260. DMA_TO_DEVICE);
  261. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  262. dev_err(jrdev, "unable to map shared descriptor\n");
  263. return -ENOMEM;
  264. }
  265. #ifdef DEBUG
  266. print_hex_dump(KERN_ERR,
  267. "aead null enc shdesc@"__stringify(__LINE__)": ",
  268. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  269. desc_bytes(desc), 1);
  270. #endif
  271. /*
  272. * Job Descriptor and Shared Descriptors
  273. * must all fit into the 64-word Descriptor h/w Buffer
  274. */
  275. keys_fit_inline = false;
  276. if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
  277. ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
  278. keys_fit_inline = true;
  279. desc = ctx->sh_desc_dec;
  280. /* aead_decrypt shared descriptor */
  281. init_sh_desc(desc, HDR_SHARE_SERIAL);
  282. /* Skip if already shared */
  283. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  284. JUMP_COND_SHRD);
  285. if (keys_fit_inline)
  286. append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
  287. ctx->split_key_len, CLASS_2 |
  288. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  289. else
  290. append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
  291. KEY_DEST_MDHA_SPLIT | KEY_ENC);
  292. set_jump_tgt_here(desc, key_jump_cmd);
  293. /* Class 2 operation */
  294. append_operation(desc, ctx->class2_alg_type |
  295. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  296. /* assoclen + cryptlen = seqinlen - ivsize - authsize */
  297. append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
  298. ctx->authsize + tfm->ivsize);
  299. /* assoclen = (assoclen + cryptlen) - cryptlen */
  300. append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  301. append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
  302. /* read assoc before reading payload */
  303. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
  304. KEY_VLF);
  305. /* Prepare to read and write cryptlen bytes */
  306. append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
  307. append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
  308. /*
  309. * MOVE_LEN opcode is not available in all SEC HW revisions,
  310. * thus need to do some magic, i.e. self-patch the descriptor
  311. * buffer.
  312. */
  313. read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
  314. MOVE_DEST_MATH2 |
  315. (0x6 << MOVE_LEN_SHIFT));
  316. write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
  317. MOVE_DEST_DESCBUF |
  318. MOVE_WAITCOMP |
  319. (0x8 << MOVE_LEN_SHIFT));
  320. /* Read and write cryptlen bytes */
  321. aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
  322. /*
  323. * Insert a NOP here, since we need at least 4 instructions between
  324. * code patching the descriptor buffer and the location being patched.
  325. */
  326. jump_cmd = append_jump(desc, JUMP_TEST_ALL);
  327. set_jump_tgt_here(desc, jump_cmd);
  328. set_move_tgt_here(desc, read_move_cmd);
  329. set_move_tgt_here(desc, write_move_cmd);
  330. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  331. append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
  332. MOVE_AUX_LS);
  333. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  334. /* Load ICV */
  335. append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
  336. FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
  337. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  338. desc_bytes(desc),
  339. DMA_TO_DEVICE);
  340. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  341. dev_err(jrdev, "unable to map shared descriptor\n");
  342. return -ENOMEM;
  343. }
  344. #ifdef DEBUG
  345. print_hex_dump(KERN_ERR,
  346. "aead null dec shdesc@"__stringify(__LINE__)": ",
  347. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  348. desc_bytes(desc), 1);
  349. #endif
  350. return 0;
  351. }
  352. static int aead_set_sh_desc(struct crypto_aead *aead)
  353. {
  354. struct aead_tfm *tfm = &aead->base.crt_aead;
  355. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  356. struct device *jrdev = ctx->jrdev;
  357. bool keys_fit_inline = false;
  358. u32 geniv, moveiv;
  359. u32 *desc;
  360. if (!ctx->authsize)
  361. return 0;
  362. /* NULL encryption / decryption */
  363. if (!ctx->enckeylen)
  364. return aead_null_set_sh_desc(aead);
  365. /*
  366. * Job Descriptor and Shared Descriptors
  367. * must all fit into the 64-word Descriptor h/w Buffer
  368. */
  369. if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
  370. ctx->split_key_pad_len + ctx->enckeylen <=
  371. CAAM_DESC_BYTES_MAX)
  372. keys_fit_inline = true;
  373. /* aead_encrypt shared descriptor */
  374. desc = ctx->sh_desc_enc;
  375. init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
  376. /* Class 2 operation */
  377. append_operation(desc, ctx->class2_alg_type |
  378. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  379. /* cryptlen = seqoutlen - authsize */
  380. append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
  381. /* assoclen + cryptlen = seqinlen - ivsize */
  382. append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
  383. /* assoclen = (assoclen + cryptlen) - cryptlen */
  384. append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
  385. /* read assoc before reading payload */
  386. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
  387. KEY_VLF);
  388. aead_append_ld_iv(desc, tfm->ivsize);
  389. /* Class 1 operation */
  390. append_operation(desc, ctx->class1_alg_type |
  391. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  392. /* Read and write cryptlen bytes */
  393. append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
  394. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  395. aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
  396. /* Write ICV */
  397. append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
  398. LDST_SRCDST_BYTE_CONTEXT);
  399. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  400. desc_bytes(desc),
  401. DMA_TO_DEVICE);
  402. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  403. dev_err(jrdev, "unable to map shared descriptor\n");
  404. return -ENOMEM;
  405. }
  406. #ifdef DEBUG
  407. print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
  408. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  409. desc_bytes(desc), 1);
  410. #endif
  411. /*
  412. * Job Descriptor and Shared Descriptors
  413. * must all fit into the 64-word Descriptor h/w Buffer
  414. */
  415. keys_fit_inline = false;
  416. if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
  417. ctx->split_key_pad_len + ctx->enckeylen <=
  418. CAAM_DESC_BYTES_MAX)
  419. keys_fit_inline = true;
  420. /* aead_decrypt shared descriptor */
  421. desc = ctx->sh_desc_dec;
  422. init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
  423. /* Class 2 operation */
  424. append_operation(desc, ctx->class2_alg_type |
  425. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  426. /* assoclen + cryptlen = seqinlen - ivsize - authsize */
  427. append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
  428. ctx->authsize + tfm->ivsize);
  429. /* assoclen = (assoclen + cryptlen) - cryptlen */
  430. append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  431. append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
  432. /* read assoc before reading payload */
  433. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
  434. KEY_VLF);
  435. aead_append_ld_iv(desc, tfm->ivsize);
  436. append_dec_op1(desc, ctx->class1_alg_type);
  437. /* Read and write cryptlen bytes */
  438. append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
  439. append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
  440. aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
  441. /* Load ICV */
  442. append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
  443. FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
  444. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  445. desc_bytes(desc),
  446. DMA_TO_DEVICE);
  447. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  448. dev_err(jrdev, "unable to map shared descriptor\n");
  449. return -ENOMEM;
  450. }
  451. #ifdef DEBUG
  452. print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
  453. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  454. desc_bytes(desc), 1);
  455. #endif
  456. /*
  457. * Job Descriptor and Shared Descriptors
  458. * must all fit into the 64-word Descriptor h/w Buffer
  459. */
  460. keys_fit_inline = false;
  461. if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
  462. ctx->split_key_pad_len + ctx->enckeylen <=
  463. CAAM_DESC_BYTES_MAX)
  464. keys_fit_inline = true;
  465. /* aead_givencrypt shared descriptor */
  466. desc = ctx->sh_desc_givenc;
  467. init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
  468. /* Generate IV */
  469. geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
  470. NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
  471. NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
  472. append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
  473. LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  474. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  475. append_move(desc, MOVE_SRC_INFIFO |
  476. MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
  477. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  478. /* Copy IV from class 1 context to the output FIFO */
  479. append_move(desc, MOVE_SRC_CLASS1CTX |
  480. MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
  481. /* Return to encryption */
  482. append_operation(desc, ctx->class2_alg_type |
  483. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  484. /* ivsize + cryptlen = seqoutlen - authsize */
  485. append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
  486. /* assoclen = seqinlen - (ivsize + cryptlen) */
  487. append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
  488. /* read assoc before reading payload */
  489. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
  490. KEY_VLF);
  491. /* Copy IV from class 1 ctx to class 2 fifo */
  492. moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
  493. NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
  494. append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
  495. LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  496. append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
  497. LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
  498. /* Class 1 operation */
  499. append_operation(desc, ctx->class1_alg_type |
  500. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  501. /* Will write ivsize + cryptlen */
  502. append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  503. /* No need to reload the IV */
  504. append_seq_fifo_load(desc, tfm->ivsize,
  505. FIFOLD_CLASS_SKIP);
  506. /* Will read cryptlen */
  507. append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  508. aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
  509. /* Write ICV */
  510. append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
  511. LDST_SRCDST_BYTE_CONTEXT);
  512. ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
  513. desc_bytes(desc),
  514. DMA_TO_DEVICE);
  515. if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
  516. dev_err(jrdev, "unable to map shared descriptor\n");
  517. return -ENOMEM;
  518. }
  519. #ifdef DEBUG
  520. print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
  521. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  522. desc_bytes(desc), 1);
  523. #endif
  524. return 0;
  525. }
  526. static int aead_setauthsize(struct crypto_aead *authenc,
  527. unsigned int authsize)
  528. {
  529. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  530. ctx->authsize = authsize;
  531. aead_set_sh_desc(authenc);
  532. return 0;
  533. }
  534. static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
  535. u32 authkeylen)
  536. {
  537. return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
  538. ctx->split_key_pad_len, key_in, authkeylen,
  539. ctx->alg_op);
  540. }
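/*
 * Layout of ctx->key / ctx->key_dma as filled in by aead_setkey() below
 * and consumed by append_key_aead() above (sketch):
 *
 *	+------------------------------------+------------------------+
 *	| MDHA split key, padded to          | encryption key         |
 *	| split_key_pad_len bytes            | (enckeylen bytes)      |
 *	+------------------------------------+------------------------+
 *	^ ctx->key                           ^ ctx->key + split_key_pad_len
 */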
  541. static int aead_setkey(struct crypto_aead *aead,
  542. const u8 *key, unsigned int keylen)
  543. {
  544. /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
  545. static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
  546. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  547. struct device *jrdev = ctx->jrdev;
  548. struct crypto_authenc_keys keys;
  549. int ret = 0;
  550. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  551. goto badkey;
  552. /* Pick class 2 key length from algorithm submask */
  553. ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
  554. OP_ALG_ALGSEL_SHIFT] * 2;
  555. ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
  556. if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  557. goto badkey;
  558. #ifdef DEBUG
  559. printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
  560. keys.authkeylen + keys.enckeylen, keys.enckeylen,
  561. keys.authkeylen);
  562. printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
  563. ctx->split_key_len, ctx->split_key_pad_len);
  564. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  565. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  566. #endif
  567. ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
  568. if (ret) {
  569. goto badkey;
  570. }
  571. /* append the encryption key after the auth split key */
  572. memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
  573. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
  574. keys.enckeylen, DMA_TO_DEVICE);
  575. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  576. dev_err(jrdev, "unable to map key i/o memory\n");
  577. return -ENOMEM;
  578. }
  579. #ifdef DEBUG
  580. print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
  581. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  582. ctx->split_key_pad_len + keys.enckeylen, 1);
  583. #endif
  584. ctx->enckeylen = keys.enckeylen;
  585. ret = aead_set_sh_desc(aead);
  586. if (ret) {
  587. dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
  588. keys.enckeylen, DMA_TO_DEVICE);
  589. }
  590. return ret;
  591. badkey:
  592. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  593. return -EINVAL;
  594. }
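/*
 * Hedged usage sketch (not part of this file): how the setkey/setauthsize
 * hooks above are reached through the generic crypto API. The function
 * name, algorithm string and sizes are examples only; the key blob is the
 * authenc-encoded form (an rtattr carrying enckeylen, followed by the
 * authentication key and then the cipher key) that
 * crypto_authenc_extractkeys() unpacks above.
 */
#if 0
static int example_setup_aead(const u8 *authenc_key_blob, unsigned int blob_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ends up in aead_setauthsize() -> aead_set_sh_desc() */
	ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
	if (!ret)
		/* ends up in aead_setkey() above */
		ret = crypto_aead_setkey(tfm, authenc_key_blob, blob_len);

	crypto_free_aead(tfm);
	return ret;
}
#endif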
  595. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  596. const u8 *key, unsigned int keylen)
  597. {
  598. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  599. struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
  600. struct device *jrdev = ctx->jrdev;
  601. int ret = 0;
  602. u32 *key_jump_cmd;
  603. u32 *desc;
  604. #ifdef DEBUG
  605. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  606. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  607. #endif
  608. memcpy(ctx->key, key, keylen);
  609. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  610. DMA_TO_DEVICE);
  611. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  612. dev_err(jrdev, "unable to map key i/o memory\n");
  613. return -ENOMEM;
  614. }
  615. ctx->enckeylen = keylen;
  616. /* ablkcipher_encrypt shared descriptor */
  617. desc = ctx->sh_desc_enc;
  618. init_sh_desc(desc, HDR_SHARE_SERIAL);
  619. /* Skip if already shared */
  620. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  621. JUMP_COND_SHRD);
  622. /* Load class1 key only */
  623. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  624. ctx->enckeylen, CLASS_1 |
  625. KEY_DEST_CLASS_REG);
  626. set_jump_tgt_here(desc, key_jump_cmd);
  627. /* Load iv */
  628. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  629. LDST_CLASS_1_CCB | tfm->ivsize);
  630. /* Load operation */
  631. append_operation(desc, ctx->class1_alg_type |
  632. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  633. /* Perform operation */
  634. ablkcipher_append_src_dst(desc);
  635. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  636. desc_bytes(desc),
  637. DMA_TO_DEVICE);
  638. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  639. dev_err(jrdev, "unable to map shared descriptor\n");
  640. return -ENOMEM;
  641. }
  642. #ifdef DEBUG
  643. print_hex_dump(KERN_ERR,
  644. "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
  645. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  646. desc_bytes(desc), 1);
  647. #endif
  648. /* ablkcipher_decrypt shared descriptor */
  649. desc = ctx->sh_desc_dec;
  650. init_sh_desc(desc, HDR_SHARE_SERIAL);
  651. /* Skip if already shared */
  652. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  653. JUMP_COND_SHRD);
  654. /* Load class1 key only */
  655. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  656. ctx->enckeylen, CLASS_1 |
  657. KEY_DEST_CLASS_REG);
  658. set_jump_tgt_here(desc, key_jump_cmd);
  659. /* load IV */
  660. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  661. LDST_CLASS_1_CCB | tfm->ivsize);
  662. /* Choose operation */
  663. append_dec_op1(desc, ctx->class1_alg_type);
  664. /* Perform operation */
  665. ablkcipher_append_src_dst(desc);
  666. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  667. desc_bytes(desc),
  668. DMA_TO_DEVICE);
  669. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  670. dev_err(jrdev, "unable to map shared descriptor\n");
  671. return -ENOMEM;
  672. }
  673. #ifdef DEBUG
  674. print_hex_dump(KERN_ERR,
  675. "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
  676. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  677. desc_bytes(desc), 1);
  678. #endif
  679. return ret;
  680. }
  681. /*
  682. * aead_edesc - s/w-extended aead descriptor
  683. * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  684. * @assoc_chained: if associated data is chained
  685. * @src_nents: number of segments in input scatterlist
  686. * @src_chained: if source is chained
  687. * @dst_nents: number of segments in output scatterlist
  688. * @dst_chained: if destination is chained
  689. * @iv_dma: dma address of iv for checking continuity and link table
  690. * @sec4_sg: pointer to h/w link table
  691. * @sec4_sg_bytes: length of dma mapped sec4_sg space
  692. * @sec4_sg_dma: bus physical mapped address of h/w link table
  693. * @hw_desc: the h/w job descriptor followed by any referenced link tables (variable length; must not exceed MAX_CAAM_DESCSIZE)
  694. */
  695. struct aead_edesc {
  696. int assoc_nents;
  697. bool assoc_chained;
  698. int src_nents;
  699. bool src_chained;
  700. int dst_nents;
  701. bool dst_chained;
  702. dma_addr_t iv_dma;
  703. int sec4_sg_bytes;
  704. dma_addr_t sec4_sg_dma;
  705. struct sec4_sg_entry *sec4_sg;
  706. u32 hw_desc[0];
  707. };
  708. /*
  709. * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  710. * @src_nents: number of segments in input scatterlist
  711. * @src_chained: if source is chained
  712. * @dst_nents: number of segments in output scatterlist
  713. * @dst_chained: if destination is chained
  714. * @iv_dma: dma address of iv for checking continuity and link table
  715. * @sec4_sg: pointer to h/w link table
  716. * @sec4_sg_bytes: length of dma mapped sec4_sg space
  717. * @sec4_sg_dma: bus physical mapped address of h/w link table
  718. * @hw_desc: the h/w job descriptor followed by any referenced link tables (variable length; must not exceed MAX_CAAM_DESCSIZE)
  719. */
  720. struct ablkcipher_edesc {
  721. int src_nents;
  722. bool src_chained;
  723. int dst_nents;
  724. bool dst_chained;
  725. dma_addr_t iv_dma;
  726. int sec4_sg_bytes;
  727. dma_addr_t sec4_sg_dma;
  728. struct sec4_sg_entry *sec4_sg;
  729. u32 hw_desc[0];
  730. };
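/*
 * Both extended descriptors are carved out of a single allocation (see
 * aead_edesc_alloc() below); rough layout:
 *
 *	+------------------+----------------------+---------------------+
 *	| struct *_edesc   | hw_desc[] (job desc) | sec4_sg link table  |
 *	+------------------+----------------------+---------------------+
 *	                                          ^ edesc->sec4_sg
 *
 * Only the link table is DMA-mapped separately (sec4_sg_dma); the job
 * descriptor itself is handed to the ring via caam_jr_enqueue().
 */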
  731. static void caam_unmap(struct device *dev, struct scatterlist *src,
  732. struct scatterlist *dst, int src_nents,
  733. bool src_chained, int dst_nents, bool dst_chained,
  734. dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
  735. int sec4_sg_bytes)
  736. {
  737. if (dst != src) {
  738. dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
  739. src_chained);
  740. dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
  741. dst_chained);
  742. } else {
  743. dma_unmap_sg_chained(dev, src, src_nents ? : 1,
  744. DMA_BIDIRECTIONAL, src_chained);
  745. }
  746. if (iv_dma)
  747. dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
  748. if (sec4_sg_bytes)
  749. dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
  750. DMA_TO_DEVICE);
  751. }
  752. static void aead_unmap(struct device *dev,
  753. struct aead_edesc *edesc,
  754. struct aead_request *req)
  755. {
  756. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  757. int ivsize = crypto_aead_ivsize(aead);
  758. dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
  759. DMA_TO_DEVICE, edesc->assoc_chained);
  760. caam_unmap(dev, req->src, req->dst,
  761. edesc->src_nents, edesc->src_chained, edesc->dst_nents,
  762. edesc->dst_chained, edesc->iv_dma, ivsize,
  763. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  764. }
  765. static void ablkcipher_unmap(struct device *dev,
  766. struct ablkcipher_edesc *edesc,
  767. struct ablkcipher_request *req)
  768. {
  769. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  770. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  771. caam_unmap(dev, req->src, req->dst,
  772. edesc->src_nents, edesc->src_chained, edesc->dst_nents,
  773. edesc->dst_chained, edesc->iv_dma, ivsize,
  774. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  775. }
  776. static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  777. void *context)
  778. {
  779. struct aead_request *req = context;
  780. struct aead_edesc *edesc;
  781. #ifdef DEBUG
  782. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  783. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  784. int ivsize = crypto_aead_ivsize(aead);
  785. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  786. #endif
  787. edesc = (struct aead_edesc *)((char *)desc -
  788. offsetof(struct aead_edesc, hw_desc));
  789. if (err)
  790. caam_jr_strstatus(jrdev, err);
  791. aead_unmap(jrdev, edesc, req);
  792. #ifdef DEBUG
  793. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  794. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  795. req->assoclen , 1);
  796. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  797. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
  798. edesc->src_nents ? 100 : ivsize, 1);
  799. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  800. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  801. edesc->src_nents ? 100 : req->cryptlen +
  802. ctx->authsize + 4, 1);
  803. #endif
  804. kfree(edesc);
  805. aead_request_complete(req, err);
  806. }
  807. static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  808. void *context)
  809. {
  810. struct aead_request *req = context;
  811. struct aead_edesc *edesc;
  812. #ifdef DEBUG
  813. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  814. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  815. int ivsize = crypto_aead_ivsize(aead);
  816. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  817. #endif
  818. edesc = (struct aead_edesc *)((char *)desc -
  819. offsetof(struct aead_edesc, hw_desc));
  820. #ifdef DEBUG
  821. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  822. DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  823. ivsize, 1);
  824. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  825. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
  826. req->cryptlen - ctx->authsize, 1);
  827. #endif
  828. if (err)
  829. caam_jr_strstatus(jrdev, err);
  830. aead_unmap(jrdev, edesc, req);
  831. /*
  832. * verify hw auth check passed else return -EBADMSG
  833. */
  834. if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
  835. err = -EBADMSG;
  836. #ifdef DEBUG
  837. print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
  838. DUMP_PREFIX_ADDRESS, 16, 4,
  839. ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
  840. sizeof(struct iphdr) + req->assoclen +
  841. ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
  842. ctx->authsize + 36, 1);
  843. if (!err && edesc->sec4_sg_bytes) {
  844. struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
  845. print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
  846. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
  847. sg->length + ctx->authsize + 16, 1);
  848. }
  849. #endif
  850. kfree(edesc);
  851. aead_request_complete(req, err);
  852. }
  853. static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  854. void *context)
  855. {
  856. struct ablkcipher_request *req = context;
  857. struct ablkcipher_edesc *edesc;
  858. #ifdef DEBUG
  859. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  860. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  861. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  862. #endif
  863. edesc = (struct ablkcipher_edesc *)((char *)desc -
  864. offsetof(struct ablkcipher_edesc, hw_desc));
  865. if (err)
  866. caam_jr_strstatus(jrdev, err);
  867. #ifdef DEBUG
  868. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  869. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  870. edesc->src_nents > 1 ? 100 : ivsize, 1);
  871. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  872. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  873. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  874. #endif
  875. ablkcipher_unmap(jrdev, edesc, req);
  876. kfree(edesc);
  877. ablkcipher_request_complete(req, err);
  878. }
  879. static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  880. void *context)
  881. {
  882. struct ablkcipher_request *req = context;
  883. struct ablkcipher_edesc *edesc;
  884. #ifdef DEBUG
  885. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  886. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  887. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  888. #endif
  889. edesc = (struct ablkcipher_edesc *)((char *)desc -
  890. offsetof(struct ablkcipher_edesc, hw_desc));
  891. if (err)
  892. caam_jr_strstatus(jrdev, err);
  893. #ifdef DEBUG
  894. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  895. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  896. ivsize, 1);
  897. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  898. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  899. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  900. #endif
  901. ablkcipher_unmap(jrdev, edesc, req);
  902. kfree(edesc);
  903. ablkcipher_request_complete(req, err);
  904. }
  905. /*
  906. * Fill in aead job descriptor
  907. */
  908. static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
  909. struct aead_edesc *edesc,
  910. struct aead_request *req,
  911. bool all_contig, bool encrypt)
  912. {
  913. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  914. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  915. int ivsize = crypto_aead_ivsize(aead);
  916. int authsize = ctx->authsize;
  917. u32 *desc = edesc->hw_desc;
  918. u32 out_options = 0, in_options;
  919. dma_addr_t dst_dma, src_dma;
  920. int len, sec4_sg_index = 0;
  921. #ifdef DEBUG
  922. debug("assoclen %d cryptlen %d authsize %d\n",
  923. req->assoclen, req->cryptlen, authsize);
  924. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  925. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  926. req->assoclen , 1);
  927. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  928. DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  929. edesc->src_nents ? 100 : ivsize, 1);
  930. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  931. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  932. edesc->src_nents ? 100 : req->cryptlen, 1);
  933. print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
  934. DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
  935. desc_bytes(sh_desc), 1);
  936. #endif
  937. len = desc_len(sh_desc);
  938. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  939. if (all_contig) {
  940. src_dma = sg_dma_address(req->assoc);
  941. in_options = 0;
  942. } else {
  943. src_dma = edesc->sec4_sg_dma;
  944. sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
  945. (edesc->src_nents ? : 1);
  946. in_options = LDST_SGF;
  947. }
  948. append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
  949. in_options);
  950. if (likely(req->src == req->dst)) {
  951. if (all_contig) {
  952. dst_dma = sg_dma_address(req->src);
  953. } else {
  954. dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
  955. ((edesc->assoc_nents ? : 1) + 1);
  956. out_options = LDST_SGF;
  957. }
  958. } else {
  959. if (!edesc->dst_nents) {
  960. dst_dma = sg_dma_address(req->dst);
  961. } else {
  962. dst_dma = edesc->sec4_sg_dma +
  963. sec4_sg_index *
  964. sizeof(struct sec4_sg_entry);
  965. out_options = LDST_SGF;
  966. }
  967. }
  968. if (encrypt)
  969. append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
  970. out_options);
  971. else
  972. append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
  973. out_options);
  974. }
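/*
 * Net effect of init_aead_job() (sketch):
 *
 *	SEQ IN  ptr -> assoc data | IV | payload
 *	               (assoclen + ivsize + cryptlen bytes; on decrypt the
 *	                payload already includes the ICV)
 *	SEQ OUT ptr -> cryptlen + authsize bytes on encrypt,
 *	               cryptlen - authsize bytes on decrypt
 *
 * When everything is physically contiguous the pointers reference the
 * buffers directly; otherwise they reference the sec4_sg link table
 * built in aead_edesc_alloc().
 */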
  975. /*
  976. * Fill in aead givencrypt job descriptor
  977. */
  978. static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
  979. struct aead_edesc *edesc,
  980. struct aead_request *req,
  981. int contig)
  982. {
  983. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  984. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  985. int ivsize = crypto_aead_ivsize(aead);
  986. int authsize = ctx->authsize;
  987. u32 *desc = edesc->hw_desc;
  988. u32 out_options = 0, in_options;
  989. dma_addr_t dst_dma, src_dma;
  990. int len, sec4_sg_index = 0;
  991. #ifdef DEBUG
  992. debug("assoclen %d cryptlen %d authsize %d\n",
  993. req->assoclen, req->cryptlen, authsize);
  994. print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
  995. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
  996. req->assoclen , 1);
  997. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  998. DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
  999. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1000. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1001. edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
  1002. print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
  1003. DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
  1004. desc_bytes(sh_desc), 1);
  1005. #endif
  1006. len = desc_len(sh_desc);
  1007. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1008. if (contig & GIV_SRC_CONTIG) {
  1009. src_dma = sg_dma_address(req->assoc);
  1010. in_options = 0;
  1011. } else {
  1012. src_dma = edesc->sec4_sg_dma;
  1013. sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
  1014. in_options = LDST_SGF;
  1015. }
  1016. append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
  1017. in_options);
  1018. if (contig & GIV_DST_CONTIG) {
  1019. dst_dma = edesc->iv_dma;
  1020. } else {
  1021. if (likely(req->src == req->dst)) {
  1022. dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
  1023. edesc->assoc_nents;
  1024. out_options = LDST_SGF;
  1025. } else {
  1026. dst_dma = edesc->sec4_sg_dma +
  1027. sec4_sg_index *
  1028. sizeof(struct sec4_sg_entry);
  1029. out_options = LDST_SGF;
  1030. }
  1031. }
  1032. append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
  1033. out_options);
  1034. }
  1035. /*
  1036. * Fill in ablkcipher job descriptor
  1037. */
  1038. static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  1039. struct ablkcipher_edesc *edesc,
  1040. struct ablkcipher_request *req,
  1041. bool iv_contig)
  1042. {
  1043. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1044. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1045. u32 *desc = edesc->hw_desc;
  1046. u32 out_options = 0, in_options;
  1047. dma_addr_t dst_dma, src_dma;
  1048. int len, sec4_sg_index = 0;
  1049. #ifdef DEBUG
  1050. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1051. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1052. ivsize, 1);
  1053. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1054. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1055. edesc->src_nents ? 100 : req->nbytes, 1);
  1056. #endif
  1057. len = desc_len(sh_desc);
  1058. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1059. if (iv_contig) {
  1060. src_dma = edesc->iv_dma;
  1061. in_options = 0;
  1062. } else {
  1063. src_dma = edesc->sec4_sg_dma;
  1064. sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
  1065. in_options = LDST_SGF;
  1066. }
  1067. append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
  1068. if (likely(req->src == req->dst)) {
  1069. if (!edesc->src_nents && iv_contig) {
  1070. dst_dma = sg_dma_address(req->src);
  1071. } else {
  1072. dst_dma = edesc->sec4_sg_dma +
  1073. sizeof(struct sec4_sg_entry);
  1074. out_options = LDST_SGF;
  1075. }
  1076. } else {
  1077. if (!edesc->dst_nents) {
  1078. dst_dma = sg_dma_address(req->dst);
  1079. } else {
  1080. dst_dma = edesc->sec4_sg_dma +
  1081. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1082. out_options = LDST_SGF;
  1083. }
  1084. }
  1085. append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
  1086. }
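/*
 * Net effect of init_ablkcipher_job() (sketch):
 *
 *	SEQ IN  ptr -> IV | payload   (req->nbytes + ivsize bytes)
 *	SEQ OUT ptr -> payload        (req->nbytes bytes)
 *
 * with the same direct-pointer vs. sec4_sg link-table choice as in the
 * aead case above.
 */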
  1087. /*
  1088. * allocate and map the aead extended descriptor
  1089. */
  1090. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  1091. int desc_bytes, bool *all_contig_ptr,
  1092. bool encrypt)
  1093. {
  1094. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1095. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1096. struct device *jrdev = ctx->jrdev;
  1097. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  1098. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  1099. int assoc_nents, src_nents, dst_nents = 0;
  1100. struct aead_edesc *edesc;
  1101. dma_addr_t iv_dma = 0;
  1102. int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Check if data are contiguous */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
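/*
 * aead_encrypt - build and enqueue an encrypt-and-authenticate job for an
 * AEAD request on the job ring; returns -EINPROGRESS on successful
 * submission, with completion reported through aead_encrypt_done().
 */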
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
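/*
 * aead_decrypt - build and enqueue a decrypt-and-verify job for an AEAD
 * request; on submission failure the extended descriptor is unmapped and
 * freed here, otherwise completion arrives through aead_decrypt_done().
 */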
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Check if data are contiguous */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
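/*
 * aead_givencrypt - encrypt path that also covers per-request IV generation:
 * the extended descriptor is built from the givcrypt request (greq->giv is
 * mapped alongside the data) and the job is constructed on the givencrypt
 * shared descriptor before being enqueued on the job ring.
 */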
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
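/*
 * ablkcipher_encrypt - build and enqueue an encrypt job for an ablkcipher
 * request using the shared encrypt descriptor; completion is reported
 * through ablkcipher_encrypt_done().
 */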
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
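/*
 * ablkcipher_decrypt - build and enqueue a decrypt job for an ablkcipher
 * request using the shared decrypt descriptor; completion is reported
 * through ablkcipher_decrypt_done().
 */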
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
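/*
 * Table of algorithm templates registered with the crypto API: generic and
 * driver names, crypto API type, the per-type callback template, and the
 * CAAM class 1/class 2 algorithm selectors used when the shared descriptors
 * are constructed.
 */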
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
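/*
 * caam_cra_init - per-transform setup: allocate a job ring for this tfm and
 * copy the class 1, class 2 and alg_op descriptor header template values
 * from the registered algorithm into the context.
 */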
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
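/*
 * caam_cra_exit - per-transform teardown: unmap the shared descriptors and
 * the key if they were mapped, then release the job ring.
 */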
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
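/*
 * caam_algapi_exit - module exit: unregister and free every algorithm that
 * was added to alg_list at init time.
 */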
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
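/*
 * caam_alg_alloc - allocate a caam_crypto_alg and fill in the embedded
 * crypto_alg (names, priority, ctxsize, flags and the type-specific
 * template) from a driver_algs entry.
 */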
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
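/*
 * caam_algapi_init - module init: locate the CAAM controller node, bail out
 * if the controller driver did not probe successfully, then register each
 * entry of driver_algs with the crypto API.
 */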
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}

	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");