caamalg_qi.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Freescale FSL CAAM support for crypto API over QI backend.
  4. * Based on caamalg.c
  5. *
  6. * Copyright 2013-2016 Freescale Semiconductor, Inc.
  7. * Copyright 2016-2018 NXP
  8. */
  9. #include "compat.h"
  10. #include "ctrl.h"
  11. #include "regs.h"
  12. #include "intern.h"
  13. #include "desc_constr.h"
  14. #include "error.h"
  15. #include "sg_sw_qm.h"
  16. #include "key_gen.h"
  17. #include "qi.h"
  18. #include "jr.h"
  19. #include "caamalg_desc.h"
  20. /*
  21. * crypto alg
  22. */
  23. #define CAAM_CRA_PRIORITY 2000
  24. /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
  25. #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
  26. SHA512_DIGEST_SIZE * 2)
  27. #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
  28. CAAM_MAX_KEY_SIZE)
  29. #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
  30. struct caam_alg_entry {
  31. int class1_alg_type;
  32. int class2_alg_type;
  33. bool rfc3686;
  34. bool geniv;
  35. };
  36. struct caam_aead_alg {
  37. struct aead_alg aead;
  38. struct caam_alg_entry caam;
  39. bool registered;
  40. };
  41. struct caam_skcipher_alg {
  42. struct skcipher_alg skcipher;
  43. struct caam_alg_entry caam;
  44. bool registered;
  45. };
  46. /*
  47. * per-session context
  48. */
  49. struct caam_ctx {
  50. struct device *jrdev;
  51. u32 sh_desc_enc[DESC_MAX_USED_LEN];
  52. u32 sh_desc_dec[DESC_MAX_USED_LEN];
  53. u8 key[CAAM_MAX_KEY_SIZE];
  54. dma_addr_t key_dma;
  55. enum dma_data_direction dir;
  56. struct alginfo adata;
  57. struct alginfo cdata;
  58. unsigned int authsize;
  59. struct device *qidev;
  60. spinlock_t lock; /* Protects multiple init of driver context */
  61. struct caam_drv_ctx *drv_ctx[NUM_OP];
  62. };
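/*
 * aead_set_sh_desc() - (re)build the encrypt and decrypt shared descriptors
 * for an authenc-style AEAD session. For each descriptor, desc_inline_query()
 * decides whether the split authentication key and the cipher key can be
 * inlined in the descriptor or must be referenced by DMA address, based on
 * how much of the 64-word descriptor buffer remains. geniv transforms get a
 * givencrypt descriptor in place of the plain encrypt one.
 */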
  63. static int aead_set_sh_desc(struct crypto_aead *aead)
  64. {
  65. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  66. typeof(*alg), aead);
  67. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  68. unsigned int ivsize = crypto_aead_ivsize(aead);
  69. u32 ctx1_iv_off = 0;
  70. u32 *nonce = NULL;
  71. unsigned int data_len[2];
  72. u32 inl_mask;
  73. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  74. OP_ALG_AAI_CTR_MOD128);
  75. const bool is_rfc3686 = alg->caam.rfc3686;
  76. struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
  77. if (!ctx->cdata.keylen || !ctx->authsize)
  78. return 0;
  79. /*
  80. * AES-CTR needs to load IV in CONTEXT1 reg
81. * at an offset of 128 bits (16 bytes)
  82. * CONTEXT1[255:128] = IV
  83. */
  84. if (ctr_mode)
  85. ctx1_iv_off = 16;
  86. /*
  87. * RFC3686 specific:
  88. * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  89. */
  90. if (is_rfc3686) {
  91. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  92. nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
  93. ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
  94. }
  95. data_len[0] = ctx->adata.keylen_pad;
  96. data_len[1] = ctx->cdata.keylen;
  97. if (alg->caam.geniv)
  98. goto skip_enc;
  99. /* aead_encrypt shared descriptor */
  100. if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
  101. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  102. DESC_JOB_IO_LEN, data_len, &inl_mask,
  103. ARRAY_SIZE(data_len)) < 0)
  104. return -EINVAL;
  105. if (inl_mask & 1)
  106. ctx->adata.key_virt = ctx->key;
  107. else
  108. ctx->adata.key_dma = ctx->key_dma;
  109. if (inl_mask & 2)
  110. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  111. else
  112. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  113. ctx->adata.key_inline = !!(inl_mask & 1);
  114. ctx->cdata.key_inline = !!(inl_mask & 2);
  115. cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  116. ivsize, ctx->authsize, is_rfc3686, nonce,
  117. ctx1_iv_off, true, ctrlpriv->era);
  118. skip_enc:
  119. /* aead_decrypt shared descriptor */
  120. if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
  121. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  122. DESC_JOB_IO_LEN, data_len, &inl_mask,
  123. ARRAY_SIZE(data_len)) < 0)
  124. return -EINVAL;
  125. if (inl_mask & 1)
  126. ctx->adata.key_virt = ctx->key;
  127. else
  128. ctx->adata.key_dma = ctx->key_dma;
  129. if (inl_mask & 2)
  130. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  131. else
  132. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  133. ctx->adata.key_inline = !!(inl_mask & 1);
  134. ctx->cdata.key_inline = !!(inl_mask & 2);
  135. cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
  136. ivsize, ctx->authsize, alg->caam.geniv,
  137. is_rfc3686, nonce, ctx1_iv_off, true,
  138. ctrlpriv->era);
  139. if (!alg->caam.geniv)
  140. goto skip_givenc;
  141. /* aead_givencrypt shared descriptor */
  142. if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
  143. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  144. DESC_JOB_IO_LEN, data_len, &inl_mask,
  145. ARRAY_SIZE(data_len)) < 0)
  146. return -EINVAL;
  147. if (inl_mask & 1)
  148. ctx->adata.key_virt = ctx->key;
  149. else
  150. ctx->adata.key_dma = ctx->key_dma;
  151. if (inl_mask & 2)
  152. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  153. else
  154. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  155. ctx->adata.key_inline = !!(inl_mask & 1);
  156. ctx->cdata.key_inline = !!(inl_mask & 2);
  157. cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  158. ivsize, ctx->authsize, is_rfc3686, nonce,
  159. ctx1_iv_off, true, ctrlpriv->era);
  160. skip_givenc:
  161. return 0;
  162. }
  163. static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  164. {
  165. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  166. ctx->authsize = authsize;
  167. aead_set_sh_desc(authenc);
  168. return 0;
  169. }
  170. static int aead_setkey(struct crypto_aead *aead, const u8 *key,
  171. unsigned int keylen)
  172. {
  173. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  174. struct device *jrdev = ctx->jrdev;
  175. struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
  176. struct crypto_authenc_keys keys;
  177. int ret = 0;
  178. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  179. goto badkey;
  180. #ifdef DEBUG
  181. dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
  182. keys.authkeylen + keys.enckeylen, keys.enckeylen,
  183. keys.authkeylen);
  184. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  185. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  186. #endif
  187. /*
  188. * If DKP is supported, use it in the shared descriptor to generate
  189. * the split key.
  190. */
  191. if (ctrlpriv->era >= 6) {
  192. ctx->adata.keylen = keys.authkeylen;
  193. ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
  194. OP_ALG_ALGSEL_MASK);
  195. if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  196. goto badkey;
  197. memcpy(ctx->key, keys.authkey, keys.authkeylen);
  198. memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
  199. keys.enckeylen);
  200. dma_sync_single_for_device(jrdev, ctx->key_dma,
  201. ctx->adata.keylen_pad +
  202. keys.enckeylen, ctx->dir);
  203. goto skip_split_key;
  204. }
  205. ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
  206. keys.authkeylen, CAAM_MAX_KEY_SIZE -
  207. keys.enckeylen);
  208. if (ret)
  209. goto badkey;
210. /* append encryption key to auth split key */
  211. memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  212. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
  213. keys.enckeylen, ctx->dir);
  214. #ifdef DEBUG
  215. print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  216. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  217. ctx->adata.keylen_pad + keys.enckeylen, 1);
  218. #endif
  219. skip_split_key:
  220. ctx->cdata.keylen = keys.enckeylen;
  221. ret = aead_set_sh_desc(aead);
  222. if (ret)
  223. goto badkey;
  224. /* Now update the driver contexts with the new shared descriptor */
  225. if (ctx->drv_ctx[ENCRYPT]) {
  226. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  227. ctx->sh_desc_enc);
  228. if (ret) {
  229. dev_err(jrdev, "driver enc context update failed\n");
  230. goto badkey;
  231. }
  232. }
  233. if (ctx->drv_ctx[DECRYPT]) {
  234. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  235. ctx->sh_desc_dec);
  236. if (ret) {
  237. dev_err(jrdev, "driver dec context update failed\n");
  238. goto badkey;
  239. }
  240. }
  241. memzero_explicit(&keys, sizeof(keys));
  242. return ret;
  243. badkey:
  244. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  245. memzero_explicit(&keys, sizeof(keys));
  246. return -EINVAL;
  247. }
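/*
 * For the GCM-based transforms below, key inlining is decided directly:
 * rem_bytes is the descriptor space left after reserving room for the job
 * descriptor I/O commands and the key itself. The key is inlined only if the
 * fixed part of the shared descriptor (e.g. DESC_QI_GCM_ENC_LEN) still fits
 * in rem_bytes; otherwise it is referenced through ctx->key_dma.
 */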
  248. static int gcm_set_sh_desc(struct crypto_aead *aead)
  249. {
  250. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  251. unsigned int ivsize = crypto_aead_ivsize(aead);
  252. int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  253. ctx->cdata.keylen;
  254. if (!ctx->cdata.keylen || !ctx->authsize)
  255. return 0;
  256. /*
  257. * Job Descriptor and Shared Descriptor
  258. * must fit into the 64-word Descriptor h/w Buffer
  259. */
  260. if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
  261. ctx->cdata.key_inline = true;
  262. ctx->cdata.key_virt = ctx->key;
  263. } else {
  264. ctx->cdata.key_inline = false;
  265. ctx->cdata.key_dma = ctx->key_dma;
  266. }
  267. cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  268. ctx->authsize, true);
  269. /*
  270. * Job Descriptor and Shared Descriptor
  271. * must fit into the 64-word Descriptor h/w Buffer
  272. */
  273. if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
  274. ctx->cdata.key_inline = true;
  275. ctx->cdata.key_virt = ctx->key;
  276. } else {
  277. ctx->cdata.key_inline = false;
  278. ctx->cdata.key_dma = ctx->key_dma;
  279. }
  280. cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  281. ctx->authsize, true);
  282. return 0;
  283. }
  284. static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  285. {
  286. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  287. ctx->authsize = authsize;
  288. gcm_set_sh_desc(authenc);
  289. return 0;
  290. }
  291. static int gcm_setkey(struct crypto_aead *aead,
  292. const u8 *key, unsigned int keylen)
  293. {
  294. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  295. struct device *jrdev = ctx->jrdev;
  296. int ret;
  297. #ifdef DEBUG
  298. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  299. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  300. #endif
  301. memcpy(ctx->key, key, keylen);
  302. dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
  303. ctx->cdata.keylen = keylen;
  304. ret = gcm_set_sh_desc(aead);
  305. if (ret)
  306. return ret;
  307. /* Now update the driver contexts with the new shared descriptor */
  308. if (ctx->drv_ctx[ENCRYPT]) {
  309. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  310. ctx->sh_desc_enc);
  311. if (ret) {
  312. dev_err(jrdev, "driver enc context update failed\n");
  313. return ret;
  314. }
  315. }
  316. if (ctx->drv_ctx[DECRYPT]) {
  317. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  318. ctx->sh_desc_dec);
  319. if (ret) {
  320. dev_err(jrdev, "driver dec context update failed\n");
  321. return ret;
  322. }
  323. }
  324. return 0;
  325. }
  326. static int rfc4106_set_sh_desc(struct crypto_aead *aead)
  327. {
  328. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  329. unsigned int ivsize = crypto_aead_ivsize(aead);
  330. int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  331. ctx->cdata.keylen;
  332. if (!ctx->cdata.keylen || !ctx->authsize)
  333. return 0;
  334. ctx->cdata.key_virt = ctx->key;
  335. /*
  336. * Job Descriptor and Shared Descriptor
  337. * must fit into the 64-word Descriptor h/w Buffer
  338. */
  339. if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
  340. ctx->cdata.key_inline = true;
  341. } else {
  342. ctx->cdata.key_inline = false;
  343. ctx->cdata.key_dma = ctx->key_dma;
  344. }
  345. cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  346. ctx->authsize, true);
  347. /*
  348. * Job Descriptor and Shared Descriptor
  349. * must fit into the 64-word Descriptor h/w Buffer
  350. */
  351. if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
  352. ctx->cdata.key_inline = true;
  353. } else {
  354. ctx->cdata.key_inline = false;
  355. ctx->cdata.key_dma = ctx->key_dma;
  356. }
  357. cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  358. ctx->authsize, true);
  359. return 0;
  360. }
  361. static int rfc4106_setauthsize(struct crypto_aead *authenc,
  362. unsigned int authsize)
  363. {
  364. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  365. ctx->authsize = authsize;
  366. rfc4106_set_sh_desc(authenc);
  367. return 0;
  368. }
  369. static int rfc4106_setkey(struct crypto_aead *aead,
  370. const u8 *key, unsigned int keylen)
  371. {
  372. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  373. struct device *jrdev = ctx->jrdev;
  374. int ret;
  375. if (keylen < 4)
  376. return -EINVAL;
  377. #ifdef DEBUG
  378. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  379. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  380. #endif
  381. memcpy(ctx->key, key, keylen);
  382. /*
  383. * The last four bytes of the key material are used as the salt value
  384. * in the nonce. Update the AES key length.
  385. */
  386. ctx->cdata.keylen = keylen - 4;
  387. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  388. ctx->dir);
  389. ret = rfc4106_set_sh_desc(aead);
  390. if (ret)
  391. return ret;
  392. /* Now update the driver contexts with the new shared descriptor */
  393. if (ctx->drv_ctx[ENCRYPT]) {
  394. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  395. ctx->sh_desc_enc);
  396. if (ret) {
  397. dev_err(jrdev, "driver enc context update failed\n");
  398. return ret;
  399. }
  400. }
  401. if (ctx->drv_ctx[DECRYPT]) {
  402. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  403. ctx->sh_desc_dec);
  404. if (ret) {
  405. dev_err(jrdev, "driver dec context update failed\n");
  406. return ret;
  407. }
  408. }
  409. return 0;
  410. }
  411. static int rfc4543_set_sh_desc(struct crypto_aead *aead)
  412. {
  413. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  414. unsigned int ivsize = crypto_aead_ivsize(aead);
  415. int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
  416. ctx->cdata.keylen;
  417. if (!ctx->cdata.keylen || !ctx->authsize)
  418. return 0;
  419. ctx->cdata.key_virt = ctx->key;
  420. /*
  421. * Job Descriptor and Shared Descriptor
  422. * must fit into the 64-word Descriptor h/w Buffer
  423. */
  424. if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
  425. ctx->cdata.key_inline = true;
  426. } else {
  427. ctx->cdata.key_inline = false;
  428. ctx->cdata.key_dma = ctx->key_dma;
  429. }
  430. cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  431. ctx->authsize, true);
  432. /*
  433. * Job Descriptor and Shared Descriptor
  434. * must fit into the 64-word Descriptor h/w Buffer
  435. */
  436. if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
  437. ctx->cdata.key_inline = true;
  438. } else {
  439. ctx->cdata.key_inline = false;
  440. ctx->cdata.key_dma = ctx->key_dma;
  441. }
  442. cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  443. ctx->authsize, true);
  444. return 0;
  445. }
  446. static int rfc4543_setauthsize(struct crypto_aead *authenc,
  447. unsigned int authsize)
  448. {
  449. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  450. ctx->authsize = authsize;
  451. rfc4543_set_sh_desc(authenc);
  452. return 0;
  453. }
  454. static int rfc4543_setkey(struct crypto_aead *aead,
  455. const u8 *key, unsigned int keylen)
  456. {
  457. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  458. struct device *jrdev = ctx->jrdev;
  459. int ret;
  460. if (keylen < 4)
  461. return -EINVAL;
  462. #ifdef DEBUG
  463. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  464. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  465. #endif
  466. memcpy(ctx->key, key, keylen);
  467. /*
  468. * The last four bytes of the key material are used as the salt value
  469. * in the nonce. Update the AES key length.
  470. */
  471. ctx->cdata.keylen = keylen - 4;
  472. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
  473. ctx->dir);
  474. ret = rfc4543_set_sh_desc(aead);
  475. if (ret)
  476. return ret;
  477. /* Now update the driver contexts with the new shared descriptor */
  478. if (ctx->drv_ctx[ENCRYPT]) {
  479. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  480. ctx->sh_desc_enc);
  481. if (ret) {
  482. dev_err(jrdev, "driver enc context update failed\n");
  483. return ret;
  484. }
  485. }
  486. if (ctx->drv_ctx[DECRYPT]) {
  487. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  488. ctx->sh_desc_dec);
  489. if (ret) {
  490. dev_err(jrdev, "driver dec context update failed\n");
  491. return ret;
  492. }
  493. }
  494. return 0;
  495. }
  496. static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  497. unsigned int keylen)
  498. {
  499. struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  500. struct caam_skcipher_alg *alg =
  501. container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
  502. skcipher);
  503. struct device *jrdev = ctx->jrdev;
  504. unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
  505. u32 ctx1_iv_off = 0;
  506. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  507. OP_ALG_AAI_CTR_MOD128);
  508. const bool is_rfc3686 = alg->caam.rfc3686;
  509. int ret = 0;
  510. #ifdef DEBUG
  511. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  512. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  513. #endif
  514. /*
  515. * AES-CTR needs to load IV in CONTEXT1 reg
516. * at an offset of 128 bits (16 bytes)
  517. * CONTEXT1[255:128] = IV
  518. */
  519. if (ctr_mode)
  520. ctx1_iv_off = 16;
  521. /*
  522. * RFC3686 specific:
  523. * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  524. * | *key = {KEY, NONCE}
  525. */
  526. if (is_rfc3686) {
  527. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  528. keylen -= CTR_RFC3686_NONCE_SIZE;
  529. }
  530. ctx->cdata.keylen = keylen;
  531. ctx->cdata.key_virt = key;
  532. ctx->cdata.key_inline = true;
  533. /* skcipher encrypt, decrypt shared descriptors */
  534. cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  535. is_rfc3686, ctx1_iv_off);
  536. cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  537. is_rfc3686, ctx1_iv_off);
  538. /* Now update the driver contexts with the new shared descriptor */
  539. if (ctx->drv_ctx[ENCRYPT]) {
  540. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  541. ctx->sh_desc_enc);
  542. if (ret) {
  543. dev_err(jrdev, "driver enc context update failed\n");
  544. goto badkey;
  545. }
  546. }
  547. if (ctx->drv_ctx[DECRYPT]) {
  548. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  549. ctx->sh_desc_dec);
  550. if (ret) {
  551. dev_err(jrdev, "driver dec context update failed\n");
  552. goto badkey;
  553. }
  554. }
  555. return ret;
  556. badkey:
  557. crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  558. return -EINVAL;
  559. }
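/*
 * XTS uses two AES keys of equal size concatenated in @key, so the only
 * valid key lengths are 2 * AES_MIN_KEY_SIZE (32 bytes) and
 * 2 * AES_MAX_KEY_SIZE (64 bytes); anything else is rejected below.
 */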
  560. static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  561. unsigned int keylen)
  562. {
  563. struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  564. struct device *jrdev = ctx->jrdev;
  565. int ret = 0;
  566. if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
  567. dev_err(jrdev, "key size mismatch\n");
  568. goto badkey;
  569. }
  570. ctx->cdata.keylen = keylen;
  571. ctx->cdata.key_virt = key;
  572. ctx->cdata.key_inline = true;
  573. /* xts skcipher encrypt, decrypt shared descriptors */
  574. cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
  575. cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
  576. /* Now update the driver contexts with the new shared descriptor */
  577. if (ctx->drv_ctx[ENCRYPT]) {
  578. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  579. ctx->sh_desc_enc);
  580. if (ret) {
  581. dev_err(jrdev, "driver enc context update failed\n");
  582. goto badkey;
  583. }
  584. }
  585. if (ctx->drv_ctx[DECRYPT]) {
  586. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  587. ctx->sh_desc_dec);
  588. if (ret) {
  589. dev_err(jrdev, "driver dec context update failed\n");
  590. goto badkey;
  591. }
  592. }
  593. return ret;
  594. badkey:
  595. crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  596. return -EINVAL;
  597. }
  598. /*
  599. * aead_edesc - s/w-extended aead descriptor
  600. * @src_nents: number of segments in input scatterlist
  601. * @dst_nents: number of segments in output scatterlist
  602. * @iv_dma: dma address of iv for checking continuity and link table
  603. * @qm_sg_bytes: length of dma mapped h/w link table
  604. * @qm_sg_dma: bus physical mapped address of h/w link table
  605. * @assoclen: associated data length, in CAAM endianness
  606. * @assoclen_dma: bus physical mapped address of req->assoclen
  607. * @drv_req: driver-specific request structure
  608. * @sgt: the h/w link table, followed by IV
  609. */
  610. struct aead_edesc {
  611. int src_nents;
  612. int dst_nents;
  613. dma_addr_t iv_dma;
  614. int qm_sg_bytes;
  615. dma_addr_t qm_sg_dma;
  616. unsigned int assoclen;
  617. dma_addr_t assoclen_dma;
  618. struct caam_drv_req drv_req;
  619. struct qm_sg_entry sgt[0];
  620. };
  621. /*
  622. * skcipher_edesc - s/w-extended skcipher descriptor
  623. * @src_nents: number of segments in input scatterlist
  624. * @dst_nents: number of segments in output scatterlist
  625. * @iv_dma: dma address of iv for checking continuity and link table
  626. * @qm_sg_bytes: length of dma mapped h/w link table
  627. * @qm_sg_dma: bus physical mapped address of h/w link table
  628. * @drv_req: driver-specific request structure
  629. * @sgt: the h/w link table, followed by IV
  630. */
  631. struct skcipher_edesc {
  632. int src_nents;
  633. int dst_nents;
  634. dma_addr_t iv_dma;
  635. int qm_sg_bytes;
  636. dma_addr_t qm_sg_dma;
  637. struct caam_drv_req drv_req;
  638. struct qm_sg_entry sgt[0];
  639. };
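/*
 * get_drv_ctx() lazily creates the per-operation (ENCRYPT/DECRYPT) QI driver
 * context on first use. Double-checked locking on ctx->lock ensures that
 * concurrent first requests on different CPUs create the context only once;
 * later calls on the fast path return the cached pointer without taking the
 * lock.
 */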
  640. static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
  641. enum optype type)
  642. {
  643. /*
  644. * This function is called on the fast path with values of 'type'
  645. * known at compile time. Invalid arguments are not expected and
  646. * thus no checks are made.
  647. */
  648. struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
  649. u32 *desc;
  650. if (unlikely(!drv_ctx)) {
  651. spin_lock(&ctx->lock);
652. /* Re-read to check whether another core already initialized drv_ctx */
  653. drv_ctx = ctx->drv_ctx[type];
  654. if (!drv_ctx) {
  655. int cpu;
  656. if (type == ENCRYPT)
  657. desc = ctx->sh_desc_enc;
  658. else /* (type == DECRYPT) */
  659. desc = ctx->sh_desc_dec;
  660. cpu = smp_processor_id();
  661. drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
  662. if (likely(!IS_ERR_OR_NULL(drv_ctx)))
  663. drv_ctx->op_type = type;
  664. ctx->drv_ctx[type] = drv_ctx;
  665. }
  666. spin_unlock(&ctx->lock);
  667. }
  668. return drv_ctx;
  669. }
  670. static void caam_unmap(struct device *dev, struct scatterlist *src,
  671. struct scatterlist *dst, int src_nents,
  672. int dst_nents, dma_addr_t iv_dma, int ivsize,
  673. dma_addr_t qm_sg_dma, int qm_sg_bytes)
  674. {
  675. if (dst != src) {
  676. if (src_nents)
  677. dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
  678. dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
  679. } else {
  680. dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
  681. }
  682. if (iv_dma)
  683. dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
  684. if (qm_sg_bytes)
  685. dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
  686. }
  687. static void aead_unmap(struct device *dev,
  688. struct aead_edesc *edesc,
  689. struct aead_request *req)
  690. {
  691. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  692. int ivsize = crypto_aead_ivsize(aead);
  693. caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  694. edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
  695. dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  696. }
  697. static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
  698. struct skcipher_request *req)
  699. {
  700. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  701. int ivsize = crypto_skcipher_ivsize(skcipher);
  702. caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  703. edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
  704. }
  705. static void aead_done(struct caam_drv_req *drv_req, u32 status)
  706. {
  707. struct device *qidev;
  708. struct aead_edesc *edesc;
  709. struct aead_request *aead_req = drv_req->app_ctx;
  710. struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
  711. struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
  712. int ecode = 0;
  713. qidev = caam_ctx->qidev;
  714. if (unlikely(status)) {
  715. u32 ssrc = status & JRSTA_SSRC_MASK;
  716. u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
  717. caam_jr_strstatus(qidev, status);
  718. /*
719. * verify the hw auth check passed; otherwise return -EBADMSG
  720. */
  721. if (ssrc == JRSTA_SSRC_CCB_ERROR &&
  722. err_id == JRSTA_CCBERR_ERRID_ICVCHK)
  723. ecode = -EBADMSG;
  724. else
  725. ecode = -EIO;
  726. }
  727. edesc = container_of(drv_req, typeof(*edesc), drv_req);
  728. aead_unmap(qidev, edesc, aead_req);
  729. aead_request_complete(aead_req, ecode);
  730. qi_cache_free(edesc);
  731. }
  732. /*
  733. * allocate and map the aead extended descriptor
  734. */
  735. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  736. bool encrypt)
  737. {
  738. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  739. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  740. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  741. typeof(*alg), aead);
  742. struct device *qidev = ctx->qidev;
  743. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  744. GFP_KERNEL : GFP_ATOMIC;
  745. int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  746. struct aead_edesc *edesc;
  747. dma_addr_t qm_sg_dma, iv_dma = 0;
  748. int ivsize = 0;
  749. unsigned int authsize = ctx->authsize;
  750. int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
  751. int in_len, out_len;
  752. struct qm_sg_entry *sg_table, *fd_sgt;
  753. struct caam_drv_ctx *drv_ctx;
  754. drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
  755. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  756. return (struct aead_edesc *)drv_ctx;
  757. /* allocate space for base edesc and hw desc commands, link tables */
  758. edesc = qi_cache_alloc(GFP_DMA | flags);
  759. if (unlikely(!edesc)) {
  760. dev_err(qidev, "could not allocate extended descriptor\n");
  761. return ERR_PTR(-ENOMEM);
  762. }
  763. if (likely(req->src == req->dst)) {
  764. src_nents = sg_nents_for_len(req->src, req->assoclen +
  765. req->cryptlen +
  766. (encrypt ? authsize : 0));
  767. if (unlikely(src_nents < 0)) {
  768. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  769. req->assoclen + req->cryptlen +
  770. (encrypt ? authsize : 0));
  771. qi_cache_free(edesc);
  772. return ERR_PTR(src_nents);
  773. }
  774. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  775. DMA_BIDIRECTIONAL);
  776. if (unlikely(!mapped_src_nents)) {
  777. dev_err(qidev, "unable to map source\n");
  778. qi_cache_free(edesc);
  779. return ERR_PTR(-ENOMEM);
  780. }
  781. } else {
  782. src_nents = sg_nents_for_len(req->src, req->assoclen +
  783. req->cryptlen);
  784. if (unlikely(src_nents < 0)) {
  785. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  786. req->assoclen + req->cryptlen);
  787. qi_cache_free(edesc);
  788. return ERR_PTR(src_nents);
  789. }
  790. dst_nents = sg_nents_for_len(req->dst, req->assoclen +
  791. req->cryptlen +
  792. (encrypt ? authsize :
  793. (-authsize)));
  794. if (unlikely(dst_nents < 0)) {
  795. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  796. req->assoclen + req->cryptlen +
  797. (encrypt ? authsize : (-authsize)));
  798. qi_cache_free(edesc);
  799. return ERR_PTR(dst_nents);
  800. }
  801. if (src_nents) {
  802. mapped_src_nents = dma_map_sg(qidev, req->src,
  803. src_nents, DMA_TO_DEVICE);
  804. if (unlikely(!mapped_src_nents)) {
  805. dev_err(qidev, "unable to map source\n");
  806. qi_cache_free(edesc);
  807. return ERR_PTR(-ENOMEM);
  808. }
  809. } else {
  810. mapped_src_nents = 0;
  811. }
  812. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  813. DMA_FROM_DEVICE);
  814. if (unlikely(!mapped_dst_nents)) {
  815. dev_err(qidev, "unable to map destination\n");
  816. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  817. qi_cache_free(edesc);
  818. return ERR_PTR(-ENOMEM);
  819. }
  820. }
  821. if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
  822. ivsize = crypto_aead_ivsize(aead);
  823. /*
  824. * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
  825. * Input is not contiguous.
  826. */
  827. qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
  828. (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
  829. sg_table = &edesc->sgt[0];
  830. qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
  831. if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
  832. CAAM_QI_MEMCACHE_SIZE)) {
  833. dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
  834. qm_sg_ents, ivsize);
  835. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  836. 0, 0, 0);
  837. qi_cache_free(edesc);
  838. return ERR_PTR(-ENOMEM);
  839. }
  840. if (ivsize) {
  841. u8 *iv = (u8 *)(sg_table + qm_sg_ents);
  842. /* Make sure IV is located in a DMAable area */
  843. memcpy(iv, req->iv, ivsize);
  844. iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
  845. if (dma_mapping_error(qidev, iv_dma)) {
  846. dev_err(qidev, "unable to map IV\n");
  847. caam_unmap(qidev, req->src, req->dst, src_nents,
  848. dst_nents, 0, 0, 0, 0);
  849. qi_cache_free(edesc);
  850. return ERR_PTR(-ENOMEM);
  851. }
  852. }
  853. edesc->src_nents = src_nents;
  854. edesc->dst_nents = dst_nents;
  855. edesc->iv_dma = iv_dma;
  856. edesc->drv_req.app_ctx = req;
  857. edesc->drv_req.cbk = aead_done;
  858. edesc->drv_req.drv_ctx = drv_ctx;
  859. edesc->assoclen = cpu_to_caam32(req->assoclen);
  860. edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
  861. DMA_TO_DEVICE);
  862. if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
  863. dev_err(qidev, "unable to map assoclen\n");
  864. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  865. iv_dma, ivsize, 0, 0);
  866. qi_cache_free(edesc);
  867. return ERR_PTR(-ENOMEM);
  868. }
  869. dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
  870. qm_sg_index++;
  871. if (ivsize) {
  872. dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
  873. qm_sg_index++;
  874. }
  875. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
  876. qm_sg_index += mapped_src_nents;
  877. if (mapped_dst_nents > 1)
  878. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  879. qm_sg_index, 0);
  880. qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
  881. if (dma_mapping_error(qidev, qm_sg_dma)) {
  882. dev_err(qidev, "unable to map S/G table\n");
  883. dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  884. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  885. iv_dma, ivsize, 0, 0);
  886. qi_cache_free(edesc);
  887. return ERR_PTR(-ENOMEM);
  888. }
  889. edesc->qm_sg_dma = qm_sg_dma;
  890. edesc->qm_sg_bytes = qm_sg_bytes;
  891. out_len = req->assoclen + req->cryptlen +
  892. (encrypt ? ctx->authsize : (-ctx->authsize));
  893. in_len = 4 + ivsize + req->assoclen + req->cryptlen;
  894. fd_sgt = &edesc->drv_req.fd_sgt[0];
  895. dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
  896. if (req->dst == req->src) {
  897. if (mapped_src_nents == 1)
  898. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
  899. out_len, 0);
  900. else
  901. dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
  902. (1 + !!ivsize) * sizeof(*sg_table),
  903. out_len, 0);
  904. } else if (mapped_dst_nents == 1) {
  905. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
  906. 0);
  907. } else {
  908. dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
  909. qm_sg_index, out_len, 0);
  910. }
  911. return edesc;
  912. }
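/*
 * aead_crypt() backs off with -EAGAIN when the QI backend is congested,
 * otherwise it builds the extended descriptor and enqueues it. On successful
 * enqueue it returns -EINPROGRESS; unmapping, freeing and the user callback
 * happen asynchronously in aead_done().
 */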
  913. static inline int aead_crypt(struct aead_request *req, bool encrypt)
  914. {
  915. struct aead_edesc *edesc;
  916. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  917. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  918. int ret;
  919. if (unlikely(caam_congested))
  920. return -EAGAIN;
  921. /* allocate extended descriptor */
  922. edesc = aead_edesc_alloc(req, encrypt);
  923. if (IS_ERR_OR_NULL(edesc))
  924. return PTR_ERR(edesc);
  925. /* Create and submit job descriptor */
  926. ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  927. if (!ret) {
  928. ret = -EINPROGRESS;
  929. } else {
  930. aead_unmap(ctx->qidev, edesc, req);
  931. qi_cache_free(edesc);
  932. }
  933. return ret;
  934. }
  935. static int aead_encrypt(struct aead_request *req)
  936. {
  937. return aead_crypt(req, true);
  938. }
  939. static int aead_decrypt(struct aead_request *req)
  940. {
  941. return aead_crypt(req, false);
  942. }
  943. static int ipsec_gcm_encrypt(struct aead_request *req)
  944. {
  945. if (req->assoclen < 8)
  946. return -EINVAL;
  947. return aead_crypt(req, true);
  948. }
  949. static int ipsec_gcm_decrypt(struct aead_request *req)
  950. {
  951. if (req->assoclen < 8)
  952. return -EINVAL;
  953. return aead_crypt(req, false);
  954. }
  955. static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
  956. {
  957. struct skcipher_edesc *edesc;
  958. struct skcipher_request *req = drv_req->app_ctx;
  959. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  960. struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
  961. struct device *qidev = caam_ctx->qidev;
  962. int ivsize = crypto_skcipher_ivsize(skcipher);
  963. #ifdef DEBUG
  964. dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
  965. #endif
  966. edesc = container_of(drv_req, typeof(*edesc), drv_req);
  967. if (status)
  968. caam_jr_strstatus(qidev, status);
  969. #ifdef DEBUG
  970. print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
  971. DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
  972. edesc->src_nents > 1 ? 100 : ivsize, 1);
  973. caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
  974. DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
  975. edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
  976. #endif
  977. skcipher_unmap(qidev, edesc, req);
  978. /*
  979. * The crypto API expects us to set the IV (req->iv) to the last
  980. * ciphertext block. This is used e.g. by the CTS mode.
  981. */
  982. if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
  983. scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
  984. ivsize, ivsize, 0);
  985. qi_cache_free(edesc);
  986. skcipher_request_complete(req, status);
  987. }
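/*
 * The skcipher S/G table built below places the IV in entry 0, followed by
 * the source segments and, when the destination differs and is scattered,
 * the destination segments at dst_sg_idx. The IV is first copied into the
 * edesc (right after the S/G entries) so that it lives in a DMA-able buffer
 * regardless of where req->iv points.
 */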
  988. static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
  989. bool encrypt)
  990. {
  991. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  992. struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  993. struct device *qidev = ctx->qidev;
  994. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  995. GFP_KERNEL : GFP_ATOMIC;
  996. int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  997. struct skcipher_edesc *edesc;
  998. dma_addr_t iv_dma;
  999. u8 *iv;
  1000. int ivsize = crypto_skcipher_ivsize(skcipher);
  1001. int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
  1002. struct qm_sg_entry *sg_table, *fd_sgt;
  1003. struct caam_drv_ctx *drv_ctx;
  1004. drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
  1005. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  1006. return (struct skcipher_edesc *)drv_ctx;
  1007. src_nents = sg_nents_for_len(req->src, req->cryptlen);
  1008. if (unlikely(src_nents < 0)) {
  1009. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  1010. req->cryptlen);
  1011. return ERR_PTR(src_nents);
  1012. }
  1013. if (unlikely(req->src != req->dst)) {
  1014. dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
  1015. if (unlikely(dst_nents < 0)) {
  1016. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  1017. req->cryptlen);
  1018. return ERR_PTR(dst_nents);
  1019. }
  1020. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  1021. DMA_TO_DEVICE);
  1022. if (unlikely(!mapped_src_nents)) {
  1023. dev_err(qidev, "unable to map source\n");
  1024. return ERR_PTR(-ENOMEM);
  1025. }
  1026. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  1027. DMA_FROM_DEVICE);
  1028. if (unlikely(!mapped_dst_nents)) {
  1029. dev_err(qidev, "unable to map destination\n");
  1030. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  1031. return ERR_PTR(-ENOMEM);
  1032. }
  1033. } else {
  1034. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  1035. DMA_BIDIRECTIONAL);
  1036. if (unlikely(!mapped_src_nents)) {
  1037. dev_err(qidev, "unable to map source\n");
  1038. return ERR_PTR(-ENOMEM);
  1039. }
  1040. }
  1041. qm_sg_ents = 1 + mapped_src_nents;
  1042. dst_sg_idx = qm_sg_ents;
  1043. qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
  1044. qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
  1045. if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
  1046. ivsize > CAAM_QI_MEMCACHE_SIZE)) {
  1047. dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
  1048. qm_sg_ents, ivsize);
  1049. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  1050. 0, 0, 0);
  1051. return ERR_PTR(-ENOMEM);
  1052. }
  1053. /* allocate space for base edesc, link tables and IV */
  1054. edesc = qi_cache_alloc(GFP_DMA | flags);
  1055. if (unlikely(!edesc)) {
  1056. dev_err(qidev, "could not allocate extended descriptor\n");
  1057. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  1058. 0, 0, 0);
  1059. return ERR_PTR(-ENOMEM);
  1060. }
  1061. /* Make sure IV is located in a DMAable area */
  1062. sg_table = &edesc->sgt[0];
  1063. iv = (u8 *)(sg_table + qm_sg_ents);
  1064. memcpy(iv, req->iv, ivsize);
  1065. iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
  1066. if (dma_mapping_error(qidev, iv_dma)) {
  1067. dev_err(qidev, "unable to map IV\n");
  1068. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  1069. 0, 0, 0);
  1070. qi_cache_free(edesc);
  1071. return ERR_PTR(-ENOMEM);
  1072. }
  1073. edesc->src_nents = src_nents;
  1074. edesc->dst_nents = dst_nents;
  1075. edesc->iv_dma = iv_dma;
  1076. edesc->qm_sg_bytes = qm_sg_bytes;
  1077. edesc->drv_req.app_ctx = req;
  1078. edesc->drv_req.cbk = skcipher_done;
  1079. edesc->drv_req.drv_ctx = drv_ctx;
  1080. dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  1081. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
  1082. if (mapped_dst_nents > 1)
  1083. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  1084. dst_sg_idx, 0);
  1085. edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
  1086. DMA_TO_DEVICE);
  1087. if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
  1088. dev_err(qidev, "unable to map S/G table\n");
  1089. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  1090. iv_dma, ivsize, 0, 0);
  1091. qi_cache_free(edesc);
  1092. return ERR_PTR(-ENOMEM);
  1093. }
  1094. fd_sgt = &edesc->drv_req.fd_sgt[0];
  1095. dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
  1096. ivsize + req->cryptlen, 0);
  1097. if (req->src == req->dst) {
  1098. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
  1099. sizeof(*sg_table), req->cryptlen, 0);
  1100. } else if (mapped_dst_nents > 1) {
  1101. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
  1102. sizeof(*sg_table), req->cryptlen, 0);
  1103. } else {
  1104. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
  1105. req->cryptlen, 0);
  1106. }
  1107. return edesc;
  1108. }
  1109. static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
  1110. {
  1111. struct skcipher_edesc *edesc;
  1112. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  1113. struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
  1114. int ivsize = crypto_skcipher_ivsize(skcipher);
  1115. int ret;
  1116. if (unlikely(caam_congested))
  1117. return -EAGAIN;
  1118. /* allocate extended descriptor */
  1119. edesc = skcipher_edesc_alloc(req, encrypt);
  1120. if (IS_ERR(edesc))
  1121. return PTR_ERR(edesc);
  1122. /*
  1123. * The crypto API expects us to set the IV (req->iv) to the last
  1124. * ciphertext block.
  1125. */
  1126. if (!encrypt)
  1127. scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
  1128. ivsize, ivsize, 0);
  1129. ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  1130. if (!ret) {
  1131. ret = -EINPROGRESS;
  1132. } else {
  1133. skcipher_unmap(ctx->qidev, edesc, req);
  1134. qi_cache_free(edesc);
  1135. }
  1136. return ret;
  1137. }
  1138. static int skcipher_encrypt(struct skcipher_request *req)
  1139. {
  1140. return skcipher_crypt(req, true);
  1141. }
  1142. static int skcipher_decrypt(struct skcipher_request *req)
  1143. {
  1144. return skcipher_crypt(req, false);
  1145. }
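/*
 * Template tables for the algorithms exposed by this backend. Entries are
 * turned into full skcipher/aead registrations (with the "-caam-qi" driver
 * names and CAAM_CRA_PRIORITY) by the module init code, which is not part of
 * this excerpt.
 */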
  1146. static struct caam_skcipher_alg driver_algs[] = {
  1147. {
  1148. .skcipher = {
  1149. .base = {
  1150. .cra_name = "cbc(aes)",
  1151. .cra_driver_name = "cbc-aes-caam-qi",
  1152. .cra_blocksize = AES_BLOCK_SIZE,
  1153. },
  1154. .setkey = skcipher_setkey,
  1155. .encrypt = skcipher_encrypt,
  1156. .decrypt = skcipher_decrypt,
  1157. .min_keysize = AES_MIN_KEY_SIZE,
  1158. .max_keysize = AES_MAX_KEY_SIZE,
  1159. .ivsize = AES_BLOCK_SIZE,
  1160. },
  1161. .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1162. },
  1163. {
  1164. .skcipher = {
  1165. .base = {
  1166. .cra_name = "cbc(des3_ede)",
  1167. .cra_driver_name = "cbc-3des-caam-qi",
  1168. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1169. },
  1170. .setkey = skcipher_setkey,
  1171. .encrypt = skcipher_encrypt,
  1172. .decrypt = skcipher_decrypt,
  1173. .min_keysize = DES3_EDE_KEY_SIZE,
  1174. .max_keysize = DES3_EDE_KEY_SIZE,
  1175. .ivsize = DES3_EDE_BLOCK_SIZE,
  1176. },
  1177. .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  1178. },
  1179. {
  1180. .skcipher = {
  1181. .base = {
  1182. .cra_name = "cbc(des)",
  1183. .cra_driver_name = "cbc-des-caam-qi",
  1184. .cra_blocksize = DES_BLOCK_SIZE,
  1185. },
  1186. .setkey = skcipher_setkey,
  1187. .encrypt = skcipher_encrypt,
  1188. .decrypt = skcipher_decrypt,
  1189. .min_keysize = DES_KEY_SIZE,
  1190. .max_keysize = DES_KEY_SIZE,
  1191. .ivsize = DES_BLOCK_SIZE,
  1192. },
  1193. .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  1194. },
  1195. {
  1196. .skcipher = {
  1197. .base = {
  1198. .cra_name = "ctr(aes)",
  1199. .cra_driver_name = "ctr-aes-caam-qi",
  1200. .cra_blocksize = 1,
  1201. },
  1202. .setkey = skcipher_setkey,
  1203. .encrypt = skcipher_encrypt,
  1204. .decrypt = skcipher_decrypt,
  1205. .min_keysize = AES_MIN_KEY_SIZE,
  1206. .max_keysize = AES_MAX_KEY_SIZE,
  1207. .ivsize = AES_BLOCK_SIZE,
  1208. .chunksize = AES_BLOCK_SIZE,
  1209. },
  1210. .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
  1211. OP_ALG_AAI_CTR_MOD128,
  1212. },
  1213. {
  1214. .skcipher = {
  1215. .base = {
  1216. .cra_name = "rfc3686(ctr(aes))",
  1217. .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
  1218. .cra_blocksize = 1,
  1219. },
  1220. .setkey = skcipher_setkey,
  1221. .encrypt = skcipher_encrypt,
  1222. .decrypt = skcipher_decrypt,
  1223. .min_keysize = AES_MIN_KEY_SIZE +
  1224. CTR_RFC3686_NONCE_SIZE,
  1225. .max_keysize = AES_MAX_KEY_SIZE +
  1226. CTR_RFC3686_NONCE_SIZE,
  1227. .ivsize = CTR_RFC3686_IV_SIZE,
  1228. .chunksize = AES_BLOCK_SIZE,
  1229. },
  1230. .caam = {
  1231. .class1_alg_type = OP_ALG_ALGSEL_AES |
  1232. OP_ALG_AAI_CTR_MOD128,
  1233. .rfc3686 = true,
  1234. },
  1235. },
  1236. {
  1237. .skcipher = {
  1238. .base = {
  1239. .cra_name = "xts(aes)",
  1240. .cra_driver_name = "xts-aes-caam-qi",
  1241. .cra_blocksize = AES_BLOCK_SIZE,
  1242. },
  1243. .setkey = xts_skcipher_setkey,
  1244. .encrypt = skcipher_encrypt,
  1245. .decrypt = skcipher_decrypt,
  1246. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  1247. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  1248. .ivsize = AES_BLOCK_SIZE,
  1249. },
  1250. .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  1251. },
  1252. };
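/*
 * Illustrative only (not part of this driver): one way a kernel user could
 * exercise the "cbc-aes-caam-qi" implementation above is through the generic
 * skcipher API, e.g.:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	sg_init_one(&sg, buf, len);	(len a multiple of AES_BLOCK_SIZE)
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
 *				      CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Error handling is omitted; key, buf, len and iv are assumed to be provided
 * by the caller, and which backend services "cbc(aes)" depends on priority.
 */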
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};
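
/*
 * Shared transform init for skcipher and AEAD tfms: allocate a job ring
 * device so requests of a given tfm are processed in order, DMA-map the
 * context key buffer (bidirectionally on Era 6+ parts that use DKP, since
 * the split key is generated in place), and copy the class 1/2 descriptor
 * header templates from the algorithm entry.
 */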
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}
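
/*
 * crypto API init hooks: resolve the enclosing caam_*_alg wrapper via
 * container_of() and forward to caam_init_common(). For AEADs, uses_dkp is
 * true only for the authenc path (alg->setkey == aead_setkey).
 */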
static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}
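
/*
 * Transform teardown: release the cached ENCRYPT/DECRYPT driver contexts,
 * unmap the key buffer and return the job ring device allocated at init.
 */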
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}
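
/*
 * Fill in the boilerplate crypto_alg fields (module owner, priority, context
 * size, ASYNC | KERN_DRIVER_ONLY flags) and hook up the init/exit callbacks
 * before registering a template.
 */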
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
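
/*
 * Module init: locate the CAAM controller, make sure the QI backend is
 * available (and that this is not a DPAA 2.x part), read the CHA version
 * and instantiation registers, then register every skcipher/AEAD template
 * whose DES/AES/MD requirements the detected hardware satisfies.
 */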
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");