caamalg_qi.c
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES	(DESC_QI_AEAD_GIVENC_LEN + \
				 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
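
/*
 * Note: descriptor lengths are byte counts, while the per-session shared
 * descriptor buffers below are arrays of 32-bit CAAM command words
 * (CAAM_CMD_SZ bytes each, per desc_constr.h) - hence the
 * DESC_MAX_USED_BYTES / CAAM_CMD_SZ division above.
 */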

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;
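
	/*
	 * For each shared descriptor built below, desc_inline_query() decides
	 * per key whether it fits inline in the descriptor or must be
	 * referenced by DMA address: bit 0 of inl_mask covers the (split)
	 * authentication key, bit 1 the cipher key.
	 */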
	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
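
	/*
	 * Generate the authentication "split key": the CAAM precomputes the
	 * HMAC ipad/opad hash states from the raw auth key (this is what the
	 * OP_ALG_AAI_HMAC_PRECOMP setting in the templates below relies on)
	 * and stores them at the start of ctx->key.
	 */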
	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, DMA_TO_DEVICE);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
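
	/*
	 * XTS concatenates two equal-length AES keys (data key plus tweak
	 * key), so only 2 * 128-bit and 2 * 256-bit inputs are valid here.
	 */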
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	/* was "return 0", which silently reported a rejected key as success */
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
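
	/*
	 * Fill the QI compound frame: throughout this file fd_sgt[1]
	 * describes the input (here the S/G table covering the 4-byte
	 * assoclen word - counted in in_len above - plus optional IV and
	 * payload) and fd_sgt[0] the output.
	 */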
	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
#ifdef DEBUG
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	ablkcipher_request_complete(req, status);
}

static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
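
	/*
	 * If the IV buffer ends exactly where the single mapped source
	 * segment begins, the hardware can fetch IV + payload as one
	 * contiguous input and the input link table can be skipped.
	 */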
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = qm_sg_ents;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (!in_contig) {
		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
	}

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (!in_contig)
		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
					  ivsize + req->nbytes, 0);
	else
		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
				      0);

	if (req->src == req->dst) {
		if (!in_contig)
			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
					     sizeof(*sg_table), req->nbytes, 0);
		else
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
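
	/*
	 * The IV is generated by the hardware here (hence the DMA_FROM_DEVICE
	 * mapping of creq->giv above). If it lands immediately in front of a
	 * single destination segment, the output is contiguous and needs no
	 * link table.
	 */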
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
  1106. static struct caam_aead_alg driver_aeads[] = {
  1107. /* single-pass ipsec_esp descriptor */
  1108. {
  1109. .aead = {
  1110. .base = {
  1111. .cra_name = "authenc(hmac(md5),cbc(aes))",
  1112. .cra_driver_name = "authenc-hmac-md5-"
  1113. "cbc-aes-caam-qi",
  1114. .cra_blocksize = AES_BLOCK_SIZE,
  1115. },
  1116. .setkey = aead_setkey,
  1117. .setauthsize = aead_setauthsize,
  1118. .encrypt = aead_encrypt,
  1119. .decrypt = aead_decrypt,
  1120. .ivsize = AES_BLOCK_SIZE,
  1121. .maxauthsize = MD5_DIGEST_SIZE,
  1122. },
  1123. .caam = {
  1124. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1125. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1126. OP_ALG_AAI_HMAC_PRECOMP,
  1127. }
  1128. },
  1129. {
  1130. .aead = {
  1131. .base = {
  1132. .cra_name = "echainiv(authenc(hmac(md5),"
  1133. "cbc(aes)))",
  1134. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  1135. "cbc-aes-caam-qi",
  1136. .cra_blocksize = AES_BLOCK_SIZE,
  1137. },
  1138. .setkey = aead_setkey,
  1139. .setauthsize = aead_setauthsize,
  1140. .encrypt = aead_encrypt,
  1141. .decrypt = aead_decrypt,
  1142. .ivsize = AES_BLOCK_SIZE,
  1143. .maxauthsize = MD5_DIGEST_SIZE,
  1144. },
  1145. .caam = {
  1146. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1147. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1148. OP_ALG_AAI_HMAC_PRECOMP,
  1149. .geniv = true,
  1150. }
  1151. },
  1152. {
  1153. .aead = {
  1154. .base = {
  1155. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1156. .cra_driver_name = "authenc-hmac-sha1-"
  1157. "cbc-aes-caam-qi",
  1158. .cra_blocksize = AES_BLOCK_SIZE,
  1159. },
  1160. .setkey = aead_setkey,
  1161. .setauthsize = aead_setauthsize,
  1162. .encrypt = aead_encrypt,
  1163. .decrypt = aead_decrypt,
  1164. .ivsize = AES_BLOCK_SIZE,
  1165. .maxauthsize = SHA1_DIGEST_SIZE,
  1166. },
  1167. .caam = {
  1168. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1169. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  1170. OP_ALG_AAI_HMAC_PRECOMP,
  1171. }
  1172. },
  1173. {
  1174. .aead = {
  1175. .base = {
  1176. .cra_name = "echainiv(authenc(hmac(sha1),"
  1177. "cbc(aes)))",
  1178. .cra_driver_name = "echainiv-authenc-"
  1179. "hmac-sha1-cbc-aes-caam-qi",
  1180. .cra_blocksize = AES_BLOCK_SIZE,
  1181. },
  1182. .setkey = aead_setkey,
  1183. .setauthsize = aead_setauthsize,
  1184. .encrypt = aead_encrypt,
  1185. .decrypt = aead_decrypt,
  1186. .ivsize = AES_BLOCK_SIZE,
  1187. .maxauthsize = SHA1_DIGEST_SIZE,
  1188. },
  1189. .caam = {
  1190. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1191. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  1192. OP_ALG_AAI_HMAC_PRECOMP,
  1193. .geniv = true,
  1194. },
  1195. },
  1196. {
  1197. .aead = {
  1198. .base = {
  1199. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  1200. .cra_driver_name = "authenc-hmac-sha224-"
  1201. "cbc-aes-caam-qi",
  1202. .cra_blocksize = AES_BLOCK_SIZE,
  1203. },
  1204. .setkey = aead_setkey,
  1205. .setauthsize = aead_setauthsize,
  1206. .encrypt = aead_encrypt,
  1207. .decrypt = aead_decrypt,
  1208. .ivsize = AES_BLOCK_SIZE,
  1209. .maxauthsize = SHA224_DIGEST_SIZE,
  1210. },
  1211. .caam = {
  1212. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1213. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  1214. OP_ALG_AAI_HMAC_PRECOMP,
  1215. }
  1216. },
  1217. {
  1218. .aead = {
  1219. .base = {
  1220. .cra_name = "echainiv(authenc(hmac(sha224),"
  1221. "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
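	/* cbc(des3_ede) entries: hmac(md5..sha512), plain and echainiv-wrapped */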
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
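	/* cbc(des) entries: hmac(md5..sha512), plain and echainiv-wrapped */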
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};
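
/*
 * Bookkeeping wrapper for the ablkcipher/givcipher templates in driver_algs:
 * pairs a generic crypto_alg with its CAAM class 1/2 algorithm types and
 * links each registered instance into alg_list for cleanup at module exit.
 */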
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};
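
/* Per-tfm setup shared by the ablkcipher and AEAD init paths */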
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}
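
/* Thin init wrappers that recover the enclosing caam alg and its CAAM entry */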
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}
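
/* Undo caam_init_common(): drop driver contexts, key DMA mapping and job ring */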
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
			 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
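
/* ablkcipher/givcipher algorithms allocated by caam_alg_alloc() at init time */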
static struct list_head alg_list;

static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
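
/*
 * Allocate and fill a crypto_alg from an ablkcipher/givcipher template:
 * copy the names, wire up the init/exit hooks and flags, and record the
 * CAAM class 1/2 algorithm types used for descriptor construction.
 */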
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
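
/* Fill in the fields shared by every AEAD entry in driver_aeads */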
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
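
/*
 * Probe the SEC controller and register whatever the hardware can back:
 * read the CHA version/instantiation registers, then skip DES-, AES- and
 * MD-based algorithms the device does not instantiate. Once registration
 * succeeds, a transform can be allocated the usual way, e.g. (assuming an
 * AES- and MD-capable device)
 * crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0)
 * would bind to this implementation when it holds the top priority.
 */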
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");