caamalg_qi.c

/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES	(DESC_QI_AEAD_GIVENC_LEN + \
				 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
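	/*
	 * Illustration (added; numbers are an example, not from the original
	 * code): with RFC3686 the key material in ctx->key is laid out as
	 *
	 *   ctx->key: [ auth split key (keylen_pad) | AES key | 4-byte nonce ]
	 *
	 * so for, say, keylen_pad = 64 and a 16-byte AES key plus nonce
	 * (cdata.keylen = 20), the nonce pointer computed above lands at
	 * ctx->key + 64 + 20 - 4 = ctx->key + 80, and the IV is loaded at
	 * CONTEXT1 byte offset 16 + 4 = 20.
	 */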
	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
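	/*
	 * Note (added for clarity): desc_inline_query() fills inl_mask with
	 * one bit per data_len[] entry:
	 *
	 *   inl_mask & 1  ->  auth key (data_len[0]) still fits inline
	 *   inl_mask & 2  ->  cipher key (data_len[1]) still fits inline
	 *
	 * A set bit means the key is embedded in the shared descriptor
	 * (key_virt); a clear bit means only its DMA address is referenced
	 * (key_dma). The same pattern repeats for the decrypt and givencrypt
	 * descriptors below.
	 */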
	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}
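	/*
	 * Clarification (added): on Era >= 6 hardware the Derived Key
	 * Protocol (DKP) lets the shared descriptor itself derive the HMAC
	 * split key from the raw authentication key, so the branch above
	 * only copies the raw keys into ctx->key. On older parts the split
	 * key must instead be precomputed on the host, which is what
	 * gen_split_key() below does via a job ring request.
	 */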
	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}
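	/*
	 * Worked example (added; a sketch of the budget, not original code):
	 * the 64-word descriptor buffer holds 64 * CAAM_CMD_SZ = 256 bytes,
	 * which is what CAAM_DESC_BYTES_MAX expresses. With a 32-byte GCM
	 * key, rem_bytes = 256 - DESC_JOB_IO_LEN - 32; the key is inlined
	 * only if that remainder still covers DESC_QI_GCM_ENC_LEN, otherwise
	 * just its DMA address is embedded in the descriptor.
	 */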
	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
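	/*
	 * Layout note (added): per RFC 4106 the key handed in by the crypto
	 * API is {AES key, 4-byte salt}. For example, keylen = 20 means a
	 * 16-byte AES key followed by the salt, so cdata.keylen becomes 16
	 * and only that prefix of ctx->key is synced to the device here.
	 */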
	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 * | *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}
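	/*
	 * Example (added): for rfc3686(ctr(aes)) with a 16-byte AES key, the
	 * caller hands in 20 bytes laid out as {KEY[16], NONCE[4]}
	 * (CTR_RFC3686_NONCE_SIZE is 4), so keylen drops back to 16 here and
	 * the trailing nonce stays reachable right after the key bytes.
	 */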
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_AEAD_SG						\
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};
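/*
 * Sizing sketch (added; the exact numbers depend on the qi.h definitions):
 * edescs come from a fixed-size qi_cache pool, so if CAAM_QI_MEMCACHE_SIZE
 * is 768 bytes and a qm_sg_entry is 16 bytes, CAAM_QI_MAX_AEAD_SG works out
 * to (768 - offsetof(struct aead_edesc, sgt)) / 16, i.e. roughly 40-odd S/G
 * entries available per request after the edesc header.
 */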
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
	 sizeof(struct qm_sg_entry))
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}
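/*
 * Note (added): get_drv_ctx() is a double-checked locking pattern. The
 * unlocked read handles the common case where the driver context already
 * exists, and ctx->lock only serializes the one-time lazy creation, so two
 * CPUs racing on a session's first request cannot both call
 * caam_drv_ctx_init() for the same operation type.
 */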
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
		ivsize = crypto_aead_ivsize(aead);
		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, op_type, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
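	/*
	 * Layout recap (added): the qm S/G table built below is
	 *
	 *   [ assoclen (4 bytes) | IV (if any) | src entries | dst entries ]
	 *
	 * where the dst entries are only appended when the destination is a
	 * separate, multi-segment scatterlist; a single dst segment is
	 * referenced directly from the frame descriptor instead.
	 */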
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);
	qi_cache_free(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	ablkcipher_request_complete(req, status);
}
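/*
 * Note (added): copying the last ciphertext block back into req->info is
 * what lets callers chain requests; e.g. CBC across two consecutive
 * ablkcipher_request submissions behaves like one long CBC stream because
 * the second request starts from the IV the first one left behind.
 */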
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		qm_sg_ents = 0;
	} else {
		in_contig = false;
		qm_sg_ents = 1 + mapped_src_nents;
	}
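	/*
	 * Note (added): in_contig covers the case where the DMA-mapped IV
	 * happens to sit immediately before a single source segment, so
	 * {IV, src} already form one contiguous input and no qm S/G entries
	 * are needed for the input side; otherwise one entry is spent on the
	 * IV plus one per mapped source segment.
	 */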
  1072. dst_sg_idx = qm_sg_ents;
  1073. qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
  1074. if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
  1075. dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
  1076. qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
  1077. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  1078. iv_dma, ivsize, op_type, 0, 0);
  1079. return ERR_PTR(-ENOMEM);
  1080. }
  1081. /* allocate space for base edesc and link tables */
  1082. edesc = qi_cache_alloc(GFP_DMA | flags);
  1083. if (unlikely(!edesc)) {
  1084. dev_err(qidev, "could not allocate extended descriptor\n");
  1085. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  1086. iv_dma, ivsize, op_type, 0, 0);
  1087. return ERR_PTR(-ENOMEM);
  1088. }
  1089. edesc->src_nents = src_nents;
  1090. edesc->dst_nents = dst_nents;
  1091. edesc->iv_dma = iv_dma;
  1092. sg_table = &edesc->sgt[0];
  1093. edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
  1094. edesc->drv_req.app_ctx = req;
  1095. edesc->drv_req.cbk = ablkcipher_done;
  1096. edesc->drv_req.drv_ctx = drv_ctx;
  1097. if (!in_contig) {
  1098. dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  1099. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
  1100. }
  1101. if (mapped_dst_nents > 1)
  1102. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  1103. dst_sg_idx, 0);
  1104. edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
  1105. DMA_TO_DEVICE);
  1106. if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
  1107. dev_err(qidev, "unable to map S/G table\n");
  1108. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  1109. iv_dma, ivsize, op_type, 0, 0);
  1110. qi_cache_free(edesc);
  1111. return ERR_PTR(-ENOMEM);
  1112. }
  1113. fd_sgt = &edesc->drv_req.fd_sgt[0];
  1114. if (!in_contig)
  1115. dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
  1116. ivsize + req->nbytes, 0);
  1117. else
  1118. dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
  1119. 0);
  1120. if (req->src == req->dst) {
  1121. if (!in_contig)
  1122. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
  1123. sizeof(*sg_table), req->nbytes, 0);
  1124. else
  1125. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
  1126. req->nbytes, 0);
  1127. } else if (mapped_dst_nents > 1) {
  1128. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
  1129. sizeof(*sg_table), req->nbytes, 0);
  1130. } else {
  1131. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
  1132. req->nbytes, 0);
  1133. }
  1134. return edesc;
  1135. }
  1136. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  1137. struct skcipher_givcrypt_request *creq)
  1138. {
  1139. struct ablkcipher_request *req = &creq->creq;
  1140. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1141. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1142. struct device *qidev = ctx->qidev;
  1143. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  1144. GFP_KERNEL : GFP_ATOMIC;
  1145. int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
  1146. struct ablkcipher_edesc *edesc;
  1147. dma_addr_t iv_dma;
  1148. bool out_contig;
  1149. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1150. struct qm_sg_entry *sg_table, *fd_sgt;
  1151. int dst_sg_idx, qm_sg_ents;
  1152. struct caam_drv_ctx *drv_ctx;
  1153. drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
  1154. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  1155. return (struct ablkcipher_edesc *)drv_ctx;
  1156. src_nents = sg_nents_for_len(req->src, req->nbytes);
  1157. if (unlikely(src_nents < 0)) {
  1158. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  1159. req->nbytes);
  1160. return ERR_PTR(src_nents);
  1161. }
  1162. if (unlikely(req->src != req->dst)) {
  1163. dst_nents = sg_nents_for_len(req->dst, req->nbytes);
  1164. if (unlikely(dst_nents < 0)) {
  1165. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  1166. req->nbytes);
  1167. return ERR_PTR(dst_nents);
  1168. }
  1169. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  1170. DMA_TO_DEVICE);
  1171. if (unlikely(!mapped_src_nents)) {
  1172. dev_err(qidev, "unable to map source\n");
  1173. return ERR_PTR(-ENOMEM);
  1174. }
  1175. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  1176. DMA_FROM_DEVICE);
  1177. if (unlikely(!mapped_dst_nents)) {
  1178. dev_err(qidev, "unable to map destination\n");
  1179. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  1180. return ERR_PTR(-ENOMEM);
  1181. }
  1182. } else {
  1183. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  1184. DMA_BIDIRECTIONAL);
  1185. if (unlikely(!mapped_src_nents)) {
  1186. dev_err(qidev, "unable to map source\n");
  1187. return ERR_PTR(-ENOMEM);
  1188. }
  1189. dst_nents = src_nents;
  1190. mapped_dst_nents = src_nents;
  1191. }
  1192. iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
  1193. if (dma_mapping_error(qidev, iv_dma)) {
  1194. dev_err(qidev, "unable to map IV\n");
  1195. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  1196. 0, 0, 0, 0);
  1197. return ERR_PTR(-ENOMEM);
  1198. }
	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		qm_sg_ents += 1 + mapped_dst_nents;
	}

	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	sg_table = &edesc->sgt[0];
	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	if (!out_contig) {
		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx + 1, 0);
	}

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
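	/*
	 * Fill the compound frame: fd_sgt[1] describes the input (the
	 * plaintext source), fd_sgt[0] the output (the generated IV followed
	 * by the encrypted payload, hence ivsize + req->nbytes).
	 */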
	fd_sgt = &edesc->drv_req.fd_sgt[0];
	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	if (!out_contig)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), ivsize + req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 ivsize + req->nbytes, 0);

	return edesc;
}
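/*
 * Common encrypt/decrypt entry point: back off with -EAGAIN while the QI
 * backend is congested, otherwise build an extended descriptor, enqueue it,
 * and complete asynchronously via ablkcipher_done().
 */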
static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}
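/*
 * givencrypt follows the same enqueue path, but uses the GIVENCRYPT driver
 * context so the shared descriptor also generates the IV.
 */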
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
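/*
 * Static template describing an ablkcipher algorithm: generic crypto API
 * names and sizes, plus the CAAM class 1/2 operation types needed when the
 * shared descriptors are built.
 */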
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};
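/*
 * These templates are instantiated into full crypto_alg objects by
 * caam_alg_alloc() at module init. A minimal consumer sketch, assuming the
 * ablkcipher API of this kernel generation (identifiers illustrative, error
 * handling omitted):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 */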
static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
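/*
 * AEAD algorithms are registered directly as aead_alg instances, unlike the
 * templates above; .geniv marks the echainiv wrappers whose shared
 * descriptor must account for IV generation.
 */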
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};
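/*
 * Runtime wrapper tying a registered crypto_alg back to its CAAM class 1/2
 * operation types; instances are kept on alg_list.
 */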
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}
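/*
 * tfm init hooks. Ciphers never use the DKP (split key) protocol; among the
 * AEADs only the authenc variants (those using aead_setkey) do, which is why
 * their key buffer must be mapped bidirectionally on Era 6+ devices above.
 */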
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
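/*
 * Module teardown: unregister every AEAD that made it past registration,
 * then drain alg_list, unregistering and freeing each (giv)cipher alg.
 */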
static struct list_head alg_list;

static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
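/*
 * Turn a driver_algs template into a heap-allocated crypto_alg: common
 * fields first, then the type-specific cra_type/cra_ablkcipher wiring.
 */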
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
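/* Fill in the boilerplate base fields shared by every entry in driver_aeads. */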
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
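/*
 * Module init: locate the CAAM controller node, bail out unless the QI
 * backend is available (and the platform is not DPAA 2.x), then register
 * only the algorithms the detected DES/AES/MD hardware blocks can support.
 */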
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}
module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");