caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
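
/*
 * Worked example of the budget above (illustrative numbers only; the
 * exact DESC_JOB_IO_LEN value is platform dependent): with
 * CAAM_DESC_BYTES_MAX == 256 (the 64-word descriptor buffer,
 * CAAM_CMD_SZ == 4 bytes per word) and assuming DESC_JOB_IO_LEN works
 * out to 44 bytes (as with 64-bit CAAM pointers), DESC_MAX_USED_BYTES
 * is 256 - 44 = 212 and DESC_MAX_USED_LEN is 53 words. Each sh_desc_*
 * array in struct caam_ctx below then reserves 53 u32 slots.
 */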

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
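
/*
 * The inline-vs-reference decision above recurs for every shared
 * descriptor in this file. As a minimal sketch (pseudocode, not a
 * driver function):
 *
 *	if (rem_bytes >= DESC_XXX_LEN) {
 *		// key fits: copy the key bytes into the descriptor
 *		adata.key_inline = true;
 *		adata.key_virt = ctx->key;
 *	} else {
 *		// too big: descriptor carries a pointer to the DMA key
 *		adata.key_inline = false;
 *		adata.key_dma = ctx->key_dma;
 *	}
 *
 * Inlining saves the h/w a key fetch; referencing keeps the whole
 * descriptor within the 64-word buffer.
 */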

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}
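
/*
 * desc_inline_query() reports per-key inlining in inl_mask: bit 0
 * covers data_len[0] (the authentication split key) and bit 1 covers
 * data_len[1] (the cipher key). A set bit means that key still fits
 * inline alongside everything else in the 64-word buffer; a clear bit
 * means the shared descriptor must reference it through key_dma
 * instead, which is exactly the mapping applied above after each
 * query.
 */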

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
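
/*
 * Resulting ctx->key layout for authenc algorithms (both the DKP and
 * the gen_split_key() paths above):
 *
 *	+------------------------+----------------------+
 *	| auth (split) key       | encryption key       |
 *	| adata.keylen_pad bytes | cdata.keylen bytes   |
 *	+------------------------+----------------------+
 *
 * which is why the cipher key is addressed as
 * ctx->key + ctx->adata.keylen_pad throughout aead_set_sh_desc().
 */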

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
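
/*
 * For rfc3686(ctr(aes)) the API-provided key is {AES key, 4-byte
 * nonce}; the code above trims keylen by CTR_RFC3686_NONCE_SIZE so
 * that cdata.keylen describes the AES key alone, while the nonce stays
 * at key + keylen for the descriptor constructors to pick up.
 * Example: a 20-byte rfc3686 key yields cdata.keylen == 16 (AES-128)
 * plus a 4-byte nonce.
 */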

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}
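
/*
 * XTS consumes two AES keys of equal size, so the only keylen values
 * the check above accepts are 32 (2 x AES-128) and 64 (2 x AES-256)
 * bytes; 2 x AES-192 (48 bytes) is rejected by this driver.
 */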

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
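
/*
 * Memory layout of an allocated extended descriptor (see
 * aead_edesc_alloc() below): a single kzalloc'd region holds the
 * struct, then desc_bytes of h/w job descriptor commands in
 * hw_desc[], then the link table that sec4_sg points into:
 *
 *	+--------------+------------------+------------------+
 *	| struct       | hw_desc[]        | sec4_sg entries  |
 *	| *_edesc      | desc_bytes       | sec4_sg_bytes    |
 *	+--------------+------------------+------------------+
 */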

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
				 ivsize, 0);

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}
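
/*
 * The sequence pointers appended above give the h/w job descriptor the
 * shape sketched at the top of this file: SEQ IN covers assoclen +
 * cryptlen bytes of input, while SEQ OUT covers the same region plus
 * authsize on encrypt (the ICV is appended) or minus authsize on
 * decrypt (the ICV is consumed by the h/w check rather than copied
 * out).
 */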

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE |
		   last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command
	 * supports having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}
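
/*
 * Era note on the MATH command above: per the comment, CAAM eras
 * earlier than 3 cannot target DPOVRD from a MATH command, so assoclen
 * is parked in REG3 and the shared descriptor reads it from there;
 * newer eras write DPOVRD directly.
 */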

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	pr_err("asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
#endif
	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (edesc->src_nents == 1 && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (edesc->src_nents == 1) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes,
					   bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
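
/*
 * Link-table sizing example for aead_edesc_alloc() (illustrative
 * numbers): a 3-segment source and a 1-segment destination give
 * sec4_sg_len == 3 + 0 == 3, i.e. only multi-segment sides need
 * sec4_sg entries. A fully contiguous request allocates no table at
 * all, skips the dma_map_single() of the table, and reports
 * *all_contig_ptr == true so the job descriptor uses direct pointers.
 */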
  1137. static int gcm_encrypt(struct aead_request *req)
  1138. {
  1139. struct aead_edesc *edesc;
  1140. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1141. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1142. struct device *jrdev = ctx->jrdev;
  1143. bool all_contig;
  1144. u32 *desc;
  1145. int ret = 0;
  1146. /* allocate extended descriptor */
  1147. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
  1148. if (IS_ERR(edesc))
  1149. return PTR_ERR(edesc);
  1150. /* Create and submit job descriptor */
  1151. init_gcm_job(req, edesc, all_contig, true);
  1152. #ifdef DEBUG
  1153. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  1154. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  1155. desc_bytes(edesc->hw_desc), 1);
  1156. #endif
  1157. desc = edesc->hw_desc;
  1158. ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
  1159. if (!ret) {
  1160. ret = -EINPROGRESS;
  1161. } else {
  1162. aead_unmap(jrdev, edesc, req);
  1163. kfree(edesc);
  1164. }
  1165. return ret;
  1166. }
  1167. static int ipsec_gcm_encrypt(struct aead_request *req)
  1168. {
  1169. if (req->assoclen < 8)
  1170. return -EINVAL;
  1171. return gcm_encrypt(req);
  1172. }
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
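
	/*
	 * If the IV buffer happens to sit immediately before the (single)
	 * mapped source segment, IV and payload can be fed to the CAAM as
	 * one contiguous input; otherwise build a link table with the IV
	 * as its first entry, followed by the source chain.
	 */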
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		sec4_sg_ents = 0;
	} else {
		in_contig = false;
		sec4_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	if (!in_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + 1, 0);
	}

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = in_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Check if the IV can be contiguous with the destination.
	 * If so, include it directly; if not, add it to the link table.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}
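
	/*
	 * For givencrypt the generated IV is returned to the caller at the
	 * front of the output chain: if the IV buffer immediately precedes
	 * the single mapped dst segment, both form one contiguous output
	 * region; otherwise the IV becomes the first entry of the output
	 * link table, followed by the destination chain.
	 */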
	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = sec4_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		sec4_sg_ents += 1 + mapped_dst_nents;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	if (mapped_src_nents > 1)
		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
				   0);

	if (!out_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
				   iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx + 1, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = out_contig;
	return edesc;
}
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig = false;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};
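
/*
 * ablkcipher algorithms exported to the crypto API; descriptor
 * construction is steered by the class1_alg_type (cipher + mode)
 * selector, and entries of type CRYPTO_ALG_TYPE_GIVCIPHER additionally
 * offer HW-based IV generation via ablkcipher_givencrypt.
 */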
static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
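
/*
 * AEAD algorithms: GCM variants first, then authenc() combinations of
 * an HMAC (class2 selector) with a block cipher (class1 selector).
 * Entries with .geniv wrap the base transform with echainiv/seqiv IV
 * generation; .rfc3686 entries use CTR mode with RFC 3686 nonce
 * handling.
 */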
  1723. static struct caam_aead_alg driver_aeads[] = {
  1724. {
  1725. .aead = {
  1726. .base = {
  1727. .cra_name = "rfc4106(gcm(aes))",
  1728. .cra_driver_name = "rfc4106-gcm-aes-caam",
  1729. .cra_blocksize = 1,
  1730. },
  1731. .setkey = rfc4106_setkey,
  1732. .setauthsize = rfc4106_setauthsize,
  1733. .encrypt = ipsec_gcm_encrypt,
  1734. .decrypt = ipsec_gcm_decrypt,
  1735. .ivsize = GCM_RFC4106_IV_SIZE,
  1736. .maxauthsize = AES_BLOCK_SIZE,
  1737. },
  1738. .caam = {
  1739. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  1740. },
  1741. },
  1742. {
  1743. .aead = {
  1744. .base = {
  1745. .cra_name = "rfc4543(gcm(aes))",
  1746. .cra_driver_name = "rfc4543-gcm-aes-caam",
  1747. .cra_blocksize = 1,
  1748. },
  1749. .setkey = rfc4543_setkey,
  1750. .setauthsize = rfc4543_setauthsize,
  1751. .encrypt = ipsec_gcm_encrypt,
  1752. .decrypt = ipsec_gcm_decrypt,
  1753. .ivsize = GCM_RFC4543_IV_SIZE,
  1754. .maxauthsize = AES_BLOCK_SIZE,
  1755. },
  1756. .caam = {
  1757. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  1758. },
  1759. },
  1760. /* Galois Counter Mode */
  1761. {
  1762. .aead = {
  1763. .base = {
  1764. .cra_name = "gcm(aes)",
  1765. .cra_driver_name = "gcm-aes-caam",
  1766. .cra_blocksize = 1,
  1767. },
  1768. .setkey = gcm_setkey,
  1769. .setauthsize = gcm_setauthsize,
  1770. .encrypt = gcm_encrypt,
  1771. .decrypt = gcm_decrypt,
  1772. .ivsize = GCM_AES_IV_SIZE,
  1773. .maxauthsize = AES_BLOCK_SIZE,
  1774. },
  1775. .caam = {
  1776. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  1777. },
  1778. },
  1779. /* single-pass ipsec_esp descriptor */
  1780. {
  1781. .aead = {
  1782. .base = {
  1783. .cra_name = "authenc(hmac(md5),"
  1784. "ecb(cipher_null))",
  1785. .cra_driver_name = "authenc-hmac-md5-"
  1786. "ecb-cipher_null-caam",
  1787. .cra_blocksize = NULL_BLOCK_SIZE,
  1788. },
  1789. .setkey = aead_setkey,
  1790. .setauthsize = aead_setauthsize,
  1791. .encrypt = aead_encrypt,
  1792. .decrypt = aead_decrypt,
  1793. .ivsize = NULL_IV_SIZE,
  1794. .maxauthsize = MD5_DIGEST_SIZE,
  1795. },
  1796. .caam = {
  1797. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1798. OP_ALG_AAI_HMAC_PRECOMP,
  1799. },
  1800. },
  1801. {
  1802. .aead = {
  1803. .base = {
  1804. .cra_name = "authenc(hmac(sha1),"
  1805. "ecb(cipher_null))",
  1806. .cra_driver_name = "authenc-hmac-sha1-"
  1807. "ecb-cipher_null-caam",
  1808. .cra_blocksize = NULL_BLOCK_SIZE,
  1809. },
  1810. .setkey = aead_setkey,
  1811. .setauthsize = aead_setauthsize,
  1812. .encrypt = aead_encrypt,
  1813. .decrypt = aead_decrypt,
  1814. .ivsize = NULL_IV_SIZE,
  1815. .maxauthsize = SHA1_DIGEST_SIZE,
  1816. },
  1817. .caam = {
  1818. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  1819. OP_ALG_AAI_HMAC_PRECOMP,
  1820. },
  1821. },
  1822. {
  1823. .aead = {
  1824. .base = {
  1825. .cra_name = "authenc(hmac(sha224),"
  1826. "ecb(cipher_null))",
  1827. .cra_driver_name = "authenc-hmac-sha224-"
  1828. "ecb-cipher_null-caam",
  1829. .cra_blocksize = NULL_BLOCK_SIZE,
  1830. },
  1831. .setkey = aead_setkey,
  1832. .setauthsize = aead_setauthsize,
  1833. .encrypt = aead_encrypt,
  1834. .decrypt = aead_decrypt,
  1835. .ivsize = NULL_IV_SIZE,
  1836. .maxauthsize = SHA224_DIGEST_SIZE,
  1837. },
  1838. .caam = {
  1839. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  1840. OP_ALG_AAI_HMAC_PRECOMP,
  1841. },
  1842. },
  1843. {
  1844. .aead = {
  1845. .base = {
  1846. .cra_name = "authenc(hmac(sha256),"
  1847. "ecb(cipher_null))",
  1848. .cra_driver_name = "authenc-hmac-sha256-"
  1849. "ecb-cipher_null-caam",
  1850. .cra_blocksize = NULL_BLOCK_SIZE,
  1851. },
  1852. .setkey = aead_setkey,
  1853. .setauthsize = aead_setauthsize,
  1854. .encrypt = aead_encrypt,
  1855. .decrypt = aead_decrypt,
  1856. .ivsize = NULL_IV_SIZE,
  1857. .maxauthsize = SHA256_DIGEST_SIZE,
  1858. },
  1859. .caam = {
  1860. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  1861. OP_ALG_AAI_HMAC_PRECOMP,
  1862. },
  1863. },
  1864. {
  1865. .aead = {
  1866. .base = {
  1867. .cra_name = "authenc(hmac(sha384),"
  1868. "ecb(cipher_null))",
  1869. .cra_driver_name = "authenc-hmac-sha384-"
  1870. "ecb-cipher_null-caam",
  1871. .cra_blocksize = NULL_BLOCK_SIZE,
  1872. },
  1873. .setkey = aead_setkey,
  1874. .setauthsize = aead_setauthsize,
  1875. .encrypt = aead_encrypt,
  1876. .decrypt = aead_decrypt,
  1877. .ivsize = NULL_IV_SIZE,
  1878. .maxauthsize = SHA384_DIGEST_SIZE,
  1879. },
  1880. .caam = {
  1881. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  1882. OP_ALG_AAI_HMAC_PRECOMP,
  1883. },
  1884. },
  1885. {
  1886. .aead = {
  1887. .base = {
  1888. .cra_name = "authenc(hmac(sha512),"
  1889. "ecb(cipher_null))",
  1890. .cra_driver_name = "authenc-hmac-sha512-"
  1891. "ecb-cipher_null-caam",
  1892. .cra_blocksize = NULL_BLOCK_SIZE,
  1893. },
  1894. .setkey = aead_setkey,
  1895. .setauthsize = aead_setauthsize,
  1896. .encrypt = aead_encrypt,
  1897. .decrypt = aead_decrypt,
  1898. .ivsize = NULL_IV_SIZE,
  1899. .maxauthsize = SHA512_DIGEST_SIZE,
  1900. },
  1901. .caam = {
  1902. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  1903. OP_ALG_AAI_HMAC_PRECOMP,
  1904. },
  1905. },
  1906. {
  1907. .aead = {
  1908. .base = {
  1909. .cra_name = "authenc(hmac(md5),cbc(aes))",
  1910. .cra_driver_name = "authenc-hmac-md5-"
  1911. "cbc-aes-caam",
  1912. .cra_blocksize = AES_BLOCK_SIZE,
  1913. },
  1914. .setkey = aead_setkey,
  1915. .setauthsize = aead_setauthsize,
  1916. .encrypt = aead_encrypt,
  1917. .decrypt = aead_decrypt,
  1918. .ivsize = AES_BLOCK_SIZE,
  1919. .maxauthsize = MD5_DIGEST_SIZE,
  1920. },
  1921. .caam = {
  1922. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1923. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1924. OP_ALG_AAI_HMAC_PRECOMP,
  1925. },
  1926. },
  1927. {
  1928. .aead = {
  1929. .base = {
  1930. .cra_name = "echainiv(authenc(hmac(md5),"
  1931. "cbc(aes)))",
  1932. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  1933. "cbc-aes-caam",
  1934. .cra_blocksize = AES_BLOCK_SIZE,
  1935. },
  1936. .setkey = aead_setkey,
  1937. .setauthsize = aead_setauthsize,
  1938. .encrypt = aead_encrypt,
  1939. .decrypt = aead_decrypt,
  1940. .ivsize = AES_BLOCK_SIZE,
  1941. .maxauthsize = MD5_DIGEST_SIZE,
  1942. },
  1943. .caam = {
  1944. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1945. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1946. OP_ALG_AAI_HMAC_PRECOMP,
  1947. .geniv = true,
  1948. },
  1949. },
  1950. {
  1951. .aead = {
  1952. .base = {
  1953. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1954. .cra_driver_name = "authenc-hmac-sha1-"
  1955. "cbc-aes-caam",
  1956. .cra_blocksize = AES_BLOCK_SIZE,
  1957. },
  1958. .setkey = aead_setkey,
  1959. .setauthsize = aead_setauthsize,
  1960. .encrypt = aead_encrypt,
  1961. .decrypt = aead_decrypt,
  1962. .ivsize = AES_BLOCK_SIZE,
  1963. .maxauthsize = SHA1_DIGEST_SIZE,
  1964. },
  1965. .caam = {
  1966. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1967. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  1968. OP_ALG_AAI_HMAC_PRECOMP,
  1969. },
  1970. },
  1971. {
  1972. .aead = {
  1973. .base = {
  1974. .cra_name = "echainiv(authenc(hmac(sha1),"
  1975. "cbc(aes)))",
  1976. .cra_driver_name = "echainiv-authenc-"
  1977. "hmac-sha1-cbc-aes-caam",
  1978. .cra_blocksize = AES_BLOCK_SIZE,
  1979. },
  1980. .setkey = aead_setkey,
  1981. .setauthsize = aead_setauthsize,
  1982. .encrypt = aead_encrypt,
  1983. .decrypt = aead_decrypt,
  1984. .ivsize = AES_BLOCK_SIZE,
  1985. .maxauthsize = SHA1_DIGEST_SIZE,
  1986. },
  1987. .caam = {
  1988. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1989. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  1990. OP_ALG_AAI_HMAC_PRECOMP,
  1991. .geniv = true,
  1992. },
  1993. },
  1994. {
  1995. .aead = {
  1996. .base = {
  1997. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  1998. .cra_driver_name = "authenc-hmac-sha224-"
  1999. "cbc-aes-caam",
  2000. .cra_blocksize = AES_BLOCK_SIZE,
  2001. },
  2002. .setkey = aead_setkey,
  2003. .setauthsize = aead_setauthsize,
  2004. .encrypt = aead_encrypt,
  2005. .decrypt = aead_decrypt,
  2006. .ivsize = AES_BLOCK_SIZE,
  2007. .maxauthsize = SHA224_DIGEST_SIZE,
  2008. },
  2009. .caam = {
  2010. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2011. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2012. OP_ALG_AAI_HMAC_PRECOMP,
  2013. },
  2014. },
  2015. {
  2016. .aead = {
  2017. .base = {
  2018. .cra_name = "echainiv(authenc(hmac(sha224),"
  2019. "cbc(aes)))",
  2020. .cra_driver_name = "echainiv-authenc-"
  2021. "hmac-sha224-cbc-aes-caam",
  2022. .cra_blocksize = AES_BLOCK_SIZE,
  2023. },
  2024. .setkey = aead_setkey,
  2025. .setauthsize = aead_setauthsize,
  2026. .encrypt = aead_encrypt,
  2027. .decrypt = aead_decrypt,
  2028. .ivsize = AES_BLOCK_SIZE,
  2029. .maxauthsize = SHA224_DIGEST_SIZE,
  2030. },
  2031. .caam = {
  2032. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2033. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2034. OP_ALG_AAI_HMAC_PRECOMP,
  2035. .geniv = true,
  2036. },
  2037. },
  2038. {
  2039. .aead = {
  2040. .base = {
  2041. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2042. .cra_driver_name = "authenc-hmac-sha256-"
  2043. "cbc-aes-caam",
  2044. .cra_blocksize = AES_BLOCK_SIZE,
  2045. },
  2046. .setkey = aead_setkey,
  2047. .setauthsize = aead_setauthsize,
  2048. .encrypt = aead_encrypt,
  2049. .decrypt = aead_decrypt,
  2050. .ivsize = AES_BLOCK_SIZE,
  2051. .maxauthsize = SHA256_DIGEST_SIZE,
  2052. },
  2053. .caam = {
  2054. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2055. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2056. OP_ALG_AAI_HMAC_PRECOMP,
  2057. },
  2058. },
  2059. {
  2060. .aead = {
  2061. .base = {
  2062. .cra_name = "echainiv(authenc(hmac(sha256),"
  2063. "cbc(aes)))",
  2064. .cra_driver_name = "echainiv-authenc-"
  2065. "hmac-sha256-cbc-aes-caam",
  2066. .cra_blocksize = AES_BLOCK_SIZE,
  2067. },
  2068. .setkey = aead_setkey,
  2069. .setauthsize = aead_setauthsize,
  2070. .encrypt = aead_encrypt,
  2071. .decrypt = aead_decrypt,
  2072. .ivsize = AES_BLOCK_SIZE,
  2073. .maxauthsize = SHA256_DIGEST_SIZE,
  2074. },
  2075. .caam = {
  2076. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2077. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2078. OP_ALG_AAI_HMAC_PRECOMP,
  2079. .geniv = true,
  2080. },
  2081. },
  2082. {
  2083. .aead = {
  2084. .base = {
  2085. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  2086. .cra_driver_name = "authenc-hmac-sha384-"
  2087. "cbc-aes-caam",
  2088. .cra_blocksize = AES_BLOCK_SIZE,
  2089. },
  2090. .setkey = aead_setkey,
  2091. .setauthsize = aead_setauthsize,
  2092. .encrypt = aead_encrypt,
  2093. .decrypt = aead_decrypt,
  2094. .ivsize = AES_BLOCK_SIZE,
  2095. .maxauthsize = SHA384_DIGEST_SIZE,
  2096. },
  2097. .caam = {
  2098. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2099. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2100. OP_ALG_AAI_HMAC_PRECOMP,
  2101. },
  2102. },
  2103. {
  2104. .aead = {
  2105. .base = {
  2106. .cra_name = "echainiv(authenc(hmac(sha384),"
  2107. "cbc(aes)))",
  2108. .cra_driver_name = "echainiv-authenc-"
  2109. "hmac-sha384-cbc-aes-caam",
  2110. .cra_blocksize = AES_BLOCK_SIZE,
  2111. },
  2112. .setkey = aead_setkey,
  2113. .setauthsize = aead_setauthsize,
  2114. .encrypt = aead_encrypt,
  2115. .decrypt = aead_decrypt,
  2116. .ivsize = AES_BLOCK_SIZE,
  2117. .maxauthsize = SHA384_DIGEST_SIZE,
  2118. },
  2119. .caam = {
  2120. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2121. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2122. OP_ALG_AAI_HMAC_PRECOMP,
  2123. .geniv = true,
  2124. },
  2125. },
  2126. {
  2127. .aead = {
  2128. .base = {
  2129. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  2130. .cra_driver_name = "authenc-hmac-sha512-"
  2131. "cbc-aes-caam",
  2132. .cra_blocksize = AES_BLOCK_SIZE,
  2133. },
  2134. .setkey = aead_setkey,
  2135. .setauthsize = aead_setauthsize,
  2136. .encrypt = aead_encrypt,
  2137. .decrypt = aead_decrypt,
  2138. .ivsize = AES_BLOCK_SIZE,
  2139. .maxauthsize = SHA512_DIGEST_SIZE,
  2140. },
  2141. .caam = {
  2142. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2143. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2144. OP_ALG_AAI_HMAC_PRECOMP,
  2145. },
  2146. },
  2147. {
  2148. .aead = {
  2149. .base = {
  2150. .cra_name = "echainiv(authenc(hmac(sha512),"
  2151. "cbc(aes)))",
  2152. .cra_driver_name = "echainiv-authenc-"
  2153. "hmac-sha512-cbc-aes-caam",
  2154. .cra_blocksize = AES_BLOCK_SIZE,
  2155. },
  2156. .setkey = aead_setkey,
  2157. .setauthsize = aead_setauthsize,
  2158. .encrypt = aead_encrypt,
  2159. .decrypt = aead_decrypt,
  2160. .ivsize = AES_BLOCK_SIZE,
  2161. .maxauthsize = SHA512_DIGEST_SIZE,
  2162. },
  2163. .caam = {
  2164. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2165. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2166. OP_ALG_AAI_HMAC_PRECOMP,
  2167. .geniv = true,
  2168. },
  2169. },
  2170. {
  2171. .aead = {
  2172. .base = {
  2173. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  2174. .cra_driver_name = "authenc-hmac-md5-"
  2175. "cbc-des3_ede-caam",
  2176. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2177. },
  2178. .setkey = aead_setkey,
  2179. .setauthsize = aead_setauthsize,
  2180. .encrypt = aead_encrypt,
  2181. .decrypt = aead_decrypt,
  2182. .ivsize = DES3_EDE_BLOCK_SIZE,
  2183. .maxauthsize = MD5_DIGEST_SIZE,
  2184. },
  2185. .caam = {
  2186. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2187. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2188. OP_ALG_AAI_HMAC_PRECOMP,
  2189. }
  2190. },
  2191. {
  2192. .aead = {
  2193. .base = {
  2194. .cra_name = "echainiv(authenc(hmac(md5),"
  2195. "cbc(des3_ede)))",
  2196. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  2197. "cbc-des3_ede-caam",
  2198. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2199. },
  2200. .setkey = aead_setkey,
  2201. .setauthsize = aead_setauthsize,
  2202. .encrypt = aead_encrypt,
  2203. .decrypt = aead_decrypt,
  2204. .ivsize = DES3_EDE_BLOCK_SIZE,
  2205. .maxauthsize = MD5_DIGEST_SIZE,
  2206. },
  2207. .caam = {
  2208. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2209. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2210. OP_ALG_AAI_HMAC_PRECOMP,
  2211. .geniv = true,
  2212. }
  2213. },
  2214. {
  2215. .aead = {
  2216. .base = {
  2217. .cra_name = "authenc(hmac(sha1),"
  2218. "cbc(des3_ede))",
  2219. .cra_driver_name = "authenc-hmac-sha1-"
  2220. "cbc-des3_ede-caam",
  2221. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2222. },
  2223. .setkey = aead_setkey,
  2224. .setauthsize = aead_setauthsize,
  2225. .encrypt = aead_encrypt,
  2226. .decrypt = aead_decrypt,
  2227. .ivsize = DES3_EDE_BLOCK_SIZE,
  2228. .maxauthsize = SHA1_DIGEST_SIZE,
  2229. },
  2230. .caam = {
  2231. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2232. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2233. OP_ALG_AAI_HMAC_PRECOMP,
  2234. },
  2235. },
  2236. {
  2237. .aead = {
  2238. .base = {
  2239. .cra_name = "echainiv(authenc(hmac(sha1),"
  2240. "cbc(des3_ede)))",
  2241. .cra_driver_name = "echainiv-authenc-"
  2242. "hmac-sha1-"
  2243. "cbc-des3_ede-caam",
  2244. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2245. },
  2246. .setkey = aead_setkey,
  2247. .setauthsize = aead_setauthsize,
  2248. .encrypt = aead_encrypt,
  2249. .decrypt = aead_decrypt,
  2250. .ivsize = DES3_EDE_BLOCK_SIZE,
  2251. .maxauthsize = SHA1_DIGEST_SIZE,
  2252. },
  2253. .caam = {
  2254. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2255. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2256. OP_ALG_AAI_HMAC_PRECOMP,
  2257. .geniv = true,
  2258. },
  2259. },
  2260. {
  2261. .aead = {
  2262. .base = {
  2263. .cra_name = "authenc(hmac(sha224),"
  2264. "cbc(des3_ede))",
  2265. .cra_driver_name = "authenc-hmac-sha224-"
  2266. "cbc-des3_ede-caam",
  2267. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2268. },
  2269. .setkey = aead_setkey,
  2270. .setauthsize = aead_setauthsize,
  2271. .encrypt = aead_encrypt,
  2272. .decrypt = aead_decrypt,
  2273. .ivsize = DES3_EDE_BLOCK_SIZE,
  2274. .maxauthsize = SHA224_DIGEST_SIZE,
  2275. },
  2276. .caam = {
  2277. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2278. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2279. OP_ALG_AAI_HMAC_PRECOMP,
  2280. },
  2281. },
  2282. {
  2283. .aead = {
  2284. .base = {
  2285. .cra_name = "echainiv(authenc(hmac(sha224),"
  2286. "cbc(des3_ede)))",
  2287. .cra_driver_name = "echainiv-authenc-"
  2288. "hmac-sha224-"
  2289. "cbc-des3_ede-caam",
  2290. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2291. },
  2292. .setkey = aead_setkey,
  2293. .setauthsize = aead_setauthsize,
  2294. .encrypt = aead_encrypt,
  2295. .decrypt = aead_decrypt,
  2296. .ivsize = DES3_EDE_BLOCK_SIZE,
  2297. .maxauthsize = SHA224_DIGEST_SIZE,
  2298. },
  2299. .caam = {
  2300. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2301. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2302. OP_ALG_AAI_HMAC_PRECOMP,
  2303. .geniv = true,
  2304. },
  2305. },
  2306. {
  2307. .aead = {
  2308. .base = {
  2309. .cra_name = "authenc(hmac(sha256),"
  2310. "cbc(des3_ede))",
  2311. .cra_driver_name = "authenc-hmac-sha256-"
  2312. "cbc-des3_ede-caam",
  2313. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2314. },
  2315. .setkey = aead_setkey,
  2316. .setauthsize = aead_setauthsize,
  2317. .encrypt = aead_encrypt,
  2318. .decrypt = aead_decrypt,
  2319. .ivsize = DES3_EDE_BLOCK_SIZE,
  2320. .maxauthsize = SHA256_DIGEST_SIZE,
  2321. },
  2322. .caam = {
  2323. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2324. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2325. OP_ALG_AAI_HMAC_PRECOMP,
  2326. },
  2327. },
  2328. {
  2329. .aead = {
  2330. .base = {
  2331. .cra_name = "echainiv(authenc(hmac(sha256),"
  2332. "cbc(des3_ede)))",
  2333. .cra_driver_name = "echainiv-authenc-"
  2334. "hmac-sha256-"
  2335. "cbc-des3_ede-caam",
  2336. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2337. },
  2338. .setkey = aead_setkey,
  2339. .setauthsize = aead_setauthsize,
  2340. .encrypt = aead_encrypt,
  2341. .decrypt = aead_decrypt,
  2342. .ivsize = DES3_EDE_BLOCK_SIZE,
  2343. .maxauthsize = SHA256_DIGEST_SIZE,
  2344. },
  2345. .caam = {
  2346. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2347. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2348. OP_ALG_AAI_HMAC_PRECOMP,
  2349. .geniv = true,
  2350. },
  2351. },
  2352. {
  2353. .aead = {
  2354. .base = {
  2355. .cra_name = "authenc(hmac(sha384),"
  2356. "cbc(des3_ede))",
  2357. .cra_driver_name = "authenc-hmac-sha384-"
  2358. "cbc-des3_ede-caam",
  2359. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2360. },
  2361. .setkey = aead_setkey,
  2362. .setauthsize = aead_setauthsize,
  2363. .encrypt = aead_encrypt,
  2364. .decrypt = aead_decrypt,
  2365. .ivsize = DES3_EDE_BLOCK_SIZE,
  2366. .maxauthsize = SHA384_DIGEST_SIZE,
  2367. },
  2368. .caam = {
  2369. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2370. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2371. OP_ALG_AAI_HMAC_PRECOMP,
  2372. },
  2373. },
  2374. {
  2375. .aead = {
  2376. .base = {
  2377. .cra_name = "echainiv(authenc(hmac(sha384),"
  2378. "cbc(des3_ede)))",
  2379. .cra_driver_name = "echainiv-authenc-"
  2380. "hmac-sha384-"
  2381. "cbc-des3_ede-caam",
  2382. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2383. },
  2384. .setkey = aead_setkey,
  2385. .setauthsize = aead_setauthsize,
  2386. .encrypt = aead_encrypt,
  2387. .decrypt = aead_decrypt,
  2388. .ivsize = DES3_EDE_BLOCK_SIZE,
  2389. .maxauthsize = SHA384_DIGEST_SIZE,
  2390. },
  2391. .caam = {
  2392. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2393. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2394. OP_ALG_AAI_HMAC_PRECOMP,
  2395. .geniv = true,
  2396. },
  2397. },
  2398. {
  2399. .aead = {
  2400. .base = {
  2401. .cra_name = "authenc(hmac(sha512),"
  2402. "cbc(des3_ede))",
  2403. .cra_driver_name = "authenc-hmac-sha512-"
  2404. "cbc-des3_ede-caam",
  2405. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2406. },
  2407. .setkey = aead_setkey,
  2408. .setauthsize = aead_setauthsize,
  2409. .encrypt = aead_encrypt,
  2410. .decrypt = aead_decrypt,
  2411. .ivsize = DES3_EDE_BLOCK_SIZE,
  2412. .maxauthsize = SHA512_DIGEST_SIZE,
  2413. },
  2414. .caam = {
  2415. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2416. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2417. OP_ALG_AAI_HMAC_PRECOMP,
  2418. },
  2419. },
  2420. {
  2421. .aead = {
  2422. .base = {
  2423. .cra_name = "echainiv(authenc(hmac(sha512),"
  2424. "cbc(des3_ede)))",
  2425. .cra_driver_name = "echainiv-authenc-"
  2426. "hmac-sha512-"
  2427. "cbc-des3_ede-caam",
  2428. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2429. },
  2430. .setkey = aead_setkey,
  2431. .setauthsize = aead_setauthsize,
  2432. .encrypt = aead_encrypt,
  2433. .decrypt = aead_decrypt,
  2434. .ivsize = DES3_EDE_BLOCK_SIZE,
  2435. .maxauthsize = SHA512_DIGEST_SIZE,
  2436. },
  2437. .caam = {
  2438. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2439. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2440. OP_ALG_AAI_HMAC_PRECOMP,
  2441. .geniv = true,
  2442. },
  2443. },
  2444. {
  2445. .aead = {
  2446. .base = {
  2447. .cra_name = "authenc(hmac(md5),cbc(des))",
  2448. .cra_driver_name = "authenc-hmac-md5-"
  2449. "cbc-des-caam",
  2450. .cra_blocksize = DES_BLOCK_SIZE,
  2451. },
  2452. .setkey = aead_setkey,
  2453. .setauthsize = aead_setauthsize,
  2454. .encrypt = aead_encrypt,
  2455. .decrypt = aead_decrypt,
  2456. .ivsize = DES_BLOCK_SIZE,
  2457. .maxauthsize = MD5_DIGEST_SIZE,
  2458. },
  2459. .caam = {
  2460. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2461. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2462. OP_ALG_AAI_HMAC_PRECOMP,
  2463. },
  2464. },
  2465. {
  2466. .aead = {
  2467. .base = {
  2468. .cra_name = "echainiv(authenc(hmac(md5),"
  2469. "cbc(des)))",
  2470. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  2471. "cbc-des-caam",
  2472. .cra_blocksize = DES_BLOCK_SIZE,
  2473. },
  2474. .setkey = aead_setkey,
  2475. .setauthsize = aead_setauthsize,
  2476. .encrypt = aead_encrypt,
  2477. .decrypt = aead_decrypt,
  2478. .ivsize = DES_BLOCK_SIZE,
  2479. .maxauthsize = MD5_DIGEST_SIZE,
  2480. },
  2481. .caam = {
  2482. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2483. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2484. OP_ALG_AAI_HMAC_PRECOMP,
  2485. .geniv = true,
  2486. },
  2487. },
  2488. {
  2489. .aead = {
  2490. .base = {
  2491. .cra_name = "authenc(hmac(sha1),cbc(des))",
  2492. .cra_driver_name = "authenc-hmac-sha1-"
  2493. "cbc-des-caam",
  2494. .cra_blocksize = DES_BLOCK_SIZE,
  2495. },
  2496. .setkey = aead_setkey,
  2497. .setauthsize = aead_setauthsize,
  2498. .encrypt = aead_encrypt,
  2499. .decrypt = aead_decrypt,
  2500. .ivsize = DES_BLOCK_SIZE,
  2501. .maxauthsize = SHA1_DIGEST_SIZE,
  2502. },
  2503. .caam = {
  2504. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2505. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2506. OP_ALG_AAI_HMAC_PRECOMP,
  2507. },
  2508. },
  2509. {
  2510. .aead = {
  2511. .base = {
  2512. .cra_name = "echainiv(authenc(hmac(sha1),"
  2513. "cbc(des)))",
  2514. .cra_driver_name = "echainiv-authenc-"
  2515. "hmac-sha1-cbc-des-caam",
  2516. .cra_blocksize = DES_BLOCK_SIZE,
  2517. },
  2518. .setkey = aead_setkey,
  2519. .setauthsize = aead_setauthsize,
  2520. .encrypt = aead_encrypt,
  2521. .decrypt = aead_decrypt,
  2522. .ivsize = DES_BLOCK_SIZE,
  2523. .maxauthsize = SHA1_DIGEST_SIZE,
  2524. },
  2525. .caam = {
  2526. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2527. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2528. OP_ALG_AAI_HMAC_PRECOMP,
  2529. .geniv = true,
  2530. },
  2531. },
  2532. {
  2533. .aead = {
  2534. .base = {
  2535. .cra_name = "authenc(hmac(sha224),cbc(des))",
  2536. .cra_driver_name = "authenc-hmac-sha224-"
  2537. "cbc-des-caam",
  2538. .cra_blocksize = DES_BLOCK_SIZE,
  2539. },
  2540. .setkey = aead_setkey,
  2541. .setauthsize = aead_setauthsize,
  2542. .encrypt = aead_encrypt,
  2543. .decrypt = aead_decrypt,
  2544. .ivsize = DES_BLOCK_SIZE,
  2545. .maxauthsize = SHA224_DIGEST_SIZE,
  2546. },
  2547. .caam = {
  2548. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2549. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2550. OP_ALG_AAI_HMAC_PRECOMP,
  2551. },
  2552. },
  2553. {
  2554. .aead = {
  2555. .base = {
  2556. .cra_name = "echainiv(authenc(hmac(sha224),"
  2557. "cbc(des)))",
  2558. .cra_driver_name = "echainiv-authenc-"
  2559. "hmac-sha224-cbc-des-caam",
  2560. .cra_blocksize = DES_BLOCK_SIZE,
  2561. },
  2562. .setkey = aead_setkey,
  2563. .setauthsize = aead_setauthsize,
  2564. .encrypt = aead_encrypt,
  2565. .decrypt = aead_decrypt,
  2566. .ivsize = DES_BLOCK_SIZE,
  2567. .maxauthsize = SHA224_DIGEST_SIZE,
  2568. },
  2569. .caam = {
  2570. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2571. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2572. OP_ALG_AAI_HMAC_PRECOMP,
  2573. .geniv = true,
  2574. },
  2575. },
  2576. {
  2577. .aead = {
  2578. .base = {
  2579. .cra_name = "authenc(hmac(sha256),cbc(des))",
  2580. .cra_driver_name = "authenc-hmac-sha256-"
  2581. "cbc-des-caam",
  2582. .cra_blocksize = DES_BLOCK_SIZE,
  2583. },
  2584. .setkey = aead_setkey,
  2585. .setauthsize = aead_setauthsize,
  2586. .encrypt = aead_encrypt,
  2587. .decrypt = aead_decrypt,
  2588. .ivsize = DES_BLOCK_SIZE,
  2589. .maxauthsize = SHA256_DIGEST_SIZE,
  2590. },
  2591. .caam = {
  2592. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2593. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2594. OP_ALG_AAI_HMAC_PRECOMP,
  2595. },
  2596. },
  2597. {
  2598. .aead = {
  2599. .base = {
  2600. .cra_name = "echainiv(authenc(hmac(sha256),"
  2601. "cbc(des)))",
  2602. .cra_driver_name = "echainiv-authenc-"
  2603. "hmac-sha256-cbc-des-caam",
  2604. .cra_blocksize = DES_BLOCK_SIZE,
  2605. },
  2606. .setkey = aead_setkey,
  2607. .setauthsize = aead_setauthsize,
  2608. .encrypt = aead_encrypt,
  2609. .decrypt = aead_decrypt,
  2610. .ivsize = DES_BLOCK_SIZE,
  2611. .maxauthsize = SHA256_DIGEST_SIZE,
  2612. },
  2613. .caam = {
  2614. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2615. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2616. OP_ALG_AAI_HMAC_PRECOMP,
  2617. .geniv = true,
  2618. },
  2619. },
  2620. {
  2621. .aead = {
  2622. .base = {
  2623. .cra_name = "authenc(hmac(sha384),cbc(des))",
  2624. .cra_driver_name = "authenc-hmac-sha384-"
  2625. "cbc-des-caam",
  2626. .cra_blocksize = DES_BLOCK_SIZE,
  2627. },
  2628. .setkey = aead_setkey,
  2629. .setauthsize = aead_setauthsize,
  2630. .encrypt = aead_encrypt,
  2631. .decrypt = aead_decrypt,
  2632. .ivsize = DES_BLOCK_SIZE,
  2633. .maxauthsize = SHA384_DIGEST_SIZE,
  2634. },
  2635. .caam = {
  2636. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2637. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2638. OP_ALG_AAI_HMAC_PRECOMP,
  2639. },
  2640. },
  2641. {
  2642. .aead = {
  2643. .base = {
  2644. .cra_name = "echainiv(authenc(hmac(sha384),"
  2645. "cbc(des)))",
  2646. .cra_driver_name = "echainiv-authenc-"
  2647. "hmac-sha384-cbc-des-caam",
  2648. .cra_blocksize = DES_BLOCK_SIZE,
  2649. },
  2650. .setkey = aead_setkey,
  2651. .setauthsize = aead_setauthsize,
  2652. .encrypt = aead_encrypt,
  2653. .decrypt = aead_decrypt,
  2654. .ivsize = DES_BLOCK_SIZE,
  2655. .maxauthsize = SHA384_DIGEST_SIZE,
  2656. },
  2657. .caam = {
  2658. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2659. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2660. OP_ALG_AAI_HMAC_PRECOMP,
  2661. .geniv = true,
  2662. },
  2663. },
  2664. {
  2665. .aead = {
  2666. .base = {
  2667. .cra_name = "authenc(hmac(sha512),cbc(des))",
  2668. .cra_driver_name = "authenc-hmac-sha512-"
  2669. "cbc-des-caam",
  2670. .cra_blocksize = DES_BLOCK_SIZE,
  2671. },
  2672. .setkey = aead_setkey,
  2673. .setauthsize = aead_setauthsize,
  2674. .encrypt = aead_encrypt,
  2675. .decrypt = aead_decrypt,
  2676. .ivsize = DES_BLOCK_SIZE,
  2677. .maxauthsize = SHA512_DIGEST_SIZE,
  2678. },
  2679. .caam = {
  2680. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2681. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2682. OP_ALG_AAI_HMAC_PRECOMP,
  2683. },
  2684. },
  2685. {
  2686. .aead = {
  2687. .base = {
  2688. .cra_name = "echainiv(authenc(hmac(sha512),"
  2689. "cbc(des)))",
  2690. .cra_driver_name = "echainiv-authenc-"
  2691. "hmac-sha512-cbc-des-caam",
  2692. .cra_blocksize = DES_BLOCK_SIZE,
  2693. },
  2694. .setkey = aead_setkey,
  2695. .setauthsize = aead_setauthsize,
  2696. .encrypt = aead_encrypt,
  2697. .decrypt = aead_decrypt,
  2698. .ivsize = DES_BLOCK_SIZE,
  2699. .maxauthsize = SHA512_DIGEST_SIZE,
  2700. },
  2701. .caam = {
  2702. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2703. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2704. OP_ALG_AAI_HMAC_PRECOMP,
  2705. .geniv = true,
  2706. },
  2707. },
  2708. {
  2709. .aead = {
  2710. .base = {
  2711. .cra_name = "authenc(hmac(md5),"
  2712. "rfc3686(ctr(aes)))",
  2713. .cra_driver_name = "authenc-hmac-md5-"
  2714. "rfc3686-ctr-aes-caam",
  2715. .cra_blocksize = 1,
  2716. },
  2717. .setkey = aead_setkey,
  2718. .setauthsize = aead_setauthsize,
  2719. .encrypt = aead_encrypt,
  2720. .decrypt = aead_decrypt,
  2721. .ivsize = CTR_RFC3686_IV_SIZE,
  2722. .maxauthsize = MD5_DIGEST_SIZE,
  2723. },
  2724. .caam = {
  2725. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2726. OP_ALG_AAI_CTR_MOD128,
  2727. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2728. OP_ALG_AAI_HMAC_PRECOMP,
  2729. .rfc3686 = true,
  2730. },
  2731. },
  2732. {
  2733. .aead = {
  2734. .base = {
  2735. .cra_name = "seqiv(authenc("
  2736. "hmac(md5),rfc3686(ctr(aes))))",
  2737. .cra_driver_name = "seqiv-authenc-hmac-md5-"
  2738. "rfc3686-ctr-aes-caam",
  2739. .cra_blocksize = 1,
  2740. },
  2741. .setkey = aead_setkey,
  2742. .setauthsize = aead_setauthsize,
  2743. .encrypt = aead_encrypt,
  2744. .decrypt = aead_decrypt,
  2745. .ivsize = CTR_RFC3686_IV_SIZE,
  2746. .maxauthsize = MD5_DIGEST_SIZE,
  2747. },
  2748. .caam = {
  2749. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2750. OP_ALG_AAI_CTR_MOD128,
  2751. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2752. OP_ALG_AAI_HMAC_PRECOMP,
  2753. .rfc3686 = true,
  2754. .geniv = true,
  2755. },
  2756. },
  2757. {
  2758. .aead = {
  2759. .base = {
  2760. .cra_name = "authenc(hmac(sha1),"
  2761. "rfc3686(ctr(aes)))",
  2762. .cra_driver_name = "authenc-hmac-sha1-"
  2763. "rfc3686-ctr-aes-caam",
  2764. .cra_blocksize = 1,
  2765. },
  2766. .setkey = aead_setkey,
  2767. .setauthsize = aead_setauthsize,
  2768. .encrypt = aead_encrypt,
  2769. .decrypt = aead_decrypt,
  2770. .ivsize = CTR_RFC3686_IV_SIZE,
  2771. .maxauthsize = SHA1_DIGEST_SIZE,
  2772. },
  2773. .caam = {
  2774. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2775. OP_ALG_AAI_CTR_MOD128,
  2776. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2777. OP_ALG_AAI_HMAC_PRECOMP,
  2778. .rfc3686 = true,
  2779. },
  2780. },
  2781. {
  2782. .aead = {
  2783. .base = {
  2784. .cra_name = "seqiv(authenc("
  2785. "hmac(sha1),rfc3686(ctr(aes))))",
  2786. .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  2787. "rfc3686-ctr-aes-caam",
  2788. .cra_blocksize = 1,
  2789. },
  2790. .setkey = aead_setkey,
  2791. .setauthsize = aead_setauthsize,
  2792. .encrypt = aead_encrypt,
  2793. .decrypt = aead_decrypt,
  2794. .ivsize = CTR_RFC3686_IV_SIZE,
  2795. .maxauthsize = SHA1_DIGEST_SIZE,
  2796. },
  2797. .caam = {
  2798. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2799. OP_ALG_AAI_CTR_MOD128,
  2800. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2801. OP_ALG_AAI_HMAC_PRECOMP,
  2802. .rfc3686 = true,
  2803. .geniv = true,
  2804. },
  2805. },
  2806. {
  2807. .aead = {
  2808. .base = {
  2809. .cra_name = "authenc(hmac(sha224),"
  2810. "rfc3686(ctr(aes)))",
  2811. .cra_driver_name = "authenc-hmac-sha224-"
  2812. "rfc3686-ctr-aes-caam",
  2813. .cra_blocksize = 1,
  2814. },
  2815. .setkey = aead_setkey,
  2816. .setauthsize = aead_setauthsize,
  2817. .encrypt = aead_encrypt,
  2818. .decrypt = aead_decrypt,
  2819. .ivsize = CTR_RFC3686_IV_SIZE,
  2820. .maxauthsize = SHA224_DIGEST_SIZE,
  2821. },
  2822. .caam = {
  2823. .class1_alg_type = OP_ALG_ALGSEL_AES |
  2824. OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
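
/*
 * caam_crypto_alg - runtime wrapper around a registered crypto_alg,
 * keeping the CAAM class 1/2 algorithm type words it was built from and
 * an entry in alg_list so caam_algapi_exit() can unregister and free it.
 */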
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};
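
/*
 * Common per-transform initialization: allocate a Job Ring for this tfm,
 * DMA-map the context region holding the shared descriptors and the key
 * (the mapping length runs from sh_desc_enc up to the sh_desc_enc_dma
 * member, so a single mapping covers all three descriptors plus the key),
 * derive the individual DMA addresses via offsetof(), and latch the
 * descriptor header template values. On Era 6+ parts that use DKP
 * (Derived Key Protocol) the mapping is bidirectional, since the device
 * writes the derived split key back into the context.
 */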
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
					offsetof(struct caam_ctx,
						 sh_desc_enc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_enc_dma = dma_addr;
	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
						   sh_desc_dec);
	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
						      sh_desc_givenc);
	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}
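
/* tfm init for the (ablk/giv)cipher templates; these do not use DKP */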
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}
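
/*
 * AEAD tfm init: algorithms using aead_setkey are the authenc ones whose
 * split-key generation goes through DKP, so flag them accordingly.
 */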
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}
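
/* undo caam_init_common(): unmap the context region and release the Job Ring */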
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
			       offsetof(struct caam_ctx, sh_desc_enc_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
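
/*
 * Module teardown: unregister every AEAD that made it through registration,
 * then, if alg_list was ever initialized, drain it of the dynamically
 * allocated (ablk)cipher algorithms.
 */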
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
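
/*
 * Instantiate a crypto_alg from a driver_algs[] template, filling in the
 * fields common to all CAAM (ablk/giv)cipher algorithms.
 */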
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
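
/* fill in the fields common to all CAAM AEAD algorithms before registration */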
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
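
/*
 * Module init: locate the CAAM controller via the device tree, read the
 * CHA version/instantiation registers to determine which accelerators
 * (DES, AES, MD) this SoC actually has, and register only the algorithms
 * the hardware can back.
 */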
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
			    OP_ALG_AAI_XTS)
				continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");