cc_aead.c 77 KB

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
  3. #include <linux/kernel.h>
  4. #include <linux/module.h>
  5. #include <crypto/algapi.h>
  6. #include <crypto/internal/aead.h>
  7. #include <crypto/authenc.h>
  8. #include <crypto/des.h>
  9. #include <linux/rtnetlink.h>
  10. #include "cc_driver.h"
  11. #include "cc_buffer_mgr.h"
  12. #include "cc_aead.h"
  13. #include "cc_request_mgr.h"
  14. #include "cc_hash.h"
  15. #include "cc_sram_mgr.h"
  16. #define template_aead template_u.aead
  17. #define MAX_AEAD_SETKEY_SEQ 12
  18. #define MAX_AEAD_PROCESS_SEQ 23
  19. #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
  20. #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
  21. #define AES_CCM_RFC4309_NONCE_SIZE 3
  22. #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
  23. /* Value of each ICV_CMP byte (of 8) in case of success */
  24. #define ICV_VERIF_OK 0x01
  25. struct cc_aead_handle {
  26. cc_sram_addr_t sram_workspace_addr;
  27. struct list_head aead_list;
  28. };
  29. struct cc_hmac_s {
  30. u8 *padded_authkey;
  31. u8 *ipad_opad; /* IPAD, OPAD*/
  32. dma_addr_t padded_authkey_dma_addr;
  33. dma_addr_t ipad_opad_dma_addr;
  34. };
  35. struct cc_xcbc_s {
  36. u8 *xcbc_keys; /* K1,K2,K3 */
  37. dma_addr_t xcbc_keys_dma_addr;
  38. };
  39. struct cc_aead_ctx {
  40. struct cc_drvdata *drvdata;
  41. u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
  42. u8 *enckey;
  43. dma_addr_t enckey_dma_addr;
  44. union {
  45. struct cc_hmac_s hmac;
  46. struct cc_xcbc_s xcbc;
  47. } auth_state;
  48. unsigned int enc_keylen;
  49. unsigned int auth_keylen;
  50. unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
  51. enum drv_cipher_mode cipher_mode;
  52. enum cc_flow_mode flow_mode;
  53. enum drv_hash_mode auth_mode;
  54. };
  55. static inline bool valid_assoclen(struct aead_request *req)
  56. {
  57. return ((req->assoclen == 16) || (req->assoclen == 20));
  58. }
  59. static void cc_aead_exit(struct crypto_aead *tfm)
  60. {
  61. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  62. struct device *dev = drvdata_to_dev(ctx->drvdata);
  63. dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
  64. crypto_tfm_alg_name(&tfm->base));
  65. /* Unmap enckey buffer */
  66. if (ctx->enckey) {
  67. dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
  68. ctx->enckey_dma_addr);
  69. dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
  70. &ctx->enckey_dma_addr);
  71. ctx->enckey_dma_addr = 0;
  72. ctx->enckey = NULL;
  73. }
  74. if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
  75. struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  76. if (xcbc->xcbc_keys) {
  77. dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
  78. xcbc->xcbc_keys,
  79. xcbc->xcbc_keys_dma_addr);
  80. }
  81. dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
  82. &xcbc->xcbc_keys_dma_addr);
  83. xcbc->xcbc_keys_dma_addr = 0;
  84. xcbc->xcbc_keys = NULL;
  85. } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
  86. struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
  87. if (hmac->ipad_opad) {
  88. dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
  89. hmac->ipad_opad,
  90. hmac->ipad_opad_dma_addr);
  91. dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
  92. &hmac->ipad_opad_dma_addr);
  93. hmac->ipad_opad_dma_addr = 0;
  94. hmac->ipad_opad = NULL;
  95. }
  96. if (hmac->padded_authkey) {
  97. dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
  98. hmac->padded_authkey,
  99. hmac->padded_authkey_dma_addr);
  100. dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
  101. &hmac->padded_authkey_dma_addr);
  102. hmac->padded_authkey_dma_addr = 0;
  103. hmac->padded_authkey = NULL;
  104. }
  105. }
  106. }
  107. static int cc_aead_init(struct crypto_aead *tfm)
  108. {
  109. struct aead_alg *alg = crypto_aead_alg(tfm);
  110. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  111. struct cc_crypto_alg *cc_alg =
  112. container_of(alg, struct cc_crypto_alg, aead_alg);
  113. struct device *dev = drvdata_to_dev(cc_alg->drvdata);
  114. dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
  115. crypto_tfm_alg_name(&tfm->base));
  116. /* Initialize modes in instance */
  117. ctx->cipher_mode = cc_alg->cipher_mode;
  118. ctx->flow_mode = cc_alg->flow_mode;
  119. ctx->auth_mode = cc_alg->auth_mode;
  120. ctx->drvdata = cc_alg->drvdata;
  121. crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
  122. /* Allocate key buffer, cache line aligned */
  123. ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
  124. &ctx->enckey_dma_addr, GFP_KERNEL);
  125. if (!ctx->enckey) {
  126. dev_err(dev, "Failed allocating key buffer\n");
  127. goto init_failed;
  128. }
  129. dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
  130. ctx->enckey);
  131. /* Set default authlen value */
  132. if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
  133. struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  134. const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
  135. /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
  136. /* (and temporary for user key - up to 256b) */
  137. xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
  138. &xcbc->xcbc_keys_dma_addr,
  139. GFP_KERNEL);
  140. if (!xcbc->xcbc_keys) {
  141. dev_err(dev, "Failed allocating buffer for XCBC keys\n");
  142. goto init_failed;
  143. }
  144. } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
  145. struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
  146. const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
  147. dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
  148. /* Allocate dma-coherent buffer for IPAD + OPAD */
  149. hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
  150. &hmac->ipad_opad_dma_addr,
  151. GFP_KERNEL);
  152. if (!hmac->ipad_opad) {
  153. dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
  154. goto init_failed;
  155. }
  156. dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
  157. hmac->ipad_opad);
  158. hmac->padded_authkey = dma_alloc_coherent(dev,
  159. MAX_HMAC_BLOCK_SIZE,
  160. pkey_dma,
  161. GFP_KERNEL);
  162. if (!hmac->padded_authkey) {
  163. dev_err(dev, "failed to allocate padded_authkey\n");
  164. goto init_failed;
  165. }
  166. } else {
  167. ctx->auth_state.hmac.ipad_opad = NULL;
  168. ctx->auth_state.hmac.padded_authkey = NULL;
  169. }
  170. return 0;
  171. init_failed:
  172. cc_aead_exit(tfm);
  173. return -ENOMEM;
  174. }
  175. static void cc_aead_complete(struct device *dev, void *cc_req, int err)
  176. {
  177. struct aead_request *areq = (struct aead_request *)cc_req;
  178. struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
  179. struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
  180. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  181. cc_unmap_aead_request(dev, areq);
  182. /* Restore ordinary iv pointer */
  183. areq->iv = areq_ctx->backup_iv;
  184. if (err)
  185. goto done;
  186. if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
  187. if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
  188. ctx->authsize) != 0) {
  189. dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
  190. ctx->authsize, ctx->cipher_mode);
  191. /* In case of payload authentication failure, the decrypted
  192. * message MUST NOT be revealed --> zero its memory.
  193. */
  194. cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
  195. err = -EBADMSG;
  196. }
  197. } else { /*ENCRYPT*/
  198. if (areq_ctx->is_icv_fragmented) {
  199. u32 skip = areq->cryptlen + areq_ctx->dst_offset;
  200. cc_copy_sg_portion(dev, areq_ctx->mac_buf,
  201. areq_ctx->dst_sgl, skip,
  202. (skip + ctx->authsize),
  203. CC_SG_FROM_BUF);
  204. }
  205. /* If an IV was generated, copy it back to the user provided
  206. * buffer.
  207. */
  208. if (areq_ctx->backup_giv) {
  209. if (ctx->cipher_mode == DRV_CIPHER_CTR)
  210. memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
  211. CTR_RFC3686_NONCE_SIZE,
  212. CTR_RFC3686_IV_SIZE);
  213. else if (ctx->cipher_mode == DRV_CIPHER_CCM)
  214. memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
  215. CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
  216. }
  217. }
  218. done:
  219. aead_request_complete(areq, err);
  220. }
  221. static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
  222. struct cc_aead_ctx *ctx)
  223. {
  224. /* Load the AES key */
  225. hw_desc_init(&desc[0]);
  226. /* We use the same buffer for the source/user key as for the
  227. * output keys, because the user key is no longer needed
  228. * after this key-loading step.
  229. */
  230. set_din_type(&desc[0], DMA_DLLI,
  231. ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
  232. NS_BIT);
  233. set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
  234. set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
  235. set_key_size_aes(&desc[0], ctx->auth_keylen);
  236. set_flow_mode(&desc[0], S_DIN_to_AES);
  237. set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
  238. hw_desc_init(&desc[1]);
  239. set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
  240. set_flow_mode(&desc[1], DIN_AES_DOUT);
  241. set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
  242. AES_KEYSIZE_128, NS_BIT, 0);
  243. hw_desc_init(&desc[2]);
  244. set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
  245. set_flow_mode(&desc[2], DIN_AES_DOUT);
  246. set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
  247. + AES_KEYSIZE_128),
  248. AES_KEYSIZE_128, NS_BIT, 0);
  249. hw_desc_init(&desc[3]);
  250. set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
  251. set_flow_mode(&desc[3], DIN_AES_DOUT);
  252. set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
  253. + 2 * AES_KEYSIZE_128),
  254. AES_KEYSIZE_128, NS_BIT, 0);
  255. return 4;
  256. }
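
The four descriptors above derive the three XCBC-MAC subkeys in hardware: the user key is loaded, then the constant blocks 0x01..01, 0x02..02 and 0x03..03 are encrypted with AES-ECB to produce K1, K2 and K3 (as in RFC 3566). A minimal software sketch of the same derivation, assuming the kernel's single-block crypto_cipher API (illustrative only, not part of this driver):

/*
 * Illustrative software equivalent of the descriptor sequence above.
 * Assumes <linux/crypto.h> and <crypto/aes.h>.
 */
static int xcbc_derive_keys_sw(const u8 *user_key, unsigned int keylen,
			       u8 out_keys[3 * AES_BLOCK_SIZE])
{
	struct crypto_cipher *aes;
	u8 block[AES_BLOCK_SIZE];
	int i, rc;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	rc = crypto_cipher_setkey(aes, user_key, keylen);
	if (!rc) {
		for (i = 0; i < 3; i++) {
			/* constant blocks 0x01..01, 0x02..02, 0x03..03 */
			memset(block, i + 1, sizeof(block));
			crypto_cipher_encrypt_one(aes,
						  out_keys + i * AES_BLOCK_SIZE,
						  block);
		}
	}
	crypto_free_cipher(aes);
	return rc;
}
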
  257. static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
  258. {
  259. unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
  260. unsigned int digest_ofs = 0;
  261. unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
  262. DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
  263. unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
  264. CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
  265. struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
  266. unsigned int idx = 0;
  267. int i;
  268. /* calc derived HMAC key */
  269. for (i = 0; i < 2; i++) {
  270. /* Load hash initial state */
  271. hw_desc_init(&desc[idx]);
  272. set_cipher_mode(&desc[idx], hash_mode);
  273. set_din_sram(&desc[idx],
  274. cc_larval_digest_addr(ctx->drvdata,
  275. ctx->auth_mode),
  276. digest_size);
  277. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  278. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  279. idx++;
  280. /* Load the hash current length*/
  281. hw_desc_init(&desc[idx]);
  282. set_cipher_mode(&desc[idx], hash_mode);
  283. set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
  284. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  285. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  286. idx++;
  287. /* Prepare ipad key */
  288. hw_desc_init(&desc[idx]);
  289. set_xor_val(&desc[idx], hmac_pad_const[i]);
  290. set_cipher_mode(&desc[idx], hash_mode);
  291. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  292. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  293. idx++;
  294. /* Perform HASH update */
  295. hw_desc_init(&desc[idx]);
  296. set_din_type(&desc[idx], DMA_DLLI,
  297. hmac->padded_authkey_dma_addr,
  298. SHA256_BLOCK_SIZE, NS_BIT);
  299. set_cipher_mode(&desc[idx], hash_mode);
  300. set_xor_active(&desc[idx]);
  301. set_flow_mode(&desc[idx], DIN_HASH);
  302. idx++;
  303. /* Get the digest */
  304. hw_desc_init(&desc[idx]);
  305. set_cipher_mode(&desc[idx], hash_mode);
  306. set_dout_dlli(&desc[idx],
  307. (hmac->ipad_opad_dma_addr + digest_ofs),
  308. digest_size, NS_BIT, 0);
  309. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  310. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  311. set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
  312. idx++;
  313. digest_ofs += digest_size;
  314. }
  315. return idx;
  316. }
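
The loop above computes the HMAC precomputed states: in each of its two iterations it loads the larval (initial) digest, hashes one block of padded_authkey XORed with the ipad or opad constant, and stores the resulting intermediate digest into ipad_opad. A self-contained sketch of the XOR-padding step defined by RFC 2104 (the compression itself is done by the hardware); the helper below is illustrative only:

static void hmac_build_pad_blocks(const u8 *padded_authkey,
				  unsigned int block_size,
				  u8 *ipad_block, u8 *opad_block)
{
	unsigned int i;

	for (i = 0; i < block_size; i++) {
		ipad_block[i] = padded_authkey[i] ^ 0x36; /* HMAC_IPAD_CONST byte */
		opad_block[i] = padded_authkey[i] ^ 0x5c; /* HMAC_OPAD_CONST byte */
	}
}
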
  317. static int validate_keys_sizes(struct cc_aead_ctx *ctx)
  318. {
  319. struct device *dev = drvdata_to_dev(ctx->drvdata);
  320. dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
  321. ctx->enc_keylen, ctx->auth_keylen);
  322. switch (ctx->auth_mode) {
  323. case DRV_HASH_SHA1:
  324. case DRV_HASH_SHA256:
  325. break;
  326. case DRV_HASH_XCBC_MAC:
  327. if (ctx->auth_keylen != AES_KEYSIZE_128 &&
  328. ctx->auth_keylen != AES_KEYSIZE_192 &&
  329. ctx->auth_keylen != AES_KEYSIZE_256)
  330. return -ENOTSUPP;
  331. break;
  332. case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
  333. if (ctx->auth_keylen > 0)
  334. return -EINVAL;
  335. break;
  336. default:
  337. dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
  338. return -EINVAL;
  339. }
  340. /* Check cipher key size */
  341. if (ctx->flow_mode == S_DIN_to_DES) {
  342. if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
  343. dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
  344. ctx->enc_keylen);
  345. return -EINVAL;
  346. }
  347. } else { /* Default assumed to be AES ciphers */
  348. if (ctx->enc_keylen != AES_KEYSIZE_128 &&
  349. ctx->enc_keylen != AES_KEYSIZE_192 &&
  350. ctx->enc_keylen != AES_KEYSIZE_256) {
  351. dev_err(dev, "Invalid cipher(AES) key size: %u\n",
  352. ctx->enc_keylen);
  353. return -EINVAL;
  354. }
  355. }
  356. return 0; /* All tests of keys sizes passed */
  357. }
  358. /* This function prepares the user key so it can be passed to HMAC
  359. * processing (copied to an internal buffer, or hashed if the key is
  360. * longer than the block size). */
  361. static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
  362. unsigned int keylen)
  363. {
  364. dma_addr_t key_dma_addr = 0;
  365. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  366. struct device *dev = drvdata_to_dev(ctx->drvdata);
  367. u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
  368. struct cc_crypto_req cc_req = {};
  369. unsigned int blocksize;
  370. unsigned int digestsize;
  371. unsigned int hashmode;
  372. unsigned int idx = 0;
  373. int rc = 0;
  374. struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
  375. dma_addr_t padded_authkey_dma_addr =
  376. ctx->auth_state.hmac.padded_authkey_dma_addr;
  377. switch (ctx->auth_mode) { /* auth_key required and >0 */
  378. case DRV_HASH_SHA1:
  379. blocksize = SHA1_BLOCK_SIZE;
  380. digestsize = SHA1_DIGEST_SIZE;
  381. hashmode = DRV_HASH_HW_SHA1;
  382. break;
  383. case DRV_HASH_SHA256:
  384. default:
  385. blocksize = SHA256_BLOCK_SIZE;
  386. digestsize = SHA256_DIGEST_SIZE;
  387. hashmode = DRV_HASH_HW_SHA256;
  388. }
  389. if (keylen != 0) {
  390. key_dma_addr = dma_map_single(dev, (void *)key, keylen,
  391. DMA_TO_DEVICE);
  392. if (dma_mapping_error(dev, key_dma_addr)) {
  393. dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
  394. key, keylen);
  395. return -ENOMEM;
  396. }
  397. if (keylen > blocksize) {
  398. /* Load hash initial state */
  399. hw_desc_init(&desc[idx]);
  400. set_cipher_mode(&desc[idx], hashmode);
  401. set_din_sram(&desc[idx], larval_addr, digestsize);
  402. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  403. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  404. idx++;
  405. /* Load the hash current length*/
  406. hw_desc_init(&desc[idx]);
  407. set_cipher_mode(&desc[idx], hashmode);
  408. set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
  409. set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
  410. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  411. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  412. idx++;
  413. hw_desc_init(&desc[idx]);
  414. set_din_type(&desc[idx], DMA_DLLI,
  415. key_dma_addr, keylen, NS_BIT);
  416. set_flow_mode(&desc[idx], DIN_HASH);
  417. idx++;
  418. /* Get hashed key */
  419. hw_desc_init(&desc[idx]);
  420. set_cipher_mode(&desc[idx], hashmode);
  421. set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
  422. digestsize, NS_BIT, 0);
  423. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  424. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  425. set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
  426. set_cipher_config0(&desc[idx],
  427. HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  428. idx++;
  429. hw_desc_init(&desc[idx]);
  430. set_din_const(&desc[idx], 0, (blocksize - digestsize));
  431. set_flow_mode(&desc[idx], BYPASS);
  432. set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
  433. digestsize), (blocksize - digestsize),
  434. NS_BIT, 0);
  435. idx++;
  436. } else {
  437. hw_desc_init(&desc[idx]);
  438. set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
  439. keylen, NS_BIT);
  440. set_flow_mode(&desc[idx], BYPASS);
  441. set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
  442. keylen, NS_BIT, 0);
  443. idx++;
  444. if ((blocksize - keylen) != 0) {
  445. hw_desc_init(&desc[idx]);
  446. set_din_const(&desc[idx], 0,
  447. (blocksize - keylen));
  448. set_flow_mode(&desc[idx], BYPASS);
  449. set_dout_dlli(&desc[idx],
  450. (padded_authkey_dma_addr +
  451. keylen),
  452. (blocksize - keylen), NS_BIT, 0);
  453. idx++;
  454. }
  455. }
  456. } else {
  457. hw_desc_init(&desc[idx]);
  458. set_din_const(&desc[idx], 0, (blocksize - keylen));
  459. set_flow_mode(&desc[idx], BYPASS);
  460. set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
  461. blocksize, NS_BIT, 0);
  462. idx++;
  463. }
  464. rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
  465. if (rc)
  466. dev_err(dev, "send_request() failed (rc=%d)\n", rc);
  467. if (key_dma_addr)
  468. dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
  469. return rc;
  470. }
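
In short, the routine above leaves padded_authkey holding the key itself (zero-padded to the block size) or, for keys longer than one block, the key's digest followed by zeros. A minimal sketch of that rule, assuming a generic hash() callback that stands in for the hardware hash engine (hypothetical, illustration only):

static void prepare_padded_authkey_sketch(const u8 *key, size_t keylen,
					  u8 *padded, size_t blocksize,
					  void (*hash)(const u8 *in, size_t len,
						       u8 *digest))
{
	memset(padded, 0, blocksize);		/* zero-pad the tail      */
	if (keylen > blocksize)
		hash(key, keylen, padded);	/* long keys are hashed   */
	else
		memcpy(padded, key, keylen);	/* short keys copied as-is */
}
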
  471. static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
  472. unsigned int keylen)
  473. {
  474. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  475. struct rtattr *rta = (struct rtattr *)key;
  476. struct cc_crypto_req cc_req = {};
  477. struct crypto_authenc_key_param *param;
  478. struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
  479. int rc = -EINVAL;
  480. unsigned int seq_len = 0;
  481. struct device *dev = drvdata_to_dev(ctx->drvdata);
  482. dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
  483. ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
  484. /* STAT_PHASE_0: Init and sanity checks */
  485. if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
  486. if (!RTA_OK(rta, keylen))
  487. goto badkey;
  488. if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
  489. goto badkey;
  490. if (RTA_PAYLOAD(rta) < sizeof(*param))
  491. goto badkey;
  492. param = RTA_DATA(rta);
  493. ctx->enc_keylen = be32_to_cpu(param->enckeylen);
  494. key += RTA_ALIGN(rta->rta_len);
  495. keylen -= RTA_ALIGN(rta->rta_len);
  496. if (keylen < ctx->enc_keylen)
  497. goto badkey;
  498. ctx->auth_keylen = keylen - ctx->enc_keylen;
  499. if (ctx->cipher_mode == DRV_CIPHER_CTR) {
  500. /* the nonce is stored in the last bytes of the key */
  501. if (ctx->enc_keylen <
  502. (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
  503. goto badkey;
  504. /* Copy nonce from last 4 bytes in CTR key to
  505. * first 4 bytes in CTR IV
  506. */
  507. memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
  508. ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
  509. CTR_RFC3686_NONCE_SIZE);
  510. /* Set CTR key size */
  511. ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
  512. }
  513. } else { /* non-authenc - has just one key */
  514. ctx->enc_keylen = keylen;
  515. ctx->auth_keylen = 0;
  516. }
  517. rc = validate_keys_sizes(ctx);
  518. if (rc)
  519. goto badkey;
  520. /* STAT_PHASE_1: Copy key to ctx */
  521. /* Get key material */
  522. memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
  523. if (ctx->enc_keylen == 24)
  524. memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
  525. if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
  526. memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
  527. } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
  528. rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
  529. if (rc)
  530. goto badkey;
  531. }
  532. /* STAT_PHASE_2: Create sequence */
  533. switch (ctx->auth_mode) {
  534. case DRV_HASH_SHA1:
  535. case DRV_HASH_SHA256:
  536. seq_len = hmac_setkey(desc, ctx);
  537. break;
  538. case DRV_HASH_XCBC_MAC:
  539. seq_len = xcbc_setkey(desc, ctx);
  540. break;
  541. case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
  542. break; /* No auth. key setup */
  543. default:
  544. dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
  545. rc = -ENOTSUPP;
  546. goto badkey;
  547. }
  548. /* STAT_PHASE_3: Submit sequence to HW */
  549. if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
  550. rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
  551. if (rc) {
  552. dev_err(dev, "send_request() failed (rc=%d)\n", rc);
  553. goto setkey_error;
  554. }
  555. }
  556. /* Update STAT_PHASE_3 */
  557. return rc;
  558. badkey:
  559. crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  560. setkey_error:
  561. return rc;
  562. }
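
The parsing above follows the standard authenc() key layout from crypto/authenc.h: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption-key length, then the authentication key, then the encryption key. A hedged sketch of how a caller might assemble such a blob (illustrative, not driver code; the caller is assumed to provide a large enough buffer; uses the <linux/rtnetlink.h> and <crypto/authenc.h> headers already included above):

static unsigned int build_authenc_key_blob(u8 *blob,
					   const u8 *auth_key,
					   unsigned int auth_keylen,
					   const u8 *enc_key,
					   unsigned int enc_keylen)
{
	struct rtattr *rta = (struct rtattr *)blob;
	struct crypto_authenc_key_param *param;
	u8 *p;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enc_keylen);

	p = blob + RTA_ALIGN(rta->rta_len);
	memcpy(p, auth_key, auth_keylen);		/* auth key first */
	memcpy(p + auth_keylen, enc_key, enc_keylen);	/* then enc key   */

	return RTA_ALIGN(rta->rta_len) + auth_keylen + enc_keylen;
}
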
  563. static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
  564. unsigned int keylen)
  565. {
  566. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  567. if (keylen < 3)
  568. return -EINVAL;
  569. keylen -= 3;
  570. memcpy(ctx->ctr_nonce, key + keylen, 3);
  571. return cc_aead_setkey(tfm, key, keylen);
  572. }
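
For rfc4309(ccm(aes)) the setkey material is the AES key with the 3-byte implicit CCM salt appended, so an AES-128 key arrives as a 19-byte blob. A small illustrative sketch of that layout (placeholder helper, not driver code):

static void build_rfc4309_key_blob(u8 *blob,
				   const u8 aes_key[16], const u8 salt[3])
{
	memcpy(blob, aes_key, 16);	/* AES-128 key			*/
	memcpy(blob + 16, salt, 3);	/* implicit CCM nonce (salt)	*/
	/* cc_rfc4309_ccm_setkey() above strips the salt into
	 * ctx->ctr_nonce before calling cc_aead_setkey().
	 */
}
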
  573. static int cc_aead_setauthsize(struct crypto_aead *authenc,
  574. unsigned int authsize)
  575. {
  576. struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
  577. struct device *dev = drvdata_to_dev(ctx->drvdata);
  578. /* Unsupported auth. sizes */
  579. if (authsize == 0 ||
  580. authsize > crypto_aead_maxauthsize(authenc)) {
  581. return -ENOTSUPP;
  582. }
  583. ctx->authsize = authsize;
  584. dev_dbg(dev, "authlen=%d\n", ctx->authsize);
  585. return 0;
  586. }
  587. static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
  588. unsigned int authsize)
  589. {
  590. switch (authsize) {
  591. case 8:
  592. case 12:
  593. case 16:
  594. break;
  595. default:
  596. return -EINVAL;
  597. }
  598. return cc_aead_setauthsize(authenc, authsize);
  599. }
  600. static int cc_ccm_setauthsize(struct crypto_aead *authenc,
  601. unsigned int authsize)
  602. {
  603. switch (authsize) {
  604. case 4:
  605. case 6:
  606. case 8:
  607. case 10:
  608. case 12:
  609. case 14:
  610. case 16:
  611. break;
  612. default:
  613. return -EINVAL;
  614. }
  615. return cc_aead_setauthsize(authenc, authsize);
  616. }
  617. static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
  618. struct cc_hw_desc desc[], unsigned int *seq_size)
  619. {
  620. struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
  621. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  622. struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
  623. enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
  624. unsigned int idx = *seq_size;
  625. struct device *dev = drvdata_to_dev(ctx->drvdata);
  626. switch (assoc_dma_type) {
  627. case CC_DMA_BUF_DLLI:
  628. dev_dbg(dev, "ASSOC buffer type DLLI\n");
  629. hw_desc_init(&desc[idx]);
  630. set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
  631. areq->assoclen, NS_BIT);
  632. set_flow_mode(&desc[idx], flow_mode);
  633. if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
  634. areq_ctx->cryptlen > 0)
  635. set_din_not_last_indication(&desc[idx]);
  636. break;
  637. case CC_DMA_BUF_MLLI:
  638. dev_dbg(dev, "ASSOC buffer type MLLI\n");
  639. hw_desc_init(&desc[idx]);
  640. set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
  641. areq_ctx->assoc.mlli_nents, NS_BIT);
  642. set_flow_mode(&desc[idx], flow_mode);
  643. if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
  644. areq_ctx->cryptlen > 0)
  645. set_din_not_last_indication(&desc[idx]);
  646. break;
  647. case CC_DMA_BUF_NULL:
  648. default:
  649. dev_err(dev, "Invalid ASSOC buffer type\n");
  650. }
  651. *seq_size = (++idx);
  652. }
  653. static void cc_proc_authen_desc(struct aead_request *areq,
  654. unsigned int flow_mode,
  655. struct cc_hw_desc desc[],
  656. unsigned int *seq_size, int direct)
  657. {
  658. struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
  659. enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
  660. unsigned int idx = *seq_size;
  661. struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
  662. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  663. struct device *dev = drvdata_to_dev(ctx->drvdata);
  664. switch (data_dma_type) {
  665. case CC_DMA_BUF_DLLI:
  666. {
  667. struct scatterlist *cipher =
  668. (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
  669. areq_ctx->dst_sgl : areq_ctx->src_sgl;
  670. unsigned int offset =
  671. (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
  672. areq_ctx->dst_offset : areq_ctx->src_offset;
  673. dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
  674. hw_desc_init(&desc[idx]);
  675. set_din_type(&desc[idx], DMA_DLLI,
  676. (sg_dma_address(cipher) + offset),
  677. areq_ctx->cryptlen, NS_BIT);
  678. set_flow_mode(&desc[idx], flow_mode);
  679. break;
  680. }
  681. case CC_DMA_BUF_MLLI:
  682. {
  683. /* DOUBLE-PASS flow (the default):
  684. * assoc. data + IV + data are compacted into one MLLI table;
  685. * if assoclen is zero, only the IV is processed.
  686. */
  687. cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
  688. u32 mlli_nents = areq_ctx->assoc.mlli_nents;
  689. if (areq_ctx->is_single_pass) {
  690. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  691. mlli_addr = areq_ctx->dst.sram_addr;
  692. mlli_nents = areq_ctx->dst.mlli_nents;
  693. } else {
  694. mlli_addr = areq_ctx->src.sram_addr;
  695. mlli_nents = areq_ctx->src.mlli_nents;
  696. }
  697. }
  698. dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
  699. hw_desc_init(&desc[idx]);
  700. set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
  701. NS_BIT);
  702. set_flow_mode(&desc[idx], flow_mode);
  703. break;
  704. }
  705. case CC_DMA_BUF_NULL:
  706. default:
  707. dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
  708. }
  709. *seq_size = (++idx);
  710. }
  711. static void cc_proc_cipher_desc(struct aead_request *areq,
  712. unsigned int flow_mode,
  713. struct cc_hw_desc desc[],
  714. unsigned int *seq_size)
  715. {
  716. unsigned int idx = *seq_size;
  717. struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
  718. enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
  719. struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
  720. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  721. struct device *dev = drvdata_to_dev(ctx->drvdata);
  722. if (areq_ctx->cryptlen == 0)
  723. return; /*null processing*/
  724. switch (data_dma_type) {
  725. case CC_DMA_BUF_DLLI:
  726. dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
  727. hw_desc_init(&desc[idx]);
  728. set_din_type(&desc[idx], DMA_DLLI,
  729. (sg_dma_address(areq_ctx->src_sgl) +
  730. areq_ctx->src_offset), areq_ctx->cryptlen,
  731. NS_BIT);
  732. set_dout_dlli(&desc[idx],
  733. (sg_dma_address(areq_ctx->dst_sgl) +
  734. areq_ctx->dst_offset),
  735. areq_ctx->cryptlen, NS_BIT, 0);
  736. set_flow_mode(&desc[idx], flow_mode);
  737. break;
  738. case CC_DMA_BUF_MLLI:
  739. dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
  740. hw_desc_init(&desc[idx]);
  741. set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
  742. areq_ctx->src.mlli_nents, NS_BIT);
  743. set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
  744. areq_ctx->dst.mlli_nents, NS_BIT, 0);
  745. set_flow_mode(&desc[idx], flow_mode);
  746. break;
  747. case CC_DMA_BUF_NULL:
  748. default:
  749. dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
  750. }
  751. *seq_size = (++idx);
  752. }
  753. static void cc_proc_digest_desc(struct aead_request *req,
  754. struct cc_hw_desc desc[],
  755. unsigned int *seq_size)
  756. {
  757. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  758. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  759. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  760. unsigned int idx = *seq_size;
  761. unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
  762. DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
  763. int direct = req_ctx->gen_ctx.op_type;
  764. /* Get final ICV result */
  765. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  766. hw_desc_init(&desc[idx]);
  767. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  768. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  769. set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
  770. NS_BIT, 1);
  771. set_queue_last_ind(ctx->drvdata, &desc[idx]);
  772. if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
  773. set_aes_not_hash_mode(&desc[idx]);
  774. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  775. } else {
  776. set_cipher_config0(&desc[idx],
  777. HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  778. set_cipher_mode(&desc[idx], hash_mode);
  779. }
  780. } else { /*Decrypt*/
  781. /* Get ICV out from hardware */
  782. hw_desc_init(&desc[idx]);
  783. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  784. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  785. set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
  786. ctx->authsize, NS_BIT, 1);
  787. set_queue_last_ind(ctx->drvdata, &desc[idx]);
  788. set_cipher_config0(&desc[idx],
  789. HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  790. set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
  791. if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
  792. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  793. set_aes_not_hash_mode(&desc[idx]);
  794. } else {
  795. set_cipher_mode(&desc[idx], hash_mode);
  796. }
  797. }
  798. *seq_size = (++idx);
  799. }
  800. static void cc_set_cipher_desc(struct aead_request *req,
  801. struct cc_hw_desc desc[],
  802. unsigned int *seq_size)
  803. {
  804. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  805. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  806. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  807. unsigned int hw_iv_size = req_ctx->hw_iv_size;
  808. unsigned int idx = *seq_size;
  809. int direct = req_ctx->gen_ctx.op_type;
  810. /* Setup cipher state */
  811. hw_desc_init(&desc[idx]);
  812. set_cipher_config0(&desc[idx], direct);
  813. set_flow_mode(&desc[idx], ctx->flow_mode);
  814. set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
  815. hw_iv_size, NS_BIT);
  816. if (ctx->cipher_mode == DRV_CIPHER_CTR)
  817. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  818. else
  819. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  820. set_cipher_mode(&desc[idx], ctx->cipher_mode);
  821. idx++;
  822. /* Setup enc. key */
  823. hw_desc_init(&desc[idx]);
  824. set_cipher_config0(&desc[idx], direct);
  825. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  826. set_flow_mode(&desc[idx], ctx->flow_mode);
  827. if (ctx->flow_mode == S_DIN_to_AES) {
  828. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  829. ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
  830. ctx->enc_keylen), NS_BIT);
  831. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  832. } else {
  833. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  834. ctx->enc_keylen, NS_BIT);
  835. set_key_size_des(&desc[idx], ctx->enc_keylen);
  836. }
  837. set_cipher_mode(&desc[idx], ctx->cipher_mode);
  838. idx++;
  839. *seq_size = idx;
  840. }
  841. static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
  842. unsigned int *seq_size, unsigned int data_flow_mode)
  843. {
  844. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  845. int direct = req_ctx->gen_ctx.op_type;
  846. unsigned int idx = *seq_size;
  847. if (req_ctx->cryptlen == 0)
  848. return; /*null processing*/
  849. cc_set_cipher_desc(req, desc, &idx);
  850. cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
  851. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  852. /* We must wait for the DMA to finish writing all the cipher data */
  853. hw_desc_init(&desc[idx]);
  854. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  855. set_dout_no_dma(&desc[idx], 0, 0, 1);
  856. idx++;
  857. }
  858. *seq_size = idx;
  859. }
  860. static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
  861. unsigned int *seq_size)
  862. {
  863. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  864. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  865. unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
  866. DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
  867. unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
  868. CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
  869. unsigned int idx = *seq_size;
  870. /* Loading hash ipad xor key state */
  871. hw_desc_init(&desc[idx]);
  872. set_cipher_mode(&desc[idx], hash_mode);
  873. set_din_type(&desc[idx], DMA_DLLI,
  874. ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
  875. NS_BIT);
  876. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  877. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  878. idx++;
  879. /* Load init. digest len (64 bytes) */
  880. hw_desc_init(&desc[idx]);
  881. set_cipher_mode(&desc[idx], hash_mode);
  882. set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
  883. ctx->drvdata->hash_len_sz);
  884. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  885. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  886. idx++;
  887. *seq_size = idx;
  888. }
  889. static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
  890. unsigned int *seq_size)
  891. {
  892. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  893. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  894. unsigned int idx = *seq_size;
  895. /* Loading MAC state */
  896. hw_desc_init(&desc[idx]);
  897. set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
  898. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  899. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  900. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  901. set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
  902. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  903. set_aes_not_hash_mode(&desc[idx]);
  904. idx++;
  905. /* Setup XCBC MAC K1 */
  906. hw_desc_init(&desc[idx]);
  907. set_din_type(&desc[idx], DMA_DLLI,
  908. ctx->auth_state.xcbc.xcbc_keys_dma_addr,
  909. AES_KEYSIZE_128, NS_BIT);
  910. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  911. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  912. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  913. set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
  914. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  915. set_aes_not_hash_mode(&desc[idx]);
  916. idx++;
  917. /* Setup XCBC MAC K2 */
  918. hw_desc_init(&desc[idx]);
  919. set_din_type(&desc[idx], DMA_DLLI,
  920. (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
  921. AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
  922. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  923. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  924. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  925. set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
  926. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  927. set_aes_not_hash_mode(&desc[idx]);
  928. idx++;
  929. /* Setup XCBC MAC K3 */
  930. hw_desc_init(&desc[idx]);
  931. set_din_type(&desc[idx], DMA_DLLI,
  932. (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
  933. 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
  934. set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
  935. set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
  936. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  937. set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
  938. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  939. set_aes_not_hash_mode(&desc[idx]);
  940. idx++;
  941. *seq_size = idx;
  942. }
  943. static void cc_proc_header_desc(struct aead_request *req,
  944. struct cc_hw_desc desc[],
  945. unsigned int *seq_size)
  946. {
  947. unsigned int idx = *seq_size;
  948. /* Hash associated data */
  949. if (req->assoclen > 0)
  950. cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
  951. /* Hash IV */
  952. *seq_size = idx;
  953. }
  954. static void cc_proc_scheme_desc(struct aead_request *req,
  955. struct cc_hw_desc desc[],
  956. unsigned int *seq_size)
  957. {
  958. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  959. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  960. struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
  961. unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
  962. DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
  963. unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
  964. CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
  965. unsigned int idx = *seq_size;
  966. hw_desc_init(&desc[idx]);
  967. set_cipher_mode(&desc[idx], hash_mode);
  968. set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
  969. ctx->drvdata->hash_len_sz);
  970. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  971. set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
  972. set_cipher_do(&desc[idx], DO_PAD);
  973. idx++;
  974. /* Get final ICV result */
  975. hw_desc_init(&desc[idx]);
  976. set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
  977. digest_size);
  978. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  979. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  980. set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  981. set_cipher_mode(&desc[idx], hash_mode);
  982. idx++;
  983. /* Loading hash opad xor key state */
  984. hw_desc_init(&desc[idx]);
  985. set_cipher_mode(&desc[idx], hash_mode);
  986. set_din_type(&desc[idx], DMA_DLLI,
  987. (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
  988. digest_size, NS_BIT);
  989. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  990. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  991. idx++;
  992. /* Load init. digest len (64 bytes) */
  993. hw_desc_init(&desc[idx]);
  994. set_cipher_mode(&desc[idx], hash_mode);
  995. set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
  996. ctx->drvdata->hash_len_sz);
  997. set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
  998. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  999. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1000. idx++;
  1001. /* Perform HASH update */
  1002. hw_desc_init(&desc[idx]);
  1003. set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
  1004. digest_size);
  1005. set_flow_mode(&desc[idx], DIN_HASH);
  1006. idx++;
  1007. *seq_size = idx;
  1008. }
  1009. static void cc_mlli_to_sram(struct aead_request *req,
  1010. struct cc_hw_desc desc[], unsigned int *seq_size)
  1011. {
  1012. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1013. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1014. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1015. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1016. if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
  1017. req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
  1018. !req_ctx->is_single_pass) {
  1019. dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
  1020. (unsigned int)ctx->drvdata->mlli_sram_addr,
  1021. req_ctx->mlli_params.mlli_len);
  1022. /* Copy MLLI table host-to-sram */
  1023. hw_desc_init(&desc[*seq_size]);
  1024. set_din_type(&desc[*seq_size], DMA_DLLI,
  1025. req_ctx->mlli_params.mlli_dma_addr,
  1026. req_ctx->mlli_params.mlli_len, NS_BIT);
  1027. set_dout_sram(&desc[*seq_size],
  1028. ctx->drvdata->mlli_sram_addr,
  1029. req_ctx->mlli_params.mlli_len);
  1030. set_flow_mode(&desc[*seq_size], BYPASS);
  1031. (*seq_size)++;
  1032. }
  1033. }
  1034. static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
  1035. enum cc_flow_mode setup_flow_mode,
  1036. bool is_single_pass)
  1037. {
  1038. enum cc_flow_mode data_flow_mode;
  1039. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  1040. if (setup_flow_mode == S_DIN_to_AES)
  1041. data_flow_mode = is_single_pass ?
  1042. AES_to_HASH_and_DOUT : DIN_AES_DOUT;
  1043. else
  1044. data_flow_mode = is_single_pass ?
  1045. DES_to_HASH_and_DOUT : DIN_DES_DOUT;
  1046. } else { /* Decrypt */
  1047. if (setup_flow_mode == S_DIN_to_AES)
  1048. data_flow_mode = is_single_pass ?
  1049. AES_and_HASH : DIN_AES_DOUT;
  1050. else
  1051. data_flow_mode = is_single_pass ?
  1052. DES_and_HASH : DIN_DES_DOUT;
  1053. }
  1054. return data_flow_mode;
  1055. }
  1056. static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
  1057. unsigned int *seq_size)
  1058. {
  1059. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1060. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1061. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1062. int direct = req_ctx->gen_ctx.op_type;
  1063. unsigned int data_flow_mode =
  1064. cc_get_data_flow(direct, ctx->flow_mode,
  1065. req_ctx->is_single_pass);
  1066. if (req_ctx->is_single_pass) {
  1067. /**
  1068. * Single-pass flow
  1069. */
  1070. cc_set_hmac_desc(req, desc, seq_size);
  1071. cc_set_cipher_desc(req, desc, seq_size);
  1072. cc_proc_header_desc(req, desc, seq_size);
  1073. cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
  1074. cc_proc_scheme_desc(req, desc, seq_size);
  1075. cc_proc_digest_desc(req, desc, seq_size);
  1076. return;
  1077. }
  1078. /**
  1079. * Double-pass flow
  1080. * Fallback for unsupported single-pass modes,
  1081. * i.e. when the assoc. data length is not a multiple of a word.
  1082. */
  1083. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  1084. /* encrypt first.. */
  1085. cc_proc_cipher(req, desc, seq_size, data_flow_mode);
  1086. /* authenc after..*/
  1087. cc_set_hmac_desc(req, desc, seq_size);
  1088. cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
  1089. cc_proc_scheme_desc(req, desc, seq_size);
  1090. cc_proc_digest_desc(req, desc, seq_size);
  1091. } else { /*DECRYPT*/
  1092. /* authenc first..*/
  1093. cc_set_hmac_desc(req, desc, seq_size);
  1094. cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
  1095. cc_proc_scheme_desc(req, desc, seq_size);
  1096. /* decrypt after.. */
  1097. cc_proc_cipher(req, desc, seq_size, data_flow_mode);
  1098. /* Reading the digest result (which also sets the completion bit)
  1099. * must come after the cipher operation.
  1100. */
  1101. cc_proc_digest_desc(req, desc, seq_size);
  1102. }
  1103. }
  1104. static void
  1105. cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
  1106. unsigned int *seq_size)
  1107. {
  1108. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1109. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1110. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1111. int direct = req_ctx->gen_ctx.op_type;
  1112. unsigned int data_flow_mode =
  1113. cc_get_data_flow(direct, ctx->flow_mode,
  1114. req_ctx->is_single_pass);
  1115. if (req_ctx->is_single_pass) {
  1116. /**
  1117. * Single-pass flow
  1118. */
  1119. cc_set_xcbc_desc(req, desc, seq_size);
  1120. cc_set_cipher_desc(req, desc, seq_size);
  1121. cc_proc_header_desc(req, desc, seq_size);
  1122. cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
  1123. cc_proc_digest_desc(req, desc, seq_size);
  1124. return;
  1125. }
  1126. /**
  1127. * Double-pass flow
  1128. * Fallback for unsupported single-pass modes,
  1129. * i.e. when the assoc. data length is not a multiple of a word.
  1130. */
  1131. if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  1132. /* encrypt first.. */
  1133. cc_proc_cipher(req, desc, seq_size, data_flow_mode);
  1134. /* authenc after.. */
  1135. cc_set_xcbc_desc(req, desc, seq_size);
  1136. cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
  1137. cc_proc_digest_desc(req, desc, seq_size);
  1138. } else { /*DECRYPT*/
  1139. /* authenc first.. */
  1140. cc_set_xcbc_desc(req, desc, seq_size);
  1141. cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
  1142. /* decrypt after..*/
  1143. cc_proc_cipher(req, desc, seq_size, data_flow_mode);
  1144. /* Reading the digest result (which also sets the completion bit)
  1145. * must come after the cipher operation.
  1146. */
  1147. cc_proc_digest_desc(req, desc, seq_size);
  1148. }
  1149. }
  1150. static int validate_data_size(struct cc_aead_ctx *ctx,
  1151. enum drv_crypto_direction direct,
  1152. struct aead_request *req)
  1153. {
  1154. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1155. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1156. unsigned int assoclen = req->assoclen;
  1157. unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
  1158. (req->cryptlen - ctx->authsize) : req->cryptlen;
  1159. if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
  1160. req->cryptlen < ctx->authsize)
  1161. goto data_size_err;
  1162. areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
  1163. switch (ctx->flow_mode) {
  1164. case S_DIN_to_AES:
  1165. if (ctx->cipher_mode == DRV_CIPHER_CBC &&
  1166. !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
  1167. goto data_size_err;
  1168. if (ctx->cipher_mode == DRV_CIPHER_CCM)
  1169. break;
  1170. if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
  1171. if (areq_ctx->plaintext_authenticate_only)
  1172. areq_ctx->is_single_pass = false;
  1173. break;
  1174. }
  1175. if (!IS_ALIGNED(assoclen, sizeof(u32)))
  1176. areq_ctx->is_single_pass = false;
  1177. if (ctx->cipher_mode == DRV_CIPHER_CTR &&
  1178. !IS_ALIGNED(cipherlen, sizeof(u32)))
  1179. areq_ctx->is_single_pass = false;
  1180. break;
  1181. case S_DIN_to_DES:
  1182. if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
  1183. goto data_size_err;
  1184. if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
  1185. areq_ctx->is_single_pass = false;
  1186. break;
  1187. default:
  1188. dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
  1189. goto data_size_err;
  1190. }
  1191. return 0;
  1192. data_size_err:
  1193. return -EINVAL;
  1194. }
  1195. static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
  1196. {
  1197. unsigned int len = 0;
  1198. if (header_size == 0)
  1199. return 0;
  1200. if (header_size < ((1UL << 16) - (1UL << 8))) {
  1201. len = 2;
  1202. pa0_buff[0] = (header_size >> 8) & 0xFF;
  1203. pa0_buff[1] = header_size & 0xFF;
  1204. } else {
  1205. len = 6;
  1206. pa0_buff[0] = 0xFF;
  1207. pa0_buff[1] = 0xFE;
  1208. pa0_buff[2] = (header_size >> 24) & 0xFF;
  1209. pa0_buff[3] = (header_size >> 16) & 0xFF;
  1210. pa0_buff[4] = (header_size >> 8) & 0xFF;
  1211. pa0_buff[5] = header_size & 0xFF;
  1212. }
  1213. return len;
  1214. }
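
format_ccm_a0() encodes the associated-data length at the start of the A0/adata block as specified by RFC 3610: two bytes for lengths below 2^16 - 2^8, otherwise the marker 0xFF 0xFE followed by a 4-byte big-endian length. A worked self-check (illustrative only):

static void ccm_a0_selfcheck(void)
{
	u8 a0[16];
	unsigned int len;

	len = format_ccm_a0(a0, 24);	/* len == 2, a0 = { 0x00, 0x18 }	*/
	len = format_ccm_a0(a0, 70000);	/* len == 6, a0 = { 0xFF, 0xFE,
					 *                  0x00, 0x01,
					 *                  0x11, 0x70 }	*/
	(void)len;
}
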
  1215. static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
  1216. {
  1217. __be32 data;
  1218. memset(block, 0, csize);
  1219. block += csize;
  1220. if (csize >= 4)
  1221. csize = 4;
  1222. else if (msglen > (1 << (8 * csize)))
  1223. return -EOVERFLOW;
  1224. data = cpu_to_be32(msglen);
  1225. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  1226. return 0;
  1227. }
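/*
 * Illustrative sketch (not compiled with the driver): set_msg_len() above
 * writes the message length big-endian into the last bytes of the l-byte
 * length field of B0 (at most 4 bytes are used), failing if the length
 * does not fit. The helper below is hypothetical and uses >= for the
 * range check purely for the sake of the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int sketch_set_msg_len(uint8_t *field, uint32_t msglen,
			      unsigned int csize)
{
	unsigned int n = csize < 4 ? csize : 4;
	unsigned int i;

	memset(field, 0, csize);
	if (csize < 4 && msglen >= (1UL << (8 * csize)))
		return -1;			/* length does not fit */
	for (i = 0; i < n; i++)			/* big-endian, into the tail */
		field[csize - 1 - i] = (uint8_t)(msglen >> (8 * i));
	return 0;
}

int main(void)
{
	uint8_t field[3];

	/* csize = 3, msglen = 0x00abcdef -> field = ab cd ef */
	if (!sketch_set_msg_len(field, 0x00abcdef, 3))
		printf("%02x %02x %02x\n", field[0], field[1], field[2]);
	return 0;
}
#endif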
  1228. static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
  1229. unsigned int *seq_size)
  1230. {
  1231. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1232. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1233. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1234. unsigned int idx = *seq_size;
  1235. unsigned int cipher_flow_mode;
  1236. dma_addr_t mac_result;
  1237. if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
  1238. cipher_flow_mode = AES_to_HASH_and_DOUT;
  1239. mac_result = req_ctx->mac_buf_dma_addr;
  1240. } else { /* Encrypt */
  1241. cipher_flow_mode = AES_and_HASH;
  1242. mac_result = req_ctx->icv_dma_addr;
  1243. }
  1244. /* load key */
  1245. hw_desc_init(&desc[idx]);
  1246. set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
  1247. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  1248. ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
  1249. ctx->enc_keylen), NS_BIT);
  1250. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1251. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1252. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  1253. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1254. idx++;
  1255. /* load ctr state */
  1256. hw_desc_init(&desc[idx]);
  1257. set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
  1258. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1259. set_din_type(&desc[idx], DMA_DLLI,
  1260. req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
  1261. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  1262. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  1263. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1264. idx++;
  1265. /* load MAC key */
  1266. hw_desc_init(&desc[idx]);
  1267. set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
  1268. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  1269. ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
  1270. ctx->enc_keylen), NS_BIT);
  1271. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1272. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1273. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  1274. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  1275. set_aes_not_hash_mode(&desc[idx]);
  1276. idx++;
  1277. /* load MAC state */
  1278. hw_desc_init(&desc[idx]);
  1279. set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
  1280. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1281. set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
  1282. AES_BLOCK_SIZE, NS_BIT);
  1283. set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
  1284. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  1285. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  1286. set_aes_not_hash_mode(&desc[idx]);
  1287. idx++;
  1288. /* process assoc data */
  1289. if (req->assoclen > 0) {
  1290. cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
  1291. } else {
  1292. hw_desc_init(&desc[idx]);
  1293. set_din_type(&desc[idx], DMA_DLLI,
  1294. sg_dma_address(&req_ctx->ccm_adata_sg),
  1295. AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
  1296. set_flow_mode(&desc[idx], DIN_HASH);
  1297. idx++;
  1298. }
  1299. /* process the cipher */
  1300. if (req_ctx->cryptlen)
  1301. cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
  1302. /* Read temporal MAC */
  1303. hw_desc_init(&desc[idx]);
  1304. set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
  1305. set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
  1306. NS_BIT, 0);
  1307. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  1308. set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
  1309. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  1310. set_aes_not_hash_mode(&desc[idx]);
  1311. idx++;
1312. /* load AES-CTR state (for last MAC calculation) */
  1313. hw_desc_init(&desc[idx]);
  1314. set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
  1315. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1316. set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
  1317. AES_BLOCK_SIZE, NS_BIT);
  1318. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1319. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  1320. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1321. idx++;
  1322. hw_desc_init(&desc[idx]);
  1323. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  1324. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1325. idx++;
  1326. /* encrypt the "T" value and store MAC in mac_state */
  1327. hw_desc_init(&desc[idx]);
  1328. set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
  1329. ctx->authsize, NS_BIT);
  1330. set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
  1331. set_queue_last_ind(ctx->drvdata, &desc[idx]);
  1332. set_flow_mode(&desc[idx], DIN_AES_DOUT);
  1333. idx++;
  1334. *seq_size = idx;
  1335. return 0;
  1336. }
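/*
 * Illustrative sketch (not compiled with the driver): the closing
 * descriptors above run the CBC-MAC value T through AES-CTR keyed with the
 * A0 counter block, so the transmitted tag is the first authsize bytes of
 * E_K(A0) XOR T (RFC 3610). The helper below only shows that final XOR and
 * truncation and assumes the keystream block E_K(A0) is already available;
 * it is not a real AES implementation.
 */
#if 0
#include <stdint.h>

static void sketch_ccm_final_tag(uint8_t *tag, const uint8_t s0[16],
				 const uint8_t t[16], unsigned int authsize)
{
	unsigned int i;

	for (i = 0; i < authsize && i < 16; i++)
		tag[i] = s0[i] ^ t[i];	/* tag = MSB_authsize(E_K(A0) ^ T) */
}
#endif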
  1337. static int config_ccm_adata(struct aead_request *req)
  1338. {
  1339. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1340. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1341. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1342. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1344. unsigned int lp = req->iv[0];
1345. /* Note: The code assumes that req->iv[0] already contains the value
1346. * of L' of RFC 3610
1347. */
1348. unsigned int l = lp + 1; /* This is L of RFC 3610. */
1349. unsigned int m = ctx->authsize; /* This is M of RFC 3610. */
  1350. u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
  1351. u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
  1352. u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
  1353. unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
  1354. DRV_CRYPTO_DIRECTION_ENCRYPT) ?
  1355. req->cryptlen :
  1356. (req->cryptlen - ctx->authsize);
  1357. int rc;
  1358. memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
  1359. memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
  1360. /* taken from crypto/ccm.c */
  1361. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  1362. if (l < 2 || l > 8) {
  1363. dev_err(dev, "illegal iv value %X\n", req->iv[0]);
  1364. return -EINVAL;
  1365. }
  1366. memcpy(b0, req->iv, AES_BLOCK_SIZE);
  1367. /* format control info per RFC 3610 and
  1368. * NIST Special Publication 800-38C
  1369. */
  1370. *b0 |= (8 * ((m - 2) / 2));
  1371. if (req->assoclen > 0)
  1372. *b0 |= 64; /* Enable bit 6 if Adata exists. */
1373. rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write the l-byte length field. */
1374. if (rc) {
1375. dev_err(dev, "message len overflow detected\n");
  1376. return rc;
  1377. }
  1378. /* END of "taken from crypto/ccm.c" */
  1379. /* l(a) - size of associated data. */
  1380. req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
  1381. memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
  1382. req->iv[15] = 1;
  1383. memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
  1384. ctr_count_0[15] = 0;
  1385. return 0;
  1386. }
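/*
 * Illustrative sketch (not compiled with the driver): the B0 flags byte
 * built above follows RFC 3610 - bit 6 is set when Adata is present,
 * bits 5..3 carry (M - 2) / 2 and bits 2..0 carry L' = L - 1 (already
 * present in iv[0] here). The helper name below is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint8_t sketch_ccm_b0_flags(unsigned int m, unsigned int l,
				   int have_adata)
{
	uint8_t flags = (uint8_t)(l - 1);		/* L' in bits 2..0 */

	flags |= (uint8_t)(8 * ((m - 2) / 2));		/* M' in bits 5..3 */
	if (have_adata)
		flags |= 64;				/* Adata flag, bit 6 */
	return flags;
}

int main(void)
{
	/* authsize M = 16, 4-byte length field (L = 4), Adata present */
	printf("0x%02x\n", sketch_ccm_b0_flags(16, 4, 1));	/* 0x7b */
	return 0;
}
#endif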
  1387. static void cc_proc_rfc4309_ccm(struct aead_request *req)
  1388. {
  1389. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1390. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1391. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1392. /* L' */
  1393. memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
  1394. /* For RFC 4309, always use 4 bytes for message length
  1395. * (at most 2^32-1 bytes).
  1396. */
  1397. areq_ctx->ctr_iv[0] = 3;
1398. /* In RFC 4309 there is an 11-byte nonce+IV part
1399. * that we build here.
  1400. */
  1401. memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
  1402. CCM_BLOCK_NONCE_SIZE);
  1403. memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
  1404. CCM_BLOCK_IV_SIZE);
  1405. req->iv = areq_ctx->ctr_iv;
  1406. req->assoclen -= CCM_BLOCK_IV_SIZE;
  1407. }
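/*
 * Illustrative sketch (not compiled with the driver): the RFC 4309 counter
 * block built above is L' = 3 in byte 0, the 3-byte salt from the key in
 * bytes 1..3, the 8-byte per-request IV in bytes 4..11 and a zeroed 4-byte
 * block counter. The offsets below are assumptions matching the
 * CCM_BLOCK_* constants used in the function.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void sketch_rfc4309_ctr_block(uint8_t blk[16], const uint8_t salt[3],
				     const uint8_t iv[8])
{
	memset(blk, 0, 16);
	blk[0] = 3;			/* L' for a 4-byte length field */
	memcpy(blk + 1, salt, 3);	/* nonce part 1: salt from the key */
	memcpy(blk + 4, iv, 8);		/* nonce part 2: explicit IV */
	/* bytes 12..15 stay zero; they are the running block counter */
}
#endif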
  1408. static void cc_set_ghash_desc(struct aead_request *req,
  1409. struct cc_hw_desc desc[], unsigned int *seq_size)
  1410. {
  1411. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1412. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1413. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1414. unsigned int idx = *seq_size;
1415. /* load key to AES */
  1416. hw_desc_init(&desc[idx]);
  1417. set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
  1418. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1419. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  1420. ctx->enc_keylen, NS_BIT);
  1421. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1422. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1423. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1424. idx++;
  1425. /* process one zero block to generate hkey */
  1426. hw_desc_init(&desc[idx]);
  1427. set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
  1428. set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
  1429. NS_BIT, 0);
  1430. set_flow_mode(&desc[idx], DIN_AES_DOUT);
  1431. idx++;
  1432. /* Memory Barrier */
  1433. hw_desc_init(&desc[idx]);
  1434. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  1435. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1436. idx++;
  1437. /* Load GHASH subkey */
  1438. hw_desc_init(&desc[idx]);
  1439. set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
  1440. AES_BLOCK_SIZE, NS_BIT);
  1441. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1442. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  1443. set_aes_not_hash_mode(&desc[idx]);
  1444. set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
  1445. set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
  1446. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1447. idx++;
1448. /* Configure the hash engine to work with GHASH.
1449. * Since it was not possible to extend the HASH submodes to add GHASH,
1450. * the following command is necessary in order to
1451. * select GHASH (according to the HW designers).
1452. */
  1453. hw_desc_init(&desc[idx]);
  1454. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  1455. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1456. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  1457. set_aes_not_hash_mode(&desc[idx]);
  1458. set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1459. set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
  1460. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1461. set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
  1462. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1463. idx++;
  1464. /* Load GHASH initial STATE (which is 0). (for any hash there is an
  1465. * initial state)
  1466. */
  1467. hw_desc_init(&desc[idx]);
  1468. set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
  1469. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1470. set_flow_mode(&desc[idx], S_DIN_to_HASH);
  1471. set_aes_not_hash_mode(&desc[idx]);
  1472. set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
  1473. set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
  1474. set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
  1475. idx++;
  1476. *seq_size = idx;
  1477. }
  1478. static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
  1479. unsigned int *seq_size)
  1480. {
  1481. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1482. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1483. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1484. unsigned int idx = *seq_size;
1485. /* load key to AES */
  1486. hw_desc_init(&desc[idx]);
  1487. set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
  1488. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1489. set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
  1490. ctx->enc_keylen, NS_BIT);
  1491. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1492. set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
  1493. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1494. idx++;
  1495. if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1496. /* load AES/CTR initial CTR value inc by 2 */
  1497. hw_desc_init(&desc[idx]);
  1498. set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
  1499. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1500. set_din_type(&desc[idx], DMA_DLLI,
  1501. req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
  1502. NS_BIT);
  1503. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1504. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  1505. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1506. idx++;
  1507. }
  1508. *seq_size = idx;
  1509. }
  1510. static void cc_proc_gcm_result(struct aead_request *req,
  1511. struct cc_hw_desc desc[],
  1512. unsigned int *seq_size)
  1513. {
  1514. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1515. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1516. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1517. dma_addr_t mac_result;
  1518. unsigned int idx = *seq_size;
  1519. if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
  1520. mac_result = req_ctx->mac_buf_dma_addr;
  1521. } else { /* Encrypt */
  1522. mac_result = req_ctx->icv_dma_addr;
  1523. }
  1524. /* process(ghash) gcm_block_len */
  1525. hw_desc_init(&desc[idx]);
  1526. set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
  1527. AES_BLOCK_SIZE, NS_BIT);
  1528. set_flow_mode(&desc[idx], DIN_HASH);
  1529. idx++;
1530. /* Store GHASH state after GHASH(Associated Data + Cipher + LenBlock) */
  1531. hw_desc_init(&desc[idx]);
  1532. set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
  1533. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  1534. set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
  1535. NS_BIT, 0);
  1536. set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
  1537. set_flow_mode(&desc[idx], S_HASH_to_DOUT);
  1538. set_aes_not_hash_mode(&desc[idx]);
  1539. idx++;
1540. /* load AES/CTR initial CTR value inc by 1 */
  1541. hw_desc_init(&desc[idx]);
  1542. set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
  1543. set_key_size_aes(&desc[idx], ctx->enc_keylen);
  1544. set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
  1545. AES_BLOCK_SIZE, NS_BIT);
  1546. set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
  1547. set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
  1548. set_flow_mode(&desc[idx], S_DIN_to_AES);
  1549. idx++;
  1550. /* Memory Barrier */
  1551. hw_desc_init(&desc[idx]);
  1552. set_din_no_dma(&desc[idx], 0, 0xfffff0);
  1553. set_dout_no_dma(&desc[idx], 0, 0, 1);
  1554. idx++;
1555. /* process GCTR on stored GHASH and store MAC in mac_state */
  1556. hw_desc_init(&desc[idx]);
  1557. set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
  1558. set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
  1559. AES_BLOCK_SIZE, NS_BIT);
  1560. set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
  1561. set_queue_last_ind(ctx->drvdata, &desc[idx]);
  1562. set_flow_mode(&desc[idx], DIN_AES_DOUT);
  1563. idx++;
  1564. *seq_size = idx;
  1565. }
  1566. static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
  1567. unsigned int *seq_size)
  1568. {
  1569. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1570. unsigned int cipher_flow_mode;
  1571. if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
  1572. cipher_flow_mode = AES_and_HASH;
  1573. } else { /* Encrypt */
  1574. cipher_flow_mode = AES_to_HASH_and_DOUT;
  1575. }
1576. /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
  1577. if (req_ctx->plaintext_authenticate_only) {
  1578. cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
  1579. cc_set_ghash_desc(req, desc, seq_size);
  1580. /* process(ghash) assoc data */
  1581. cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
  1582. cc_set_gctr_desc(req, desc, seq_size);
  1583. cc_proc_gcm_result(req, desc, seq_size);
  1584. return 0;
  1585. }
1586. /* For GCM and RFC 4106. */
  1587. cc_set_ghash_desc(req, desc, seq_size);
  1588. /* process(ghash) assoc data */
  1589. if (req->assoclen > 0)
  1590. cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
  1591. cc_set_gctr_desc(req, desc, seq_size);
  1592. /* process(gctr+ghash) */
  1593. if (req_ctx->cryptlen)
  1594. cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
  1595. cc_proc_gcm_result(req, desc, seq_size);
  1596. return 0;
  1597. }
  1598. static int config_gcm_context(struct aead_request *req)
  1599. {
  1600. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1601. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1602. struct aead_req_ctx *req_ctx = aead_request_ctx(req);
  1603. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1604. unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
  1605. DRV_CRYPTO_DIRECTION_ENCRYPT) ?
  1606. req->cryptlen :
  1607. (req->cryptlen - ctx->authsize);
  1608. __be32 counter = cpu_to_be32(2);
  1609. dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
  1610. __func__, cryptlen, req->assoclen, ctx->authsize);
  1611. memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
  1612. memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
  1613. memcpy(req->iv + 12, &counter, 4);
  1614. memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
  1615. counter = cpu_to_be32(1);
  1616. memcpy(req->iv + 12, &counter, 4);
  1617. memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
  1618. if (!req_ctx->plaintext_authenticate_only) {
  1619. __be64 temp64;
  1620. temp64 = cpu_to_be64(req->assoclen * 8);
  1621. memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
  1622. temp64 = cpu_to_be64(cryptlen * 8);
  1623. memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
  1624. } else {
1625. /* rfc4543 => all data (AAD, IV, plaintext) is considered additional
1626. * authenticated data, i.e. nothing is encrypted.
1627. */
  1628. __be64 temp64;
  1629. temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
  1630. cryptlen) * 8);
  1631. memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
  1632. temp64 = 0;
  1633. memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
  1634. }
  1635. return 0;
  1636. }
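/*
 * Illustrative sketch (not compiled with the driver): the GHASH length
 * block prepared above is len(A) || len(C), each a 64-bit big-endian bit
 * count; for RFC 4543 everything is counted as AAD and len(C) is zero.
 * The helper below is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void sketch_gcm_len_block(uint8_t out[16], uint64_t aad_bytes,
				 uint64_t ct_bytes)
{
	uint64_t bits = aad_bytes * 8;
	int i;

	for (i = 0; i < 8; i++)			/* len(A) in bits, big-endian */
		out[i] = (uint8_t)(bits >> (56 - 8 * i));
	bits = ct_bytes * 8;
	for (i = 0; i < 8; i++)			/* len(C) in bits, big-endian */
		out[8 + i] = (uint8_t)(bits >> (56 - 8 * i));
}

int main(void)
{
	uint8_t blk[16];
	int i;

	sketch_gcm_len_block(blk, 20, 64);	/* 20-byte AAD, 64-byte text */
	for (i = 0; i < 16; i++)
		printf("%02x", blk[i]);
	printf("\n");		/* 00000000000000a00000000000000200 */
	return 0;
}
#endif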
  1637. static void cc_proc_rfc4_gcm(struct aead_request *req)
  1638. {
  1639. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1640. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1641. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1642. memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
  1643. ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
  1644. memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
  1645. GCM_BLOCK_RFC4_IV_SIZE);
  1646. req->iv = areq_ctx->ctr_iv;
  1647. req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
  1648. }
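/*
 * Illustrative sketch (not compiled with the driver): RFC 4106/4543 build
 * the 12-byte GCM IV as the 4-byte salt taken from the end of the key
 * followed by the 8-byte per-request IV, and the explicit IV bytes are no
 * longer counted as AAD. The layout below is an assumption matching the
 * GCM_BLOCK_RFC4_* constants used in the function.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void sketch_rfc4106_iv(uint8_t iv12[12], const uint8_t salt[4],
			      const uint8_t explicit_iv[8])
{
	memcpy(iv12, salt, 4);			/* salt from the key */
	memcpy(iv12 + 4, explicit_iv, 8);	/* IV carried in the packet */
}
#endif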
  1649. static int cc_proc_aead(struct aead_request *req,
  1650. enum drv_crypto_direction direct)
  1651. {
  1652. int rc = 0;
  1653. int seq_len = 0;
  1654. struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
  1655. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1656. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1657. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1658. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1659. struct cc_crypto_req cc_req = {};
1660. dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
  1661. ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
  1662. ctx, req, req->iv, sg_virt(req->src), req->src->offset,
  1663. sg_virt(req->dst), req->dst->offset, req->cryptlen);
  1664. /* STAT_PHASE_0: Init and sanity checks */
  1665. /* Check data length according to mode */
  1666. if (validate_data_size(ctx, direct, req)) {
  1667. dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
  1668. req->cryptlen, req->assoclen);
  1669. crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
  1670. return -EINVAL;
  1671. }
  1672. /* Setup request structure */
  1673. cc_req.user_cb = (void *)cc_aead_complete;
  1674. cc_req.user_arg = (void *)req;
  1675. /* Setup request context */
  1676. areq_ctx->gen_ctx.op_type = direct;
  1677. areq_ctx->req_authsize = ctx->authsize;
  1678. areq_ctx->cipher_mode = ctx->cipher_mode;
  1679. /* STAT_PHASE_1: Map buffers */
  1680. if (ctx->cipher_mode == DRV_CIPHER_CTR) {
  1681. /* Build CTR IV - Copy nonce from last 4 bytes in
  1682. * CTR key to first 4 bytes in CTR IV
  1683. */
  1684. memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
  1685. CTR_RFC3686_NONCE_SIZE);
1686. if (!areq_ctx->backup_giv) /* User-provided (non-generated) IV */
  1687. memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
  1688. req->iv, CTR_RFC3686_IV_SIZE);
  1689. /* Initialize counter portion of counter block */
  1690. *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
  1691. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  1692. /* Replace with counter iv */
  1693. req->iv = areq_ctx->ctr_iv;
  1694. areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
  1695. } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
  1696. (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
  1697. areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
  1698. if (areq_ctx->ctr_iv != req->iv) {
  1699. memcpy(areq_ctx->ctr_iv, req->iv,
  1700. crypto_aead_ivsize(tfm));
  1701. req->iv = areq_ctx->ctr_iv;
  1702. }
  1703. } else {
  1704. areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
  1705. }
  1706. if (ctx->cipher_mode == DRV_CIPHER_CCM) {
  1707. rc = config_ccm_adata(req);
  1708. if (rc) {
  1709. dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
  1710. rc);
  1711. goto exit;
  1712. }
  1713. } else {
  1714. areq_ctx->ccm_hdr_size = ccm_header_size_null;
  1715. }
  1716. if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
  1717. rc = config_gcm_context(req);
  1718. if (rc) {
  1719. dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
  1720. rc);
  1721. goto exit;
  1722. }
  1723. }
  1724. rc = cc_map_aead_request(ctx->drvdata, req);
  1725. if (rc) {
  1726. dev_err(dev, "map_request() failed\n");
  1727. goto exit;
  1728. }
  1729. /* do we need to generate IV? */
  1730. if (areq_ctx->backup_giv) {
  1731. /* set the DMA mapped IV address*/
  1732. if (ctx->cipher_mode == DRV_CIPHER_CTR) {
  1733. cc_req.ivgen_dma_addr[0] =
  1734. areq_ctx->gen_ctx.iv_dma_addr +
  1735. CTR_RFC3686_NONCE_SIZE;
  1736. cc_req.ivgen_dma_addr_len = 1;
  1737. } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1738. /* In CCM, the IV needs to exist both inside B0 and
1739. * inside the counter. It is also copied to iv_dma_addr
1740. * for other reasons (like returning it to the user).
1741. * So, use 3 (identical) IV outputs.
1742. */
  1743. cc_req.ivgen_dma_addr[0] =
  1744. areq_ctx->gen_ctx.iv_dma_addr +
  1745. CCM_BLOCK_IV_OFFSET;
  1746. cc_req.ivgen_dma_addr[1] =
  1747. sg_dma_address(&areq_ctx->ccm_adata_sg) +
  1748. CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
  1749. cc_req.ivgen_dma_addr[2] =
  1750. sg_dma_address(&areq_ctx->ccm_adata_sg) +
  1751. CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
  1752. cc_req.ivgen_dma_addr_len = 3;
  1753. } else {
  1754. cc_req.ivgen_dma_addr[0] =
  1755. areq_ctx->gen_ctx.iv_dma_addr;
  1756. cc_req.ivgen_dma_addr_len = 1;
  1757. }
  1758. /* set the IV size (8/16 B long)*/
  1759. cc_req.ivgen_size = crypto_aead_ivsize(tfm);
  1760. }
  1761. /* STAT_PHASE_2: Create sequence */
  1762. /* Load MLLI tables to SRAM if necessary */
  1763. cc_mlli_to_sram(req, desc, &seq_len);
1764. /* TODO: move seq len by reference */
  1765. switch (ctx->auth_mode) {
  1766. case DRV_HASH_SHA1:
  1767. case DRV_HASH_SHA256:
  1768. cc_hmac_authenc(req, desc, &seq_len);
  1769. break;
  1770. case DRV_HASH_XCBC_MAC:
  1771. cc_xcbc_authenc(req, desc, &seq_len);
  1772. break;
  1773. case DRV_HASH_NULL:
  1774. if (ctx->cipher_mode == DRV_CIPHER_CCM)
  1775. cc_ccm(req, desc, &seq_len);
  1776. if (ctx->cipher_mode == DRV_CIPHER_GCTR)
  1777. cc_gcm(req, desc, &seq_len);
  1778. break;
  1779. default:
  1780. dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
  1781. cc_unmap_aead_request(dev, req);
  1782. rc = -ENOTSUPP;
  1783. goto exit;
  1784. }
  1785. /* STAT_PHASE_3: Lock HW and push sequence */
  1786. rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
  1787. if (rc != -EINPROGRESS && rc != -EBUSY) {
  1788. dev_err(dev, "send_request() failed (rc=%d)\n", rc);
  1789. cc_unmap_aead_request(dev, req);
  1790. }
  1791. exit:
  1792. return rc;
  1793. }
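/*
 * Illustrative sketch (not compiled with the driver): for the
 * rfc3686(ctr(aes)) authenc modes, cc_proc_aead() above assembles the
 * 16-byte counter block as the 4-byte nonce from the key, the 8-byte
 * request IV and a big-endian 32-bit block counter initialised to 1.
 * The helper below is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void sketch_rfc3686_ctr_block(uint8_t blk[16], const uint8_t nonce[4],
				     const uint8_t iv[8])
{
	memcpy(blk, nonce, 4);		/* nonce from the tail of the key */
	memcpy(blk + 4, iv, 8);		/* per-request IV */
	blk[12] = 0;			/* 32-bit counter, big-endian, = 1 */
	blk[13] = 0;
	blk[14] = 0;
	blk[15] = 1;
}
#endif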
  1794. static int cc_aead_encrypt(struct aead_request *req)
  1795. {
  1796. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1797. int rc;
  1798. /* No generated IV required */
  1799. areq_ctx->backup_iv = req->iv;
  1800. areq_ctx->backup_giv = NULL;
  1801. areq_ctx->is_gcm4543 = false;
  1802. areq_ctx->plaintext_authenticate_only = false;
  1803. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
  1804. if (rc != -EINPROGRESS && rc != -EBUSY)
  1805. req->iv = areq_ctx->backup_iv;
  1806. return rc;
  1807. }
  1808. static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
  1809. {
  1810. /* Very similar to cc_aead_encrypt() above. */
  1811. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1812. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1813. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1814. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1815. int rc = -EINVAL;
  1816. if (!valid_assoclen(req)) {
  1817. dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
  1818. goto out;
  1819. }
  1820. /* No generated IV required */
  1821. areq_ctx->backup_iv = req->iv;
  1822. areq_ctx->backup_giv = NULL;
  1823. areq_ctx->is_gcm4543 = true;
  1824. cc_proc_rfc4309_ccm(req);
  1825. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
  1826. if (rc != -EINPROGRESS && rc != -EBUSY)
  1827. req->iv = areq_ctx->backup_iv;
  1828. out:
  1829. return rc;
  1830. }
  1831. static int cc_aead_decrypt(struct aead_request *req)
  1832. {
  1833. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1834. int rc;
  1835. /* No generated IV required */
  1836. areq_ctx->backup_iv = req->iv;
  1837. areq_ctx->backup_giv = NULL;
  1838. areq_ctx->is_gcm4543 = false;
  1839. areq_ctx->plaintext_authenticate_only = false;
  1840. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
  1841. if (rc != -EINPROGRESS && rc != -EBUSY)
  1842. req->iv = areq_ctx->backup_iv;
  1843. return rc;
  1844. }
  1845. static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
  1846. {
  1847. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1848. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1849. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1850. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1851. int rc = -EINVAL;
  1852. if (!valid_assoclen(req)) {
  1853. dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
  1854. goto out;
  1855. }
  1856. /* No generated IV required */
  1857. areq_ctx->backup_iv = req->iv;
  1858. areq_ctx->backup_giv = NULL;
  1859. areq_ctx->is_gcm4543 = true;
  1860. cc_proc_rfc4309_ccm(req);
  1861. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
  1862. if (rc != -EINPROGRESS && rc != -EBUSY)
  1863. req->iv = areq_ctx->backup_iv;
  1864. out:
  1865. return rc;
  1866. }
  1867. static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
  1868. unsigned int keylen)
  1869. {
  1870. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1871. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1872. dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
  1873. if (keylen < 4)
  1874. return -EINVAL;
  1875. keylen -= 4;
  1876. memcpy(ctx->ctr_nonce, key + keylen, 4);
  1877. return cc_aead_setkey(tfm, key, keylen);
  1878. }
  1879. static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
  1880. unsigned int keylen)
  1881. {
  1882. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1883. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1884. dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
  1885. if (keylen < 4)
  1886. return -EINVAL;
  1887. keylen -= 4;
  1888. memcpy(ctx->ctr_nonce, key + keylen, 4);
  1889. return cc_aead_setkey(tfm, key, keylen);
  1890. }
  1891. static int cc_gcm_setauthsize(struct crypto_aead *authenc,
  1892. unsigned int authsize)
  1893. {
  1894. switch (authsize) {
  1895. case 4:
  1896. case 8:
  1897. case 12:
  1898. case 13:
  1899. case 14:
  1900. case 15:
  1901. case 16:
  1902. break;
  1903. default:
  1904. return -EINVAL;
  1905. }
  1906. return cc_aead_setauthsize(authenc, authsize);
  1907. }
  1908. static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
  1909. unsigned int authsize)
  1910. {
  1911. struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
  1912. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1913. dev_dbg(dev, "authsize %d\n", authsize);
  1914. switch (authsize) {
  1915. case 8:
  1916. case 12:
  1917. case 16:
  1918. break;
  1919. default:
  1920. return -EINVAL;
  1921. }
  1922. return cc_aead_setauthsize(authenc, authsize);
  1923. }
  1924. static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
  1925. unsigned int authsize)
  1926. {
  1927. struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
  1928. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1929. dev_dbg(dev, "authsize %d\n", authsize);
  1930. if (authsize != 16)
  1931. return -EINVAL;
  1932. return cc_aead_setauthsize(authenc, authsize);
  1933. }
  1934. static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
  1935. {
  1936. /* Very similar to cc_aead_encrypt() above. */
  1937. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1938. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1939. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1940. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1941. int rc = -EINVAL;
  1942. if (!valid_assoclen(req)) {
  1943. dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
  1944. goto out;
  1945. }
  1946. /* No generated IV required */
  1947. areq_ctx->backup_iv = req->iv;
  1948. areq_ctx->backup_giv = NULL;
  1949. areq_ctx->plaintext_authenticate_only = false;
  1950. cc_proc_rfc4_gcm(req);
  1951. areq_ctx->is_gcm4543 = true;
  1952. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
  1953. if (rc != -EINPROGRESS && rc != -EBUSY)
  1954. req->iv = areq_ctx->backup_iv;
  1955. out:
  1956. return rc;
  1957. }
  1958. static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
  1959. {
  1960. /* Very similar to cc_aead_encrypt() above. */
  1961. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1962. int rc;
1963. /* plaintext is not encrypted with rfc4543 */
  1964. areq_ctx->plaintext_authenticate_only = true;
  1965. /* No generated IV required */
  1966. areq_ctx->backup_iv = req->iv;
  1967. areq_ctx->backup_giv = NULL;
  1968. cc_proc_rfc4_gcm(req);
  1969. areq_ctx->is_gcm4543 = true;
  1970. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
  1971. if (rc != -EINPROGRESS && rc != -EBUSY)
  1972. req->iv = areq_ctx->backup_iv;
  1973. return rc;
  1974. }
  1975. static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
  1976. {
  1977. /* Very similar to cc_aead_decrypt() above. */
  1978. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1979. struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  1980. struct device *dev = drvdata_to_dev(ctx->drvdata);
  1981. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  1982. int rc = -EINVAL;
  1983. if (!valid_assoclen(req)) {
  1984. dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
  1985. goto out;
  1986. }
  1987. /* No generated IV required */
  1988. areq_ctx->backup_iv = req->iv;
  1989. areq_ctx->backup_giv = NULL;
  1990. areq_ctx->plaintext_authenticate_only = false;
  1991. cc_proc_rfc4_gcm(req);
  1992. areq_ctx->is_gcm4543 = true;
  1993. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
  1994. if (rc != -EINPROGRESS && rc != -EBUSY)
  1995. req->iv = areq_ctx->backup_iv;
  1996. out:
  1997. return rc;
  1998. }
  1999. static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
  2000. {
  2001. /* Very similar to cc_aead_decrypt() above. */
  2002. struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  2003. int rc;
2004. /* plaintext is not decrypted with rfc4543 */
  2005. areq_ctx->plaintext_authenticate_only = true;
  2006. /* No generated IV required */
  2007. areq_ctx->backup_iv = req->iv;
  2008. areq_ctx->backup_giv = NULL;
  2009. cc_proc_rfc4_gcm(req);
  2010. areq_ctx->is_gcm4543 = true;
  2011. rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
  2012. if (rc != -EINPROGRESS && rc != -EBUSY)
  2013. req->iv = areq_ctx->backup_iv;
  2014. return rc;
  2015. }
  2016. /* aead alg */
  2017. static struct cc_alg_template aead_algs[] = {
  2018. {
  2019. .name = "authenc(hmac(sha1),cbc(aes))",
  2020. .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
  2021. .blocksize = AES_BLOCK_SIZE,
  2022. .template_aead = {
  2023. .setkey = cc_aead_setkey,
  2024. .setauthsize = cc_aead_setauthsize,
  2025. .encrypt = cc_aead_encrypt,
  2026. .decrypt = cc_aead_decrypt,
  2027. .init = cc_aead_init,
  2028. .exit = cc_aead_exit,
  2029. .ivsize = AES_BLOCK_SIZE,
  2030. .maxauthsize = SHA1_DIGEST_SIZE,
  2031. },
  2032. .cipher_mode = DRV_CIPHER_CBC,
  2033. .flow_mode = S_DIN_to_AES,
  2034. .auth_mode = DRV_HASH_SHA1,
  2035. .min_hw_rev = CC_HW_REV_630,
  2036. },
  2037. {
  2038. .name = "authenc(hmac(sha1),cbc(des3_ede))",
  2039. .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
  2040. .blocksize = DES3_EDE_BLOCK_SIZE,
  2041. .template_aead = {
  2042. .setkey = cc_aead_setkey,
  2043. .setauthsize = cc_aead_setauthsize,
  2044. .encrypt = cc_aead_encrypt,
  2045. .decrypt = cc_aead_decrypt,
  2046. .init = cc_aead_init,
  2047. .exit = cc_aead_exit,
  2048. .ivsize = DES3_EDE_BLOCK_SIZE,
  2049. .maxauthsize = SHA1_DIGEST_SIZE,
  2050. },
  2051. .cipher_mode = DRV_CIPHER_CBC,
  2052. .flow_mode = S_DIN_to_DES,
  2053. .auth_mode = DRV_HASH_SHA1,
  2054. .min_hw_rev = CC_HW_REV_630,
  2055. },
  2056. {
  2057. .name = "authenc(hmac(sha256),cbc(aes))",
  2058. .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
  2059. .blocksize = AES_BLOCK_SIZE,
  2060. .template_aead = {
  2061. .setkey = cc_aead_setkey,
  2062. .setauthsize = cc_aead_setauthsize,
  2063. .encrypt = cc_aead_encrypt,
  2064. .decrypt = cc_aead_decrypt,
  2065. .init = cc_aead_init,
  2066. .exit = cc_aead_exit,
  2067. .ivsize = AES_BLOCK_SIZE,
  2068. .maxauthsize = SHA256_DIGEST_SIZE,
  2069. },
  2070. .cipher_mode = DRV_CIPHER_CBC,
  2071. .flow_mode = S_DIN_to_AES,
  2072. .auth_mode = DRV_HASH_SHA256,
  2073. .min_hw_rev = CC_HW_REV_630,
  2074. },
  2075. {
  2076. .name = "authenc(hmac(sha256),cbc(des3_ede))",
  2077. .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
  2078. .blocksize = DES3_EDE_BLOCK_SIZE,
  2079. .template_aead = {
  2080. .setkey = cc_aead_setkey,
  2081. .setauthsize = cc_aead_setauthsize,
  2082. .encrypt = cc_aead_encrypt,
  2083. .decrypt = cc_aead_decrypt,
  2084. .init = cc_aead_init,
  2085. .exit = cc_aead_exit,
  2086. .ivsize = DES3_EDE_BLOCK_SIZE,
  2087. .maxauthsize = SHA256_DIGEST_SIZE,
  2088. },
  2089. .cipher_mode = DRV_CIPHER_CBC,
  2090. .flow_mode = S_DIN_to_DES,
  2091. .auth_mode = DRV_HASH_SHA256,
  2092. .min_hw_rev = CC_HW_REV_630,
  2093. },
  2094. {
  2095. .name = "authenc(xcbc(aes),cbc(aes))",
  2096. .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
  2097. .blocksize = AES_BLOCK_SIZE,
  2098. .template_aead = {
  2099. .setkey = cc_aead_setkey,
  2100. .setauthsize = cc_aead_setauthsize,
  2101. .encrypt = cc_aead_encrypt,
  2102. .decrypt = cc_aead_decrypt,
  2103. .init = cc_aead_init,
  2104. .exit = cc_aead_exit,
  2105. .ivsize = AES_BLOCK_SIZE,
  2106. .maxauthsize = AES_BLOCK_SIZE,
  2107. },
  2108. .cipher_mode = DRV_CIPHER_CBC,
  2109. .flow_mode = S_DIN_to_AES,
  2110. .auth_mode = DRV_HASH_XCBC_MAC,
  2111. .min_hw_rev = CC_HW_REV_630,
  2112. },
  2113. {
  2114. .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  2115. .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
  2116. .blocksize = 1,
  2117. .template_aead = {
  2118. .setkey = cc_aead_setkey,
  2119. .setauthsize = cc_aead_setauthsize,
  2120. .encrypt = cc_aead_encrypt,
  2121. .decrypt = cc_aead_decrypt,
  2122. .init = cc_aead_init,
  2123. .exit = cc_aead_exit,
  2124. .ivsize = CTR_RFC3686_IV_SIZE,
  2125. .maxauthsize = SHA1_DIGEST_SIZE,
  2126. },
  2127. .cipher_mode = DRV_CIPHER_CTR,
  2128. .flow_mode = S_DIN_to_AES,
  2129. .auth_mode = DRV_HASH_SHA1,
  2130. .min_hw_rev = CC_HW_REV_630,
  2131. },
  2132. {
  2133. .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  2134. .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
  2135. .blocksize = 1,
  2136. .template_aead = {
  2137. .setkey = cc_aead_setkey,
  2138. .setauthsize = cc_aead_setauthsize,
  2139. .encrypt = cc_aead_encrypt,
  2140. .decrypt = cc_aead_decrypt,
  2141. .init = cc_aead_init,
  2142. .exit = cc_aead_exit,
  2143. .ivsize = CTR_RFC3686_IV_SIZE,
  2144. .maxauthsize = SHA256_DIGEST_SIZE,
  2145. },
  2146. .cipher_mode = DRV_CIPHER_CTR,
  2147. .flow_mode = S_DIN_to_AES,
  2148. .auth_mode = DRV_HASH_SHA256,
  2149. .min_hw_rev = CC_HW_REV_630,
  2150. },
  2151. {
  2152. .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
  2153. .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
  2154. .blocksize = 1,
  2155. .template_aead = {
  2156. .setkey = cc_aead_setkey,
  2157. .setauthsize = cc_aead_setauthsize,
  2158. .encrypt = cc_aead_encrypt,
  2159. .decrypt = cc_aead_decrypt,
  2160. .init = cc_aead_init,
  2161. .exit = cc_aead_exit,
  2162. .ivsize = CTR_RFC3686_IV_SIZE,
  2163. .maxauthsize = AES_BLOCK_SIZE,
  2164. },
  2165. .cipher_mode = DRV_CIPHER_CTR,
  2166. .flow_mode = S_DIN_to_AES,
  2167. .auth_mode = DRV_HASH_XCBC_MAC,
  2168. .min_hw_rev = CC_HW_REV_630,
  2169. },
  2170. {
  2171. .name = "ccm(aes)",
  2172. .driver_name = "ccm-aes-ccree",
  2173. .blocksize = 1,
  2174. .template_aead = {
  2175. .setkey = cc_aead_setkey,
  2176. .setauthsize = cc_ccm_setauthsize,
  2177. .encrypt = cc_aead_encrypt,
  2178. .decrypt = cc_aead_decrypt,
  2179. .init = cc_aead_init,
  2180. .exit = cc_aead_exit,
  2181. .ivsize = AES_BLOCK_SIZE,
  2182. .maxauthsize = AES_BLOCK_SIZE,
  2183. },
  2184. .cipher_mode = DRV_CIPHER_CCM,
  2185. .flow_mode = S_DIN_to_AES,
  2186. .auth_mode = DRV_HASH_NULL,
  2187. .min_hw_rev = CC_HW_REV_630,
  2188. },
  2189. {
  2190. .name = "rfc4309(ccm(aes))",
  2191. .driver_name = "rfc4309-ccm-aes-ccree",
  2192. .blocksize = 1,
  2193. .template_aead = {
  2194. .setkey = cc_rfc4309_ccm_setkey,
  2195. .setauthsize = cc_rfc4309_ccm_setauthsize,
  2196. .encrypt = cc_rfc4309_ccm_encrypt,
  2197. .decrypt = cc_rfc4309_ccm_decrypt,
  2198. .init = cc_aead_init,
  2199. .exit = cc_aead_exit,
  2200. .ivsize = CCM_BLOCK_IV_SIZE,
  2201. .maxauthsize = AES_BLOCK_SIZE,
  2202. },
  2203. .cipher_mode = DRV_CIPHER_CCM,
  2204. .flow_mode = S_DIN_to_AES,
  2205. .auth_mode = DRV_HASH_NULL,
  2206. .min_hw_rev = CC_HW_REV_630,
  2207. },
  2208. {
  2209. .name = "gcm(aes)",
  2210. .driver_name = "gcm-aes-ccree",
  2211. .blocksize = 1,
  2212. .template_aead = {
  2213. .setkey = cc_aead_setkey,
  2214. .setauthsize = cc_gcm_setauthsize,
  2215. .encrypt = cc_aead_encrypt,
  2216. .decrypt = cc_aead_decrypt,
  2217. .init = cc_aead_init,
  2218. .exit = cc_aead_exit,
  2219. .ivsize = 12,
  2220. .maxauthsize = AES_BLOCK_SIZE,
  2221. },
  2222. .cipher_mode = DRV_CIPHER_GCTR,
  2223. .flow_mode = S_DIN_to_AES,
  2224. .auth_mode = DRV_HASH_NULL,
  2225. .min_hw_rev = CC_HW_REV_630,
  2226. },
  2227. {
  2228. .name = "rfc4106(gcm(aes))",
  2229. .driver_name = "rfc4106-gcm-aes-ccree",
  2230. .blocksize = 1,
  2231. .template_aead = {
  2232. .setkey = cc_rfc4106_gcm_setkey,
  2233. .setauthsize = cc_rfc4106_gcm_setauthsize,
  2234. .encrypt = cc_rfc4106_gcm_encrypt,
  2235. .decrypt = cc_rfc4106_gcm_decrypt,
  2236. .init = cc_aead_init,
  2237. .exit = cc_aead_exit,
  2238. .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
  2239. .maxauthsize = AES_BLOCK_SIZE,
  2240. },
  2241. .cipher_mode = DRV_CIPHER_GCTR,
  2242. .flow_mode = S_DIN_to_AES,
  2243. .auth_mode = DRV_HASH_NULL,
  2244. .min_hw_rev = CC_HW_REV_630,
  2245. },
  2246. {
  2247. .name = "rfc4543(gcm(aes))",
  2248. .driver_name = "rfc4543-gcm-aes-ccree",
  2249. .blocksize = 1,
  2250. .template_aead = {
  2251. .setkey = cc_rfc4543_gcm_setkey,
  2252. .setauthsize = cc_rfc4543_gcm_setauthsize,
  2253. .encrypt = cc_rfc4543_gcm_encrypt,
  2254. .decrypt = cc_rfc4543_gcm_decrypt,
  2255. .init = cc_aead_init,
  2256. .exit = cc_aead_exit,
  2257. .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
  2258. .maxauthsize = AES_BLOCK_SIZE,
  2259. },
  2260. .cipher_mode = DRV_CIPHER_GCTR,
  2261. .flow_mode = S_DIN_to_AES,
  2262. .auth_mode = DRV_HASH_NULL,
  2263. .min_hw_rev = CC_HW_REV_630,
  2264. },
  2265. };
  2266. static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
  2267. struct device *dev)
  2268. {
  2269. struct cc_crypto_alg *t_alg;
  2270. struct aead_alg *alg;
  2271. t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
  2272. if (!t_alg)
  2273. return ERR_PTR(-ENOMEM);
  2274. alg = &tmpl->template_aead;
  2275. snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
  2276. snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  2277. tmpl->driver_name);
  2278. alg->base.cra_module = THIS_MODULE;
  2279. alg->base.cra_priority = CC_CRA_PRIO;
  2280. alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
  2281. alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  2282. alg->init = cc_aead_init;
  2283. alg->exit = cc_aead_exit;
  2284. t_alg->aead_alg = *alg;
  2285. t_alg->cipher_mode = tmpl->cipher_mode;
  2286. t_alg->flow_mode = tmpl->flow_mode;
  2287. t_alg->auth_mode = tmpl->auth_mode;
  2288. return t_alg;
  2289. }
  2290. int cc_aead_free(struct cc_drvdata *drvdata)
  2291. {
  2292. struct cc_crypto_alg *t_alg, *n;
  2293. struct cc_aead_handle *aead_handle =
  2294. (struct cc_aead_handle *)drvdata->aead_handle;
  2295. if (aead_handle) {
  2296. /* Remove registered algs */
  2297. list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
  2298. entry) {
  2299. crypto_unregister_aead(&t_alg->aead_alg);
  2300. list_del(&t_alg->entry);
  2301. kfree(t_alg);
  2302. }
  2303. kfree(aead_handle);
  2304. drvdata->aead_handle = NULL;
  2305. }
  2306. return 0;
  2307. }
  2308. int cc_aead_alloc(struct cc_drvdata *drvdata)
  2309. {
  2310. struct cc_aead_handle *aead_handle;
  2311. struct cc_crypto_alg *t_alg;
  2312. int rc = -ENOMEM;
  2313. int alg;
  2314. struct device *dev = drvdata_to_dev(drvdata);
  2315. aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
  2316. if (!aead_handle) {
  2317. rc = -ENOMEM;
  2318. goto fail0;
  2319. }
  2320. INIT_LIST_HEAD(&aead_handle->aead_list);
  2321. drvdata->aead_handle = aead_handle;
  2322. aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
  2323. MAX_HMAC_DIGEST_SIZE);
  2324. if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
  2325. dev_err(dev, "SRAM pool exhausted\n");
  2326. rc = -ENOMEM;
  2327. goto fail1;
  2328. }
  2329. /* Linux crypto */
  2330. for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
  2331. if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
  2332. continue;
  2333. t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
  2334. if (IS_ERR(t_alg)) {
  2335. rc = PTR_ERR(t_alg);
  2336. dev_err(dev, "%s alg allocation failed\n",
  2337. aead_algs[alg].driver_name);
  2338. goto fail1;
  2339. }
  2340. t_alg->drvdata = drvdata;
  2341. rc = crypto_register_aead(&t_alg->aead_alg);
  2342. if (rc) {
  2343. dev_err(dev, "%s alg registration failed\n",
  2344. t_alg->aead_alg.base.cra_driver_name);
  2345. goto fail2;
  2346. } else {
  2347. list_add_tail(&t_alg->entry, &aead_handle->aead_list);
  2348. dev_dbg(dev, "Registered %s\n",
  2349. t_alg->aead_alg.base.cra_driver_name);
  2350. }
  2351. }
  2352. return 0;
  2353. fail2:
  2354. kfree(t_alg);
  2355. fail1:
  2356. cc_aead_free(drvdata);
  2357. fail0:
  2358. return rc;
  2359. }