sa2ul.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * AM6 SA2UL crypto accelerator driver
  4. *
  5. * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
  6. *
  7. * Authors: Keerthy
  8. * Vitaly Andrianov
  9. */
  10. #include <linux/clk.h>
  11. #include <linux/module.h>
  12. #include <linux/dmapool.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/pm_runtime.h>
  15. #include <linux/dmaengine.h>
  16. #include <linux/cryptohash.h>
  17. #include <linux/mod_devicetable.h>
  18. #include <crypto/authenc.h>
  19. #include <crypto/des.h>
  20. #include <crypto/internal/aead.h>
  21. #include <crypto/internal/skcipher.h>
  22. #include <crypto/internal/hash.h>
  23. #include <crypto/scatterwalk.h>
  24. #include <crypto/sha.h>
  25. #include "sa2ul.h"
  26. /* Byte offset for key in encryption security context */
  27. #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
  28. /* Byte offset for Aux-1 in encryption security context */
  29. #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
  30. #define SA_CMDL_UPD_ENC 0x0001
  31. #define SA_CMDL_UPD_AUTH 0x0002
  32. #define SA_CMDL_UPD_ENC_IV 0x0004
  33. #define SA_CMDL_UPD_AUTH_IV 0x0008
  34. #define SA_CMDL_UPD_AUX_KEY 0x0010
  35. #define SA_AUTH_SUBKEY_LEN 16
  36. #define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
  37. #define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
  38. #define MODE_CONTROL_BYTES 27
  39. #define SA_HASH_PROCESSING 0
  40. #define SA_CRYPTO_PROCESSING 0
  41. #define SA_UPLOAD_HASH_TO_TLR BIT(6)
  42. #define SA_SW0_FLAGS_MASK 0xF0000
  43. #define SA_SW0_CMDL_INFO_MASK 0x1F00000
  44. #define SA_SW0_CMDL_PRESENT BIT(4)
  45. #define SA_SW0_ENG_ID_MASK 0x3E000000
  46. #define SA_SW0_DEST_INFO_PRESENT BIT(30)
  47. #define SA_SW2_EGRESS_LENGTH 0xFF000000
  48. #define SHA256_DIGEST_WORDS 8
  49. /* Make 32-bit word from 4 bytes */
  50. #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
  51. ((b2) << 8) | (b3))
  52. /* size of SCCTL structure in bytes */
  53. #define SA_SCCTL_SZ 16
  54. /* Max Authentication tag size */
  55. #define SA_MAX_AUTH_TAG_SZ 64
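/*
 * Device pointer for the SA2UL instance, cached by the driver so that the
 * crypto API callbacks (which carry no device context) can perform DMA
 * mapping and logging.
 */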
  56. static struct device *sa_k3_dev;
/**
 * struct sa_cmdl_cfg - Command label configuration descriptor
 * @enc1st: Whether encryption is performed before authentication
 * @aalg: Authentication algorithm ID
 * @enc_eng_id: Encryption engine ID supported by the SA hardware
 * @auth_eng_id: Authentication engine ID
 * @iv_size: Initialization vector size
 * @akey: Authentication key
 * @akey_len: Authentication key length
 * @auth_subkey_len: Authentication subkey length (used by AES-XCBC/CMAC)
 */
  67. struct sa_cmdl_cfg {
  68. int enc1st;
  69. int aalg;
  70. u8 enc_eng_id;
  71. u8 auth_eng_id;
  72. u8 iv_size;
  73. const u8 *akey;
  74. u16 akey_len;
  75. u16 auth_subkey_len;
  76. };
  77. /**
  78. * struct algo_data - Crypto algorithm specific data
  79. * @enc_eng: Encryption engine info structure
  80. * @auth_eng: Authentication engine info structure
  81. * @auth_ctrl: Authentication control word
  82. * @hash_size: Size of Digest
  83. * @ealg_id: Encryption Algorithm ID
  84. * @aalg_id: Authentication algorithm ID
  85. * @mci_enc: Mode Control Instruction for Encryption algorithm
  86. * @mci_dec: Mode Control Instruction for Decryption
  87. * @inv_key: Whether the encryption algorithm demands key inversion
  88. * @keyed_mac: Whether the Authentication algorithm has Key
  89. * @prep_iopad: Function pointer to generate intermediate ipad/opad
  90. */
  91. struct algo_data {
  92. struct sa_eng_info enc_eng;
  93. struct sa_eng_info auth_eng;
  94. u8 auth_ctrl;
  95. u8 hash_size;
  96. u8 ealg_id;
  97. u8 aalg_id;
  98. u8 *mci_enc;
  99. u8 *mci_dec;
  100. bool inv_key;
  101. bool keyed_mac;
  102. void (*prep_iopad)(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad);
  103. };
/**
 * struct sa_alg_tmpl - A generic template encompassing crypto/aead algorithms
 * @type: Algorithm type (CRYPTO_ALG_TYPE value from <linux/crypto.h>)
 * @alg: A union of aead/crypto algorithm types
 * @registered: Flag indicating if the crypto algorithm is already registered
 */
  109. struct sa_alg_tmpl {
  110. u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
  111. union {
  112. struct crypto_alg crypto;
  113. struct aead_alg aead;
  114. } alg;
  115. int registered;
  116. };
  117. /**
  118. * struct sa_rx_data: RX Packet miscellaneous data place holder
  119. * @req: crypto request data pointer
  120. * @tx_in: dma_async_tx_descriptor pointer for rx channel
  121. * @enc: Flag indicating either encryption or decryption
  122. */
  123. struct sa_rx_data {
  124. void *req;
  125. struct dma_async_tx_descriptor *tx_in;
  126. u8 enc;
  127. };
  128. /*
  129. * Mode Control Instructions for various Key lengths 128, 192, 256
  130. * For CBC (Cipher Block Chaining) mode for encryption
  131. */
  132. static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
  133. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
  134. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  135. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  136. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
  137. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  138. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  139. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
  140. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  141. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  142. };
  143. /*
  144. * Mode Control Instructions for various Key lengths 128, 192, 256
  145. * For CBC (Cipher Block Chaining) mode for decryption
  146. */
  147. static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
  148. { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  149. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  150. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  151. { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  152. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  153. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  154. { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  155. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  156. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  157. };
  158. /*
  159. * Mode Control Instructions for various Key lengths 128, 192, 256
  160. * For ECB (Electronic Code Book) mode for encryption
  161. */
  162. static u8 mci_ecb_enc_array[3][27] = {
  163. { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  164. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  165. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  166. { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  167. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  168. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  169. { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  170. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  171. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  172. };
  173. /*
  174. * Mode Control Instructions for various Key lengths 128, 192, 256
  175. * For ECB (Electronic Code Book) mode for decryption
  176. */
  177. static u8 mci_ecb_dec_array[3][27] = {
  178. { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  179. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  180. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  181. { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  182. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  183. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  184. { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  185. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  186. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  187. };
  188. /*
* Mode Control Instructions for the 3DES (Triple DES) algorithm,
* for CBC (Cipher Block Chaining) and ECB modes,
* encryption and decryption respectively
  192. */
  193. static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
  194. 0x20, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
  195. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  196. 0x00, 0x00, 0x00,
  197. };
  198. static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
  199. 0x30, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
  200. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  201. 0x00, 0x00, 0x00,
  202. };
  203. static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
  204. 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
  205. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  206. 0x00, 0x00, 0x00,
  207. };
  208. static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
  209. 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
  210. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  211. 0x00, 0x00, 0x00,
  212. };
  213. /*
  214. * Perform 16 byte or 128 bit swizzling
* The SA2UL expects the security context in little-endian byte order
* and the bus width is 128 bits (16 bytes),
* hence swap 16 bytes at a time from higher to lower address
  218. */
  219. static void sa_swiz_128(u8 *in, u16 len)
  220. {
  221. u8 data[16];
  222. int i, j;
  223. for (i = 0; i < len; i += 16) {
  224. memcpy(data, &in[i], 16);
  225. for (j = 0; j < 16; j++)
  226. in[i + j] = data[15 - j];
  227. }
  228. }
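/*
 * HMAC (RFC 2104) derives two block-sized pads from the key:
 * k_ipad = key ^ 0x36.. and k_opad = key ^ 0x5c.., with the key
 * zero-padded to the hash block size (SHA_MESSAGE_BYTES).  The hardware
 * only needs the intermediate digest of each pad, so the helpers below
 * run a single transform of the underlying hash over each padded block.
 */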
/* Prepare the ipad and opad from the key, as per the HMAC algorithm (step 1) */
  230. static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
  231. {
  232. int i;
  233. for (i = 0; i < key_sz; i++) {
  234. k_ipad[i] = key[i] ^ 0x36;
  235. k_opad[i] = key[i] ^ 0x5c;
  236. }
  237. /* Instead of XOR with 0 */
  238. for (; i < SHA_MESSAGE_BYTES; i++) {
  239. k_ipad[i] = 0x36;
  240. k_opad[i] = 0x5c;
  241. }
  242. }
  243. /* Generate HMAC-SHA1 intermediate Hash */
  244. static
  245. void sa_hmac_sha1_get_pad(const u8 *key, u16 key_sz, u32 *ipad, u32 *opad)
  246. {
  247. u32 ws[SHA_WORKSPACE_WORDS];
  248. u8 k_ipad[SHA_MESSAGE_BYTES];
  249. u8 k_opad[SHA_MESSAGE_BYTES];
  250. int i;
  251. prepare_kiopad(k_ipad, k_opad, key, key_sz);
  252. /* SHA-1 on k_ipad */
  253. sha_init(ipad);
  254. sha_transform(ipad, k_ipad, ws);
  255. for (i = 0; i < SHA_DIGEST_WORDS; i++)
  256. ipad[i] = cpu_to_be32(ipad[i]);
  257. /* SHA-1 on k_opad */
  258. sha_init(opad);
  259. sha_transform(opad, k_opad, ws);
  260. for (i = 0; i < SHA_DIGEST_WORDS; i++)
  261. opad[i] = cpu_to_be32(opad[i]);
  262. }
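/*
 * Local helper: load the SHA-256 initial hash values (H0..H7, FIPS 180-4)
 * so that sha256_transform() can be run directly on a padded block.
 */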
static void sha256_init(u32 *buf)
  264. {
  265. buf[0] = SHA256_H0;
  266. buf[1] = SHA256_H1;
  267. buf[2] = SHA256_H2;
  268. buf[3] = SHA256_H3;
  269. buf[4] = SHA256_H4;
  270. buf[5] = SHA256_H5;
  271. buf[6] = SHA256_H6;
  272. buf[7] = SHA256_H7;
  273. }
  274. static void sa_hmac_sha256_get_pad(const u8 *key, u16 key_sz, u32 *ipad,
  275. u32 *opad)
  276. {
  277. u8 k_ipad[SHA_MESSAGE_BYTES];
  278. u8 k_opad[SHA_MESSAGE_BYTES];
  279. int i;
  280. prepare_kiopad(k_ipad, k_opad, key, key_sz);
  281. /* SHA-256 on k_ipad */
  282. sha256_init(ipad);
  283. sha256_transform(ipad, k_ipad);
  284. for (i = 0; i < SHA256_DIGEST_WORDS; i++)
  285. ipad[i] = cpu_to_be32(ipad[i]);
  286. /* SHA-256 on k_opad */
  287. sha256_init(opad);
  288. sha256_transform(opad, k_opad);
  289. for (i = 0; i < SHA256_DIGEST_WORDS; i++)
  290. opad[i] = cpu_to_be32(opad[i]);
  291. }
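/*
 * For AES-CBC decryption the engine does not take the raw cipher key; it
 * takes round-key material from near the end of the software-expanded AES
 * key schedule (the "inverse" key).  sa_aes_inv_key() performs the key
 * expansion and copies the relevant words out of the schedule.
 */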
  292. /* Derive the inverse key used in AES-CBC decryption operation */
  293. static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
  294. {
  295. struct crypto_aes_ctx ctx;
  296. int key_pos;
  297. if (crypto_aes_expand_key(&ctx, key, key_sz)) {
  298. pr_err("%s: bad key len(%d)\n", __func__, key_sz);
  299. return -EINVAL;
  300. }
/* Based on the crypto_aes_expand_key() logic */
  302. switch (key_sz) {
  303. case AES_KEYSIZE_128:
  304. case AES_KEYSIZE_192:
  305. key_pos = key_sz + 24;
  306. break;
  307. case AES_KEYSIZE_256:
  308. key_pos = key_sz + 24 - 4;
  309. break;
  310. default:
  311. pr_err("%s: bad key len(%d)\n", __func__, key_sz);
  312. return -EINVAL;
  313. }
  314. memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
  315. return 0;
  316. }
  317. /* Set Security context for the encryption engine */
  318. static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
  319. u16 aad_len, u8 enc, u8 *sc_buf)
  320. {
  321. const u8 *mci = NULL;
  322. /* Set Encryption mode selector to crypto processing */
  323. sc_buf[0] = SA_CRYPTO_PROCESSING;
  324. if (enc)
  325. mci = ad->mci_enc;
  326. else
  327. mci = ad->mci_dec;
  328. /* Set the mode control instructions in security context */
  329. if (mci)
  330. memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
  331. /* For AES-CBC decryption get the inverse key */
  332. if (ad->inv_key && !enc) {
  333. if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
  334. return -EINVAL;
/* For all other cases: the key is used as-is */
  336. } else {
  337. memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
  338. }
  339. return 0;
  340. }
  341. /* Set Security context for the authentication engine */
  342. static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
  343. u8 *sc_buf)
  344. {
  345. u32 ipad[64], opad[64];
  346. /* Set Authentication mode selector to hash processing */
  347. sc_buf[0] = SA_HASH_PROCESSING;
  348. /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
  349. sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
  350. sc_buf[1] |= ad->auth_ctrl;
  351. /* Copy the keys or ipad/opad */
  352. if (ad->keyed_mac) {
  353. ad->prep_iopad(key, key_sz, ipad, opad);
  354. /* Copy ipad to AuthKey */
  355. memcpy(&sc_buf[32], ipad, ad->hash_size);
  356. /* Copy opad to Aux-1 */
  357. memcpy(&sc_buf[64], opad, ad->hash_size);
  358. }
  359. }
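/* Copy an 8-byte or 16-byte IV into the command label as big-endian 32-bit words */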
  360. static inline void sa_copy_iv(u32 *out, const u8 *iv, bool size16)
  361. {
  362. int j;
  363. for (j = 0; j < ((size16) ? 4 : 2); j++) {
  364. *out = cpu_to_be32(*((u32 *)iv));
  365. iv += 4;
  366. out++;
  367. }
  368. }
  369. /* Format general command label */
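/*
 * The label is made up of one SA_CMDL_HEADER_SIZE_BYTES header per engine
 * pass, optionally followed by option bytes such as the IV or the auth
 * subkey.  cfg->enc1st selects whether the encryption or authentication
 * pass comes first; upd_info records the word indices that
 * sa_update_cmdl() must patch for each request (payload size, offset,
 * IV, aux key).
 */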
  370. static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
  371. struct sa_cmdl_upd_info *upd_info)
  372. {
  373. u8 enc_offset = 0, auth_offset = 0, total = 0;
  374. u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
  375. u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
  376. u32 *word_ptr = (u32 *)cmdl;
  377. int i;
  378. /* Clear the command label */
  379. memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
/* Initialize the command update structure */
  381. memzero_explicit(upd_info, sizeof(*upd_info));
  382. if (cfg->enc1st) {
  383. if (cfg->enc_eng_id != SA_ENG_ID_NONE)
  384. auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
  385. if (cfg->iv_size)
  386. auth_offset += cfg->iv_size;
  387. if (cfg->auth_eng_id != SA_ENG_ID_NONE)
  388. enc_next_eng = cfg->auth_eng_id;
  389. else
  390. enc_next_eng = SA_ENG_ID_OUTPORT2;
  391. } else {
  392. if (cfg->auth_eng_id != SA_ENG_ID_NONE)
  393. enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
  394. if (cfg->auth_subkey_len)
  395. enc_offset += cfg->auth_subkey_len;
  396. if (cfg->enc_eng_id != SA_ENG_ID_NONE)
  397. auth_next_eng = cfg->enc_eng_id;
  398. else
  399. auth_next_eng = SA_ENG_ID_OUTPORT2;
  400. }
  401. if (cfg->enc_eng_id != SA_ENG_ID_NONE) {
  402. upd_info->flags |= SA_CMDL_UPD_ENC;
  403. upd_info->enc_size.index = enc_offset >> 2;
  404. upd_info->enc_offset.index = upd_info->enc_size.index + 1;
  405. /* Encryption command label */
  406. cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
  407. /* Encryption modes requiring IV */
  408. if (cfg->iv_size) {
  409. upd_info->flags |= SA_CMDL_UPD_ENC_IV;
  410. upd_info->enc_iv.index =
  411. (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  412. upd_info->enc_iv.size = cfg->iv_size;
  413. cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  414. SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
  415. cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  416. (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
  417. enc_offset += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
  418. } else {
  419. cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  420. SA_CMDL_HEADER_SIZE_BYTES;
  421. enc_offset += SA_CMDL_HEADER_SIZE_BYTES;
  422. }
  423. }
  424. if (cfg->auth_eng_id != SA_ENG_ID_NONE) {
  425. upd_info->flags |= SA_CMDL_UPD_AUTH;
  426. upd_info->auth_size.index = auth_offset >> 2;
  427. upd_info->auth_offset.index = upd_info->auth_size.index + 1;
  428. cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
  429. /* Algorithm with subkeys */
  430. if (cfg->aalg == SA_AALG_ID_AES_XCBC ||
  431. cfg->aalg == SA_AALG_ID_CMAC) {
  432. upd_info->flags |= SA_CMDL_UPD_AUX_KEY;
  433. upd_info->aux_key_info.index =
  434. (auth_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  435. cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  436. SA_CMDL_HEADER_SIZE_BYTES +
  437. cfg->auth_subkey_len;
  438. cmdl[auth_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  439. (SA_CTX_ENC_AUX1_OFFSET |
  440. (cfg->auth_subkey_len >> 3));
  441. auth_offset += SA_CMDL_HEADER_SIZE_BYTES +
  442. cfg->auth_subkey_len;
  443. } else {
  444. cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  445. SA_CMDL_HEADER_SIZE_BYTES;
  446. auth_offset += SA_CMDL_HEADER_SIZE_BYTES;
  447. }
  448. }
  449. if (cfg->enc1st)
  450. total = auth_offset;
  451. else
  452. total = enc_offset;
  453. total = roundup(total, 8);
  454. for (i = 0; i < total / 4; i++)
  455. word_ptr[i] = be32_to_cpu(word_ptr[i]);
  456. return total;
  457. }
  458. /* Update Command label */
  459. static inline void
  460. sa_update_cmdl(struct device *dev, u8 enc_offset, u16 enc_size, u8 *enc_iv,
  461. u8 auth_offset, u16 auth_size, u8 *auth_iv, u8 aad_size,
  462. u8 *aad, struct sa_cmdl_upd_info *upd_info, u32 *cmdl)
  463. {
  464. int i = 0, j;
  465. if (upd_info->submode != SA_MODE_GEN) {
  466. dev_err(dev, "unsupported mode(%d)\n", upd_info->submode);
  467. return;
  468. }
  469. if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
  470. cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
  471. cmdl[upd_info->enc_size.index] |= enc_size;
  472. cmdl[upd_info->enc_offset.index] &=
  473. ~SA_CMDL_SOP_BYPASS_LEN_MASK;
  474. cmdl[upd_info->enc_offset.index] |=
  475. ((u32)enc_offset << __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
  476. if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
  477. u32 *data = &cmdl[upd_info->enc_iv.index];
  478. for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
  479. data[j] = cpu_to_be32(*((u32 *)enc_iv));
  480. enc_iv += 4;
  481. }
  482. }
  483. }
  484. if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
  485. cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
  486. cmdl[upd_info->auth_size.index] |= auth_size;
  487. cmdl[upd_info->auth_offset.index] &=
  488. ~SA_CMDL_SOP_BYPASS_LEN_MASK;
  489. cmdl[upd_info->auth_offset.index] |= ((u32)auth_offset <<
  490. __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
  491. if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
  492. sa_copy_iv(&cmdl[upd_info->auth_iv.index], auth_iv,
  493. (upd_info->auth_iv.size > 8));
  494. }
  495. if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
  496. int offset = (auth_size & 0xF) ? 4 : 0;
  497. memcpy(&cmdl[upd_info->aux_key_info.index],
  498. &upd_info->aux_key[offset], 16);
  499. }
  500. }
  501. }
  502. /* Format SWINFO words to be sent to SA */
  503. static
  504. void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
  505. u8 cmdl_present, u8 cmdl_offset, u8 flags,
  506. u8 hash_size, u32 *swinfo)
  507. {
  508. swinfo[0] = sc_id;
  509. swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
  510. if (likely(cmdl_present))
  511. swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
  512. __ffs(SA_SW0_CMDL_INFO_MASK));
  513. swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
  514. swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
  515. swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
  516. swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
  517. swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
  518. }
  519. /* Dump the security context */
  520. static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
  521. {
  522. #ifdef DEBUG
  523. dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
  524. print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
  525. 16, 1, buf, SA_CTX_MAX_SZ, false);
  526. #endif
  527. }
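/*
 * Build the per-direction security context: SCCTL/PHP header, then the
 * encryption and/or authentication engine contexts in processing order,
 * swizzled to the 128-bit bus byte order, and finally the SWINFO words
 * that tell the SA which engine to enter first.
 */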
  528. static
  529. int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
  530. u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
  531. struct algo_data *ad, u8 enc, u32 *swinfo, bool auth_req)
  532. {
  533. int use_enc = 0;
  534. int enc_sc_offset = 0, auth_sc_offset = 0;
  535. u8 *sc_buf = ctx->sc;
  536. u16 sc_id = ctx->sc_id;
  537. u16 aad_len = 0; /* Currently not supporting AEAD algo */
  538. u8 first_engine;
  539. memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
  540. if (ad->auth_eng.eng_id <= SA_ENG_ID_EM2 || !auth_req)
  541. use_enc = 1;
  542. /* Determine the order of encryption & Authentication contexts */
  543. if (enc || !use_enc) {
  544. if (auth_req) {
  545. enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  546. auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
  547. } else {
  548. enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  549. }
  550. } else {
  551. auth_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  552. enc_sc_offset = auth_sc_offset + ad->auth_eng.sc_size;
  553. }
  554. /* SCCTL Owner info: 0=host, 1=CP_ACE */
  555. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
  556. /* SCCTL F/E control */
  557. if (auth_req)
  558. sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
  559. else
  560. sc_buf[1] = SA_SCCTL_FE_ENC;
  561. memcpy(&sc_buf[2], &sc_id, 2);
  562. sc_buf[4] = 0x0;
  563. sc_buf[5] = 0x0;
  564. sc_buf[6] = 0x0;
  565. sc_buf[7] = 0x0;
  566. /* Initialize the rest of PHP context */
  567. memzero_explicit(sc_buf + SA_SCCTL_SZ, SA_CTX_PHP_PE_CTX_SZ -
  568. SA_SCCTL_SZ);
  569. /* Prepare context for encryption engine */
  570. if (ad->enc_eng.sc_size) {
  571. if (sa_set_sc_enc(ad, enc_key, enc_key_sz, aad_len,
  572. enc, &sc_buf[enc_sc_offset]))
  573. return -EINVAL;
  574. }
  575. /* Prepare context for authentication engine */
  576. if (ad->auth_eng.sc_size) {
  577. if (use_enc) {
  578. if (sa_set_sc_enc(ad, auth_key, auth_key_sz,
  579. aad_len, 0, &sc_buf[auth_sc_offset]))
  580. return -EINVAL;
  581. } else {
  582. sa_set_sc_auth(ad, auth_key, auth_key_sz,
  583. &sc_buf[auth_sc_offset]);
  584. }
  585. }
  586. /* Set the ownership of context to CP_ACE */
  587. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
  588. /* swizzle the security context */
  589. sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
  590. /* Setup SWINFO */
  591. if (!auth_req)
  592. first_engine = ad->enc_eng.eng_id;
  593. else
  594. first_engine = enc ? ad->enc_eng.eng_id : ad->auth_eng.eng_id;
  595. if (auth_req) {
  596. if (!ad->hash_size)
  597. return -EINVAL;
/* Round up the tag size to a multiple of 8 */
  599. ad->hash_size = roundup(ad->hash_size, 8);
  600. }
  601. sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
  602. SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
  603. sa_dump_sc(sc_buf, ctx->sc_phys);
  604. return 0;
  605. }
  606. /* Free the per direction context memory */
  607. static void sa_free_ctx_info(struct sa_ctx_info *ctx,
  608. struct sa_crypto_data *data)
  609. {
  610. unsigned long bn;
  611. bn = ctx->sc_id - data->sc_id_start;
  612. spin_lock(&data->scid_lock);
  613. __clear_bit(bn, data->ctx_bm);
  614. data->sc_id--;
  615. spin_unlock(&data->scid_lock);
  616. if (ctx->sc) {
  617. dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
  618. ctx->sc = NULL;
  619. }
  620. }
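/*
 * Allocate a security-context ID from the bitmap and back it with a
 * buffer from the DMA-coherent pool.
 */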
  621. static int sa_init_ctx_info(struct sa_ctx_info *ctx,
  622. struct sa_crypto_data *data)
  623. {
  624. unsigned long bn;
  625. int err;
  626. spin_lock(&data->scid_lock);
  627. bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
  628. __set_bit(bn, data->ctx_bm);
  629. data->sc_id++;
  630. spin_unlock(&data->scid_lock);
  631. ctx->sc_id = (u16)(data->sc_id_start + bn);
  632. ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
  633. if (!ctx->sc) {
  634. dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
  635. err = -ENOMEM;
  636. goto scid_rollback;
  637. }
  638. return 0;
  639. scid_rollback:
  640. spin_lock(&data->scid_lock);
  641. __clear_bit(bn, data->ctx_bm);
  642. data->sc_id--;
  643. spin_unlock(&data->scid_lock);
  644. return err;
  645. }
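/* ablkcipher tfm exit: release the per-direction (enc/dec) security contexts */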
  646. static void sa_aes_cra_exit(struct crypto_tfm *tfm)
  647. {
  648. struct crypto_alg *alg = tfm->__crt_alg;
  649. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  650. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  651. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  652. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  653. ctx->dec.sc_id, &ctx->dec.sc_phys);
  654. if ((alg->cra_flags & CRYPTO_ALG_TYPE_ABLKCIPHER)
  655. == CRYPTO_ALG_TYPE_ABLKCIPHER) {
  656. sa_free_ctx_info(&ctx->enc, data);
  657. sa_free_ctx_info(&ctx->dec, data);
  658. }
  659. }
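/* ablkcipher tfm init: allocate the per-direction (enc/dec) security contexts */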
  660. static int sa_aes_cra_init(struct crypto_tfm *tfm)
  661. {
  662. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  663. struct crypto_alg *alg = tfm->__crt_alg;
  664. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  665. int ret;
  666. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
  667. CRYPTO_ALG_TYPE_ABLKCIPHER) {
  668. memzero_explicit(ctx, sizeof(*ctx));
  669. ctx->dev_data = data;
  670. ret = sa_init_ctx_info(&ctx->enc, data);
  671. if (ret)
  672. return ret;
  673. ret = sa_init_ctx_info(&ctx->dec, data);
  674. if (ret) {
  675. sa_free_ctx_info(&ctx->enc, data);
  676. return ret;
  677. }
  678. }
  679. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  680. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  681. ctx->dec.sc_id, &ctx->dec.sc_phys);
  682. return 0;
  683. }
  684. static int sa_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  685. unsigned int keylen, struct algo_data *ad)
  686. {
  687. struct sa_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  688. const char *cra_name;
  689. int cmdl_len;
  690. struct sa_cmdl_cfg cfg;
  691. if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  692. keylen != AES_KEYSIZE_256)
  693. return -EINVAL;
  694. cra_name = crypto_tfm_alg_name(&tfm->base);
  695. memzero_explicit(&cfg, sizeof(cfg));
  696. cfg.enc1st = 1;
  697. cfg.enc_eng_id = ad->enc_eng.eng_id;
  698. cfg.iv_size = crypto_ablkcipher_ivsize(tfm);
  699. cfg.auth_eng_id = SA_ENG_ID_NONE;
  700. cfg.auth_subkey_len = 0;
  701. /* Setup Encryption Security Context & Command label template */
  702. if (sa_init_sc(&ctx->enc, key, keylen,
  703. NULL, 0, ad, 1, &ctx->enc.epib[1], false))
  704. goto badkey;
  705. cmdl_len = sa_format_cmdl_gen(&cfg,
  706. (u8 *)ctx->enc.cmdl,
  707. &ctx->enc.cmdl_upd_info);
  708. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  709. goto badkey;
  710. ctx->enc.cmdl_size = cmdl_len;
  711. /* Setup Decryption Security Context & Command label template */
  712. if (sa_init_sc(&ctx->dec, key, keylen,
  713. NULL, 0, ad, 0, &ctx->dec.epib[1], false))
  714. goto badkey;
  715. cfg.enc1st = 0;
  716. cfg.enc_eng_id = ad->enc_eng.eng_id;
  717. cfg.auth_eng_id = SA_ENG_ID_NONE;
  718. cfg.auth_subkey_len = 0;
  719. cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
  720. &ctx->dec.cmdl_upd_info);
  721. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  722. goto badkey;
  723. ctx->dec.cmdl_size = cmdl_len;
  724. kfree(ad);
  725. return 0;
badkey:
dev_err(sa_k3_dev, "%s: badkey\n", __func__);
/* ad was allocated by the setkey wrapper; don't leak it on failure */
kfree(ad);
return -EINVAL;
  729. }
  730. static int sa_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  731. unsigned int keylen)
  732. {
  733. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  734. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  735. int key_idx = (keylen >> 3) - 2;
  736. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  737. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  738. ad->auth_eng.eng_id = SA_ENG_ID_NONE;
  739. ad->auth_eng.sc_size = 0;
  740. ad->mci_enc = mci_cbc_enc_array[key_idx];
  741. ad->mci_dec = mci_cbc_dec_array[key_idx];
  742. ad->inv_key = true;
  743. ad->ealg_id = SA_EALG_ID_AES_CBC;
  744. ad->aalg_id = SA_AALG_ID_NONE;
  745. return sa_aes_setkey(tfm, key, keylen, ad);
  746. }
  747. static int sa_aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  748. unsigned int keylen)
  749. {
  750. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  751. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  752. int key_idx = (keylen >> 3) - 2;
  753. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  754. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  755. ad->auth_eng.eng_id = SA_ENG_ID_NONE;
  756. ad->auth_eng.sc_size = 0;
  757. ad->mci_enc = mci_ecb_enc_array[key_idx];
  758. ad->mci_dec = mci_ecb_dec_array[key_idx];
  759. ad->inv_key = true;
  760. ad->ealg_id = SA_EALG_ID_AES_ECB;
  761. ad->aalg_id = SA_AALG_ID_NONE;
  762. return sa_aes_setkey(tfm, key, keylen, ad);
  763. }
  764. static int sa_3des_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  765. unsigned int keylen)
  766. {
  767. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  768. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  769. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  770. ad->auth_eng.eng_id = SA_ENG_ID_NONE;
  771. ad->auth_eng.sc_size = 0;
  772. ad->mci_enc = mci_cbc_3des_enc_array;
  773. ad->mci_dec = mci_cbc_3des_dec_array;
  774. ad->ealg_id = SA_EALG_ID_3DES_CBC;
  775. ad->aalg_id = SA_AALG_ID_NONE;
  776. return sa_aes_setkey(tfm, key, keylen, ad);
  777. }
  778. static int sa_3des_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  779. unsigned int keylen)
  780. {
  781. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  782. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  783. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  784. ad->auth_eng.eng_id = SA_ENG_ID_NONE;
  785. ad->auth_eng.sc_size = 0;
  786. ad->mci_enc = mci_ecb_3des_enc_array;
  787. ad->mci_dec = mci_ecb_3des_dec_array;
  788. ad->aalg_id = SA_AALG_ID_NONE;
  789. return sa_aes_setkey(tfm, key, keylen, ad);
  790. }
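/*
 * RX DMA completion callback for ablkcipher requests: unmap the
 * scatterlists and complete the crypto request.
 */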
  791. static void sa_aes_dma_in_callback(void *data)
  792. {
  793. struct sa_rx_data *rxd = (struct sa_rx_data *)data;
  794. struct ablkcipher_request *req = (struct ablkcipher_request *)rxd->req;
  795. int sglen = sg_nents_for_len(req->dst, req->nbytes);
  796. kfree(rxd);
  797. dma_unmap_sg(sa_k3_dev, req->src, sglen, DMA_TO_DEVICE);
  798. if (req->src != req->dst)
  799. dma_unmap_sg(sa_k3_dev, req->dst, sglen, DMA_FROM_DEVICE);
  800. ablkcipher_request_complete(req, 0);
  801. }
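/*
 * RX DMA completion callback for AEAD requests: byte-swap the
 * authentication tag returned in the descriptor metadata, then either
 * append it to the destination (encrypt) or compare it against the tag
 * found in the source (decrypt).
 */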
  802. static void sa_aead_dma_in_callback(void *data)
  803. {
  804. struct sa_rx_data *rxd = (struct sa_rx_data *)data;
  805. struct aead_request *req = (struct aead_request *)rxd->req;
  806. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  807. u32 *mdptr;
  808. unsigned int start = req->assoclen + req->cryptlen;
  809. unsigned int authsize = crypto_aead_authsize(tfm);
  810. u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
  811. int i, sglen, err = 0;
  812. size_t pl, ml;
  813. mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
  814. for (i = 0; i < (authsize / 4); i++)
  815. mdptr[i + 4] = htonl(mdptr[i + 4]);
  816. if (rxd->enc) {
  817. scatterwalk_map_and_copy((void *)&mdptr[4], req->dst,
  818. start, crypto_aead_authsize(tfm), 1);
  819. } else {
  820. start -= authsize;
  821. scatterwalk_map_and_copy(auth_tag, req->src,
  822. start, crypto_aead_authsize(tfm), 0);
  823. err = memcmp((void *)&mdptr[4],
  824. auth_tag, authsize) ? -EBADMSG : 0;
  825. }
  826. kfree(rxd);
  827. sglen = sg_nents_for_len(req->dst, req->cryptlen + authsize);
  828. dma_unmap_sg(sa_k3_dev, req->dst, sglen, DMA_FROM_DEVICE);
  829. sglen = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
  830. dma_unmap_sg(sa_k3_dev, req->src, sglen, DMA_TO_DEVICE);
  831. aead_request_complete(req, err);
  832. }
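/*
 * Copy the EPIB words and the command-label (PS data) words into the
 * TX descriptor metadata area.
 */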
  833. static void
  834. sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
  835. {
  836. u32 *out, *in;
  837. int i;
  838. for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
  839. *out++ = *in++;
  840. mdptr[4] = (0xFFFF << 16);
  841. for (out = &mdptr[5], in = psdata, i = 0;
  842. i < pslen / sizeof(u32); i++)
  843. *out++ = *in++;
  844. }
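/*
 * Common ablkcipher request path: patch the per-request fields into a
 * copy of the command label, map the scatterlists and submit paired
 * MEM_TO_DEV/DEV_TO_MEM DMA transfers.  Completion is reported from
 * sa_aes_dma_in_callback().
 */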
  845. static int sa_aes_run(struct ablkcipher_request *req, u8 *iv, int enc)
  846. {
  847. struct sa_tfm_ctx *ctx =
  848. crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
  849. struct sa_ctx_info *sa_ctx = enc ? &ctx->enc : &ctx->dec;
  850. struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
  851. struct sa_dma_req_ctx req_ctx;
  852. struct dma_async_tx_descriptor *tx_in, *tx_out;
  853. struct sa_rx_data *rxd;
  854. u8 enc_offset;
  855. int sg_nents, dst_nents;
  856. int psdata_offset;
  857. u8 auth_offset = 0;
  858. u8 *auth_iv = NULL;
  859. u8 *aad = NULL;
  860. u8 aad_len = 0;
  861. u16 enc_len;
  862. u16 auth_len = 0;
  863. u32 req_type;
  864. u32 *mdptr;
  865. size_t pl, ml;
  866. struct dma_chan *dma_rx;
  867. gfp_t flags;
  868. flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  869. GFP_KERNEL : GFP_ATOMIC;
  870. enc_offset = 0x0;
  871. enc_len = req->nbytes;
  872. /* Allocate descriptor & submit packet */
  873. sg_nents = sg_nents_for_len(req->src, enc_len);
  874. dst_nents = sg_nents_for_len(req->dst, enc_len);
  875. memcpy(req_ctx.cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
  876. /* Update Command Label */
  877. sa_update_cmdl(sa_k3_dev, enc_offset, enc_len,
  878. iv, auth_offset, auth_len,
  879. auth_iv, aad_len, aad,
  880. &sa_ctx->cmdl_upd_info, req_ctx.cmdl);
  881. /*
  882. * Last 2 words in PSDATA will have the crypto alg type &
  883. * crypto request pointer
  884. */
  885. req_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
  886. if (enc)
  887. req_type |= (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
  888. else
  889. req_type |= (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
  890. psdata_offset = sa_ctx->cmdl_size / sizeof(u32);
  891. req_ctx.cmdl[psdata_offset++] = req_type;
  892. /* map the packet */
  893. req_ctx.src = req->src;
  894. req_ctx.src_nents = dma_map_sg(sa_k3_dev, req_ctx.src,
  895. sg_nents, DMA_TO_DEVICE);
  896. if (req->src != req->dst)
  897. dst_nents = dma_map_sg(sa_k3_dev, req->dst,
  898. sg_nents, DMA_FROM_DEVICE);
  899. else
  900. dst_nents = req_ctx.src_nents;
  901. if (unlikely(req_ctx.src_nents != sg_nents)) {
  902. dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
  903. return -EIO;
  904. }
  905. req_ctx.dev_data = pdata;
  906. req_ctx.pkt = true;
  907. dma_sync_sg_for_device(pdata->dev, req->src, req_ctx.src_nents,
  908. DMA_TO_DEVICE);
  909. if (enc_len >= 256)
  910. dma_rx = pdata->dma_rx2;
  911. else
  912. dma_rx = pdata->dma_rx1;
  913. tx_in = dmaengine_prep_slave_sg(dma_rx, req->dst, dst_nents,
  914. DMA_DEV_TO_MEM,
  915. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  916. if (!tx_in) {
  917. dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
  918. return -EINVAL;
  919. }
rxd = kzalloc(sizeof(*rxd), flags);
  921. rxd->req = (void *)req;
  922. /* IN */
  923. tx_in->callback = sa_aes_dma_in_callback;
  924. tx_in->callback_param = rxd;
  925. tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, req->src,
  926. req_ctx.src_nents, DMA_MEM_TO_DEV,
  927. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  928. if (!tx_out) {
  929. dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
  930. return -EINVAL;
  931. }
  932. mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
  933. sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
  934. sizeof(u32))), req_ctx.cmdl,
  935. sizeof(sa_ctx->epib), sa_ctx->epib);
  936. ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
  937. dmaengine_desc_set_metadata_len(tx_out, 44);
  938. dmaengine_submit(tx_out);
  939. dmaengine_submit(tx_in);
  940. dma_async_issue_pending(dma_rx);
  941. dma_async_issue_pending(pdata->dma_tx);
  942. return -EINPROGRESS;
  943. }
  944. static int sa_aes_cbc_encrypt(struct ablkcipher_request *req)
  945. {
  946. return sa_aes_run(req, req->info, 1);
  947. }
  948. static int sa_aes_cbc_decrypt(struct ablkcipher_request *req)
  949. {
  950. return sa_aes_run(req, req->info, 0);
  951. }
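/* AEAD tfm init: allocate the per-direction (enc/dec) security contexts */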
  952. static int sa_init_tfm(struct crypto_tfm *tfm)
  953. {
  954. struct crypto_alg *alg = tfm->__crt_alg;
  955. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  956. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  957. int ret;
  958. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD) {
  959. memset(ctx, 0, sizeof(*ctx));
  960. ctx->dev_data = data;
  961. ret = sa_init_ctx_info(&ctx->enc, data);
  962. if (ret)
  963. return ret;
  964. ret = sa_init_ctx_info(&ctx->dec, data);
  965. if (ret) {
  966. sa_free_ctx_info(&ctx->enc, data);
  967. return ret;
  968. }
  969. }
  970. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  971. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  972. ctx->dec.sc_id, &ctx->dec.sc_phys);
  973. return 0;
  974. }
  975. /* Algorithm init */
  976. static int sa_cra_init_aead(struct crypto_aead *tfm)
  977. {
  978. return sa_init_tfm(crypto_aead_tfm(tfm));
  979. }
  980. /* Algorithm context teardown */
  981. static void sa_exit_tfm(struct crypto_tfm *tfm)
  982. {
  983. struct crypto_alg *alg = tfm->__crt_alg;
  984. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  985. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  986. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  987. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  988. ctx->dec.sc_id, &ctx->dec.sc_phys);
  989. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK)
  990. == CRYPTO_ALG_TYPE_AEAD) {
  991. sa_free_ctx_info(&ctx->enc, data);
  992. sa_free_ctx_info(&ctx->dec, data);
  993. }
  994. }
  995. static void sa_exit_tfm_aead(struct crypto_aead *tfm)
  996. {
  997. return sa_exit_tfm(crypto_aead_tfm(tfm));
  998. }
  999. /* AEAD algorithm configuration interface function */
  1000. static int sa_aead_setkey(struct crypto_aead *authenc,
  1001. const u8 *key, unsigned int keylen,
  1002. struct algo_data *ad)
  1003. {
  1004. struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
  1005. struct crypto_authenc_keys keys;
  1006. const char *cra_name;
  1007. int cmdl_len;
  1008. struct sa_cmdl_cfg cfg;
  1009. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  1010. goto badkey;
  1011. cra_name = crypto_tfm_alg_name(crypto_aead_tfm(authenc));
  1012. memset(&cfg, 0, sizeof(cfg));
  1013. cfg.enc1st = 1;
  1014. cfg.aalg = ad->aalg_id;
  1015. cfg.enc_eng_id = ad->enc_eng.eng_id;
  1016. cfg.auth_eng_id = ad->auth_eng.eng_id;
  1017. cfg.iv_size = crypto_aead_ivsize(authenc);
  1018. cfg.akey = keys.authkey;
  1019. cfg.akey_len = keys.authkeylen;
  1020. /* Setup Encryption Security Context & Command label template */
  1021. if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
  1022. keys.authkey, keys.authkeylen,
  1023. ad, 1, &ctx->enc.epib[1], true))
  1024. goto badkey;
  1025. cmdl_len = sa_format_cmdl_gen(&cfg,
  1026. (u8 *)ctx->enc.cmdl,
  1027. &ctx->enc.cmdl_upd_info);
  1028. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  1029. goto badkey;
  1030. ctx->enc.cmdl_size = cmdl_len;
  1031. /* Setup Decryption Security Context & Command label template */
  1032. if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
  1033. keys.authkey, keys.authkeylen,
  1034. ad, 0, &ctx->dec.epib[1], true))
  1035. goto badkey;
  1036. cfg.enc1st = 0;
  1037. cfg.enc_eng_id = ad->enc_eng.eng_id;
  1038. cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
  1039. &ctx->dec.cmdl_upd_info);
  1040. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  1041. goto badkey;
  1042. ctx->dec.cmdl_size = cmdl_len;
  1043. kfree(ad);
  1044. return 0;
badkey:
dev_err(sa_k3_dev, "%s: badkey\n", __func__);
/* ad was allocated by the setkey wrapper; don't leak it on failure */
kfree(ad);
return -EINVAL;
  1048. }
  1049. static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
  1050. const u8 *key, unsigned int keylen)
  1051. {
  1052. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  1053. struct crypto_authenc_keys keys;
  1054. int ret = 0, key_idx;
  1055. ret = crypto_authenc_extractkeys(&keys, key, keylen);
  1056. if (ret)
  1057. return ret;
  1058. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  1059. key_idx = (keys.enckeylen >> 3) - 2;
  1060. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  1061. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  1062. ad->auth_eng.eng_id = SA_ENG_ID_AM1;
  1063. ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
  1064. ad->mci_enc = mci_cbc_enc_array[key_idx];
  1065. ad->mci_dec = mci_cbc_dec_array[key_idx];
  1066. ad->inv_key = true;
  1067. ad->keyed_mac = true;
  1068. ad->ealg_id = SA_EALG_ID_AES_CBC;
  1069. ad->aalg_id = SA_AALG_ID_HMAC_SHA1;
  1070. ad->hash_size = SHA1_DIGEST_SIZE;
  1071. ad->auth_ctrl = 0x2;
  1072. ad->prep_iopad = sa_hmac_sha1_get_pad;
  1073. return sa_aead_setkey(authenc, key, keylen, ad);
  1074. }
  1075. static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
  1076. const u8 *key, unsigned int keylen)
  1077. {
  1078. struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);
  1079. struct crypto_authenc_keys keys;
  1080. int ret = 0, key_idx;
  1081. ret = crypto_authenc_extractkeys(&keys, key, keylen);
  1082. if (ret)
  1083. return ret;
  1084. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  1085. key_idx = (keys.enckeylen >> 3) - 2;
  1086. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  1087. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  1088. ad->auth_eng.eng_id = SA_ENG_ID_AM1;
  1089. ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
  1090. ad->mci_enc = mci_cbc_enc_array[key_idx];
  1091. ad->mci_dec = mci_cbc_dec_array[key_idx];
  1092. ad->inv_key = true;
  1093. ad->keyed_mac = true;
  1094. ad->ealg_id = SA_EALG_ID_AES_CBC;
  1095. ad->aalg_id = SA_AALG_ID_HMAC_SHA2_256;
  1096. ad->hash_size = SHA256_DIGEST_SIZE;
  1097. ad->auth_ctrl = 0x4;
  1098. ad->prep_iopad = sa_hmac_sha256_get_pad;
  1099. return sa_aead_setkey(authenc, key, keylen, ad);
  1100. }
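/*
 * Common AEAD request path: compute the encryption and authentication
 * spans from assoclen/cryptlen, update the command label, map the
 * scatterlists and submit the DMA transfers.  Tag handling is done in
 * sa_aead_dma_in_callback().
 */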
  1101. static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
  1102. {
  1103. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1104. struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
  1105. struct sa_ctx_info *sa_ctx = enc ? &ctx->enc : &ctx->dec;
  1106. struct sa_rx_data *rxd;
  1107. struct dma_async_tx_descriptor *tx_in, *tx_out;
  1108. struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
  1109. struct sa_dma_req_ctx req_ctx;
  1110. u8 enc_offset;
  1111. int sg_nents, dst_nents;
  1112. int psdata_offset;
  1113. u8 auth_offset = 0;
  1114. u8 *auth_iv = NULL;
  1115. u8 *aad = NULL;
  1116. u8 aad_len = 0;
  1117. u16 enc_len;
  1118. u16 auth_len = 0;
  1119. u32 *mdptr;
  1120. u32 req_type;
  1121. struct dma_chan *dma_rx;
  1122. gfp_t flags;
  1123. size_t pl, ml;
  1124. flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  1125. GFP_KERNEL : GFP_ATOMIC;
  1126. if (enc) {
  1127. iv = (u8 *)(req->iv);
  1128. enc_offset = req->assoclen;
  1129. enc_len = req->cryptlen;
  1130. auth_len = req->assoclen + req->cryptlen;
  1131. } else {
  1132. enc_offset = req->assoclen;
  1133. enc_len = req->cryptlen - crypto_aead_authsize(tfm);
  1134. auth_len = req->assoclen + req->cryptlen -
  1135. crypto_aead_authsize(tfm);
  1136. }
  1137. /* Allocate descriptor & submit packet */
  1138. sg_nents = sg_nents_for_len(req->src, enc_len + req->assoclen);
  1139. dst_nents = sg_nents_for_len(req->dst, enc_len +
  1140. crypto_aead_authsize(tfm));
  1141. memcpy(req_ctx.cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
  1142. /* Update Command Label */
  1143. sa_update_cmdl(sa_k3_dev, enc_offset, enc_len,
  1144. iv, auth_offset, auth_len,
  1145. auth_iv, aad_len, aad,
  1146. &sa_ctx->cmdl_upd_info, req_ctx.cmdl);
  1147. /*
  1148. * Last 2 words in PSDATA will have the crypto alg type &
  1149. * crypto request pointer
  1150. */
  1151. req_type = CRYPTO_ALG_TYPE_AEAD;
  1152. if (enc)
  1153. req_type |= (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
  1154. else
  1155. req_type |= (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
  1156. psdata_offset = sa_ctx->cmdl_size / sizeof(u32);
  1157. /* map the packet */
  1158. req_ctx.src = req->src;
  1159. req_ctx.src_nents = dma_map_sg(sa_k3_dev, req_ctx.src,
  1160. sg_nents, DMA_TO_DEVICE);
  1161. dst_nents = dma_map_sg(sa_k3_dev, req->dst,
  1162. dst_nents, DMA_FROM_DEVICE);
  1163. if (unlikely(req_ctx.src_nents != sg_nents)) {
  1164. dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
  1165. return -EIO;
  1166. }
  1167. req_ctx.dev_data = pdata;
  1168. req_ctx.pkt = true;
  1169. dma_sync_sg_for_device(pdata->dev, req->src, req_ctx.src_nents,
  1170. DMA_TO_DEVICE);
  1171. if (enc_len >= 256)
  1172. dma_rx = pdata->dma_rx2;
  1173. else
  1174. dma_rx = pdata->dma_rx1;
  1175. tx_in = dmaengine_prep_slave_sg(dma_rx, req->dst, dst_nents,
  1176. DMA_DEV_TO_MEM,
  1177. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  1178. if (!tx_in) {
  1179. dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
  1180. return -EINVAL;
  1181. }
rxd = kzalloc(sizeof(*rxd), flags);
  1183. rxd->req = (void *)req;
  1184. rxd->enc = enc;
  1185. rxd->tx_in = tx_in;
  1186. /* IN */
  1187. tx_in->callback = sa_aead_dma_in_callback;
  1188. tx_in->callback_param = rxd;
  1189. tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, req->src,
  1190. req_ctx.src_nents, DMA_MEM_TO_DEV,
  1191. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  1192. if (!tx_out) {
  1193. dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
  1194. return -EINVAL;
  1195. }
  1196. mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
  1197. sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
  1198. sizeof(u32))), req_ctx.cmdl,
  1199. sizeof(sa_ctx->epib), sa_ctx->epib);
  1200. ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
  1201. dmaengine_desc_set_metadata_len(tx_out, 52);
  1202. dmaengine_submit(tx_out);
  1203. dmaengine_submit(tx_in);
  1204. dma_async_issue_pending(dma_rx);
  1205. dma_async_issue_pending(pdata->dma_tx);
  1206. return -EINPROGRESS;
  1207. }
  1208. /* AEAD algorithm encrypt interface function */
  1209. static int sa_aead_encrypt(struct aead_request *req)
  1210. {
  1211. return sa_aead_run(req, req->iv, 1);
  1212. }
  1213. /* AEAD algorithm decrypt interface function */
  1214. static int sa_aead_decrypt(struct aead_request *req)
  1215. {
  1216. return sa_aead_run(req, req->iv, 0);
  1217. }
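/*
 * ahash tfm init: allocate a security context and, when a base algorithm
 * name is given, a software shash of the base algorithm.
 */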
  1218. static int sa_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
  1219. {
  1220. struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
  1221. struct crypto_alg *alg = tfm->__crt_alg;
  1222. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  1223. int ret;
  1224. if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
  1225. CRYPTO_ALG_TYPE_AHASH) {
  1226. memset(ctx, 0, sizeof(*ctx));
  1227. ctx->dev_data = data;
  1228. ret = sa_init_ctx_info(&ctx->enc, data);
  1229. if (ret)
  1230. return ret;
  1231. }
  1232. if (alg_base) {
  1233. ctx->shash = crypto_alloc_shash(alg_base, 0,
  1234. CRYPTO_ALG_NEED_FALLBACK);
  1235. if (IS_ERR(ctx->shash)) {
  1236. pr_err("base driver %s couldn't be loaded\n", alg_base);
  1237. return PTR_ERR(ctx->shash);
  1238. }
  1239. }
  1240. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  1241. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  1242. ctx->dec.sc_id, &ctx->dec.sc_phys);
  1243. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1244. sizeof(struct sa_dma_req_ctx) +
  1245. SHA512_BLOCK_SIZE);
  1246. return 0;
  1247. }
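/*
 * RX DMA completion callback for hash requests: copy the digest out of
 * the descriptor metadata into req->result in big-endian word order.
 */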
  1248. static void sa_sham_dma_in_callback(void *data)
  1249. {
  1250. struct sa_rx_data *rxd = (struct sa_rx_data *)data;
  1251. struct ahash_request *req = (struct ahash_request *)rxd->req;
  1252. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1253. unsigned int authsize = crypto_ahash_digestsize(tfm);
  1254. int i;
  1255. size_t ml, pl;
  1256. u32 *mdptr, *result;
  1257. mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
  1258. result = (u32 *)req->result;
  1259. kfree(rxd);
  1260. for (i = 0; i < (authsize / 4); i++)
  1261. result[i] = htonl(mdptr[i + 4]);
  1262. ahash_request_complete(req, 0);
  1263. }
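/* One-shot digest: run the whole request through the SA authentication engine */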
static int sa_sham_digest(struct ahash_request *req)
{
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct sa_ctx_info *sa_ctx = &ctx->enc;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
	struct sa_dma_req_ctx req_ctx;
	struct sa_rx_data *rxd;
	u8 enc_offset;
	int sg_nents;
	int psdata_offset;
	u8 auth_offset = 0;
	u8 *auth_iv = NULL;
	u8 *aad = NULL;
	u8 aad_len = 0;
	u16 enc_len;
	u16 auth_len = 0;
	u32 req_type;
	u32 *mdptr;
	struct dma_chan *dma_rx;
	gfp_t flags;
	size_t pl, ml;

	flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;

	enc_len = 0;
	auth_len = req->nbytes;
	enc_offset = 0;

	/* Allocate descriptor & submit packet */
	sg_nents = sg_nents_for_len(req->src, req->nbytes);

	memcpy(req_ctx.cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);

	/* Update Command Label */
	sa_update_cmdl(sa_k3_dev, enc_offset, enc_len,
		       NULL, auth_offset, auth_len,
		       auth_iv, aad_len, aad,
		       &sa_ctx->cmdl_upd_info, req_ctx.cmdl);

	/*
	 * Last 2 words in PSDATA will have the crypto alg type &
	 * crypto request pointer
	 */
	req_type = CRYPTO_ALG_TYPE_AHASH;
	psdata_offset = sa_ctx->cmdl_size / sizeof(u32);
	req_ctx.cmdl[psdata_offset++] = req_type;

	/* map the packet */
	req_ctx.src = req->src;
	req_ctx.src_nents = dma_map_sg(sa_k3_dev, req_ctx.src,
				       sg_nents, DMA_TO_DEVICE);
	if (unlikely(req_ctx.src_nents != sg_nents)) {
		dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
		return -EIO;
	}

	req_ctx.dev_data = pdata;
	req_ctx.pkt = true;

	dma_sync_sg_for_device(pdata->dev, req->src, req_ctx.src_nents,
			       DMA_TO_DEVICE);

	if (enc_len > 256)
		dma_rx = pdata->dma_rx2;
	else
		dma_rx = pdata->dma_rx1;

	tx_in = dmaengine_prep_slave_sg(dma_rx, req->src, req_ctx.src_nents,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	rxd = kzalloc(sizeof(*rxd), flags);
	if (!rxd)
		return -ENOMEM;

	rxd->req = (void *)req;
	rxd->tx_in = tx_in;

	tx_in->callback = sa_sham_dma_in_callback;
	tx_in->callback_param = rxd;

	tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, req->src,
					 req_ctx.src_nents, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
		kfree(rxd);
		return -EINVAL;
	}

	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);

	sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
			   sizeof(u32))), req_ctx.cmdl,
			   sizeof(sa_ctx->epib), sa_ctx->epib);

	dmaengine_desc_set_metadata_len(tx_out, 28);

	dmaengine_submit(tx_out);
	dmaengine_submit(tx_in);

	dma_async_issue_pending(dma_rx);
	dma_async_issue_pending(pdata->dma_tx);

	return -EINPROGRESS;
}
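/*
 * Usage sketch (illustrative only): the HMAC path above is driven through
 * the asynchronous hash API, roughly:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
 *	struct ahash_request *req;
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_crypt(req, src_sg, result, nbytes);
 *	crypto_ahash_digest(req);	- returns -EINPROGRESS and is
 *					  completed by sa_sham_dma_in_callback()
 *
 * src_sg, result, key and the lengths are caller-provided.
 */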
static int sa_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	dev_dbg(sa_k3_dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	return 0;
}
static int sa_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
				const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(shash, data, len, out);
}
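/*
 * Added commentary: sa_sham_setkey() below follows the usual HMAC key
 * preprocessing (RFC 2104): keys longer than the block size are first
 * hashed down to the digest size with the fallback shash, shorter keys are
 * used as-is, and the key is then zero-padded to a full block before being
 * handed to the security-context setup.
 */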
static int sa_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen, struct algo_data *ad)
{
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
	int bs = crypto_shash_blocksize(ctx->shash);
	int ds = crypto_shash_digestsize(ctx->shash);
	int cmdl_len;
	struct sa_cmdl_cfg cfg;
	int err;

	if (keylen > bs) {
		err = sa_sham_shash_digest(ctx->shash,
					   crypto_shash_get_flags(ctx->shash),
					   key, keylen, ctx->authkey);
		if (err) {
			kfree(ad);
			return err;
		}
		keylen = ds;
	} else {
		memcpy(ctx->authkey, key, keylen);
	}

	memset(ctx->authkey + keylen, 0, bs - keylen);

	memset(&cfg, 0, sizeof(cfg));
	cfg.enc1st = 0;
	cfg.aalg = ad->aalg_id;
	cfg.enc_eng_id = ad->enc_eng.eng_id;
	cfg.auth_eng_id = ad->auth_eng.eng_id;
	cfg.iv_size = 0;
	cfg.akey = ctx->authkey;
	cfg.akey_len = keylen;

	/* Setup Encryption Security Context & Command label template */
	if (sa_init_sc(&ctx->enc, NULL, 0, ctx->authkey, keylen, ad, 0,
		       &ctx->enc.epib[1], true))
		goto badkey;

	cmdl_len = sa_format_cmdl_gen(&cfg,
				      (u8 *)ctx->enc.cmdl,
				      &ctx->enc.cmdl_upd_info);
	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
		goto badkey;

	ctx->enc.cmdl_size = cmdl_len;

	kfree(ad);
	return 0;

badkey:
	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
	kfree(ad);
	return -EINVAL;
}
static int sa_sham_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);

	if (!ad)
		return -ENOMEM;

	ad->enc_eng.eng_id = SA_ENG_ID_NONE;
	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
	ad->mci_enc = NULL;
	ad->mci_dec = NULL;
	ad->inv_key = false;
	ad->keyed_mac = true;
	ad->ealg_id = SA_EALG_ID_NONE;
	ad->aalg_id = SA_AALG_ID_HMAC_SHA1;
	ad->hash_size = SHA1_DIGEST_SIZE;
	ad->auth_ctrl = 0x2;
	ad->prep_iopad = sa_hmac_sha1_get_pad;

	return sa_sham_setkey(tfm, key, keylen, ad);
}
static int sa_sham_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct algo_data *ad = kzalloc(sizeof(*ad), GFP_KERNEL);

	if (!ad)
		return -ENOMEM;

	ad->enc_eng.eng_id = SA_ENG_ID_NONE;
	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
	ad->mci_enc = NULL;
	ad->mci_dec = NULL;
	ad->inv_key = false;
	ad->keyed_mac = true;
	ad->ealg_id = SA_EALG_ID_NONE;
	ad->aalg_id = SA_AALG_ID_HMAC_SHA2_256;
	ad->hash_size = SHA256_DIGEST_SIZE;
	ad->auth_ctrl = 0x4;
	ad->prep_iopad = sa_hmac_sha256_get_pad;

	return sa_sham_setkey(tfm, key, keylen, ad);
}
static int sa_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return sa_sham_cra_init_alg(tfm, "sha1");
}

static int sa_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return sa_sham_cra_init_alg(tfm, "sha256");
}
static void sa_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
		ctx->dec.sc_id, &ctx->dec.sc_phys);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_AHASH)
		sa_free_ctx_info(&ctx->enc, data);
}
static int sa_sham_update(struct ahash_request *req)
{
	return 0;
}

static int sa_sham_final(struct ahash_request *req)
{
	return sa_sham_digest(req);
}

static int sa_sham_finup(struct ahash_request *req)
{
	return sa_sham_digest(req);
}
static struct sa_alg_tmpl sa_algs[] = {
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-sa2ul",
			.cra_priority = 30000,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = sa_aes_cra_init,
			.cra_exit = sa_aes_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = sa_aes_cbc_setkey,
				.encrypt = sa_aes_cbc_encrypt,
				.decrypt = sa_aes_cbc_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sa2ul",
			.cra_priority = 30000,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = sa_aes_cra_init,
			.cra_exit = sa_aes_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.setkey = sa_aes_ecb_setkey,
				.encrypt = sa_aes_cbc_encrypt,
				.decrypt = sa_aes_cbc_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-sa2ul",
			.cra_priority = 30000,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = sa_aes_cra_init,
			.cra_exit = sa_aes_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = 3 * DES_KEY_SIZE,
				.max_keysize = 3 * DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = sa_3des_cbc_setkey,
				.encrypt = sa_aes_cbc_encrypt,
				.decrypt = sa_aes_cbc_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-sa2ul",
			.cra_priority = 30000,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = sa_aes_cra_init,
			.cra_exit = sa_aes_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = 3 * DES_KEY_SIZE,
				.max_keysize = 3 * DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = sa_3des_ecb_setkey,
				.encrypt = sa_aes_cbc_encrypt,
				.decrypt = sa_aes_cbc_decrypt,
			}
		}
	},
	/* AEAD algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha1_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha256),cbc(aes))-keystone-sa",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = sa_cra_init_aead,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha256_setkey,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		}
	},
};
static struct ahash_alg algs_sha[] = {
	{
		.init = sa_sham_init,
		.update = sa_sham_update,
		.final = sa_sham_final,
		.finup = sa_sham_finup,
		.digest = sa_sham_digest,
		.setkey = sa_sham_sha1_setkey,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = 128,
		.halg.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "sa-hmac-sha1",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = SA_ALIGN_MASK,
			.cra_module = THIS_MODULE,
			.cra_init = sa_sham_cra_sha1_init,
			.cra_exit = sa_sham_cra_exit,
		}
	},
	{
		.init = sa_sham_init,
		.update = sa_sham_update,
		.final = sa_sham_final,
		.finup = sa_sham_finup,
		.digest = sa_sham_digest,
		.setkey = sa_sham_sha256_setkey,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = 128,
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "sa-hmac-sha256",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.cra_alignmask = SA_ALIGN_MASK,
			.cra_module = THIS_MODULE,
			.cra_init = sa_sham_cra_sha256_init,
			.cra_exit = sa_sham_cra_exit,
		}
	},
};
/* Register the algorithms in crypto framework */
void sa_register_algos(const struct device *dev)
{
	char *alg_name;
	u32 type;
	int i, err, num_algs = ARRAY_SIZE(sa_algs);

	for (i = 0; i < num_algs; i++) {
		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
			alg_name = sa_algs[i].alg.crypto.cra_name;
			err = crypto_register_alg(&sa_algs[i].alg.crypto);
		} else {
			dev_err(dev,
				"unsupported crypto algorithm (%d)",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = 1;
	}

	num_algs = ARRAY_SIZE(algs_sha);

	for (i = 0; i < num_algs; i++) {
		alg_name = algs_sha[i].halg.base.cra_name;
		err = crypto_register_ahash(&algs_sha[i]);
		if (err)
			dev_err(dev, "Failed to register '%s'\n",
				alg_name);
	}
}
/* Unregister the algorithms in crypto framework */
void sa_unregister_algos(const struct device *dev)
{
	char *alg_name;
	u32 type;
	int i, err = 0, num_algs = ARRAY_SIZE(sa_algs);

	for (i = 0; i < num_algs; i++) {
		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			crypto_unregister_aead(&sa_algs[i].alg.aead);
		} else {
			alg_name = sa_algs[i].alg.crypto.cra_name;
			err = crypto_unregister_alg(&sa_algs[i].alg.crypto);
		}

		sa_algs[i].registered = 0;
	}

	num_algs = ARRAY_SIZE(algs_sha);

	for (i = 0; i < num_algs; i++) {
		alg_name = algs_sha[i].halg.base.cra_name;
		err = crypto_unregister_ahash(&algs_sha[i]);
		if (err)
			dev_err(dev, "Failed to unregister '%s'\n",
				alg_name);
	}
}
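/*
 * Added commentary: security contexts handed to the engine are carved out
 * of a DMA pool of SA_CTX_MAX_SZ-byte buffers aligned to 64 bytes, created
 * once at probe time by sa_init_mem() below and destroyed in sa_ul_remove().
 */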
static int sa_init_mem(struct sa_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;

	/* Setup dma pool for security context buffers */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool");
		return -ENOMEM;
	}

	return 0;
}
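/*
 * Added commentary: the driver uses one TX channel ("tx") toward the SA2UL
 * engine and two RX channels ("rx1"/"rx2") for results coming back; the
 * request paths above pick rx2 for larger payloads (enc_len > 256) and rx1
 * otherwise. All three channels are configured below for a 4-byte bus width
 * and a burst size of 4.
 */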
static int sa_dma_init(struct sa_crypto_data *dd)
{
	int ret;
	struct dma_slave_config cfg;

	dd->dma_rx1 = NULL;
	dd->dma_tx = NULL;
	dd->dma_rx2 = NULL;

	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
	if (ret)
		return ret;

	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
	if (IS_ERR(dd->dma_rx1)) {
		if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
		return PTR_ERR(dd->dma_rx1);
	}

	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
	if (IS_ERR(dd->dma_rx2)) {
		dma_release_channel(dd->dma_rx1);
		if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
		return PTR_ERR(dd->dma_rx2);
	}

	dd->dma_tx = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_tx)) {
		if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
			dev_err(dd->dev, "Unable to request tx DMA channel\n");
		ret = PTR_ERR(dd->dma_tx);
		goto err_dma_tx;
	}

	memzero_explicit(&cfg, sizeof(cfg));

	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	cfg.dst_maxburst = 4;

	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_cfg;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_cfg;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		goto err_dma_cfg;
	}

	return 0;

err_dma_cfg:
	dma_release_channel(dd->dma_tx);
err_dma_tx:
	dma_release_channel(dd->dma_rx1);
	dma_release_channel(dd->dma_rx2);

	return ret;
}
static int sa_ul_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *saul_base;
	struct sa_crypto_data *dev_data;
	u32 val;
	int ret;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	sa_k3_dev = dev;
	dev_data->dev = dev;
	dev_data->pdev = pdev;
	platform_set_drvdata(pdev, dev_data);
	dev_set_drvdata(sa_k3_dev, dev_data);

	ret = sa_init_mem(dev_data);
	if (ret)
		return ret;

	ret = sa_dma_init(dev_data);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	saul_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(saul_base))
		return PTR_ERR(saul_base);

	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
	      SA_EEC_TRNG_EN;

	writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

	sa_register_algos(dev);

	return 0;
}
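/*
 * Device-tree sketch (illustrative only; everything except the compatible
 * string matched by of_match below and the dma-names used by sa_dma_init()
 * is an assumption, consult the actual binding for the exact properties):
 *
 *	crypto@0 {
 *		compatible = "ti,sa2ul-crypto";
 *		reg = <...>;			- MMIO region (placeholder)
 *		dmas = <...>, <...>, <...>;	- TX and two RX channels
 *		dma-names = "tx", "rx1", "rx2";
 *	};
 */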
static int sa_ul_remove(struct platform_device *pdev)
{
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	sa_unregister_algos(&pdev->dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

	dma_pool_destroy(dev_data->sc_pool);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,sa2ul-crypto", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver sa_ul_driver = {
	.probe = sa_ul_probe,
	.remove = sa_ul_remove,
	.driver = {
		.name = "saul-crypto",
		.of_match_table = of_match,
	},
};
module_platform_driver(sa_ul_driver);

MODULE_LICENSE("GPL v2");