artpec6_crypto.c

  1. /*
  2. * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
  3. *
  4. * Copyright (C) 2014-2017 Axis Communications AB
  5. */
  6. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7. #include <linux/bitfield.h>
  8. #include <linux/crypto.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/delay.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/fault-inject.h>
  13. #include <linux/init.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/kernel.h>
  16. #include <linux/list.h>
  17. #include <linux/module.h>
  18. #include <linux/of.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/scatterlist.h>
  21. #include <linux/slab.h>
  22. #include <crypto/aes.h>
  23. #include <crypto/gcm.h>
  24. #include <crypto/internal/aead.h>
  25. #include <crypto/internal/hash.h>
  26. #include <crypto/internal/skcipher.h>
  27. #include <crypto/scatterwalk.h>
  28. #include <crypto/sha.h>
  29. #include <crypto/xts.h>
  30. /* Max length of a line in all cache levels for Artpec SoCs. */
  31. #define ARTPEC_CACHE_LINE_MAX 32
  32. #define PDMA_OUT_CFG 0x0000
  33. #define PDMA_OUT_BUF_CFG 0x0004
  34. #define PDMA_OUT_CMD 0x0008
  35. #define PDMA_OUT_DESCRQ_PUSH 0x0010
  36. #define PDMA_OUT_DESCRQ_STAT 0x0014
  37. #define A6_PDMA_IN_CFG 0x0028
  38. #define A6_PDMA_IN_BUF_CFG 0x002c
  39. #define A6_PDMA_IN_CMD 0x0030
  40. #define A6_PDMA_IN_STATQ_PUSH 0x0038
  41. #define A6_PDMA_IN_DESCRQ_PUSH 0x0044
  42. #define A6_PDMA_IN_DESCRQ_STAT 0x0048
  43. #define A6_PDMA_INTR_MASK 0x0068
  44. #define A6_PDMA_ACK_INTR 0x006c
  45. #define A6_PDMA_MASKED_INTR 0x0074
  46. #define A7_PDMA_IN_CFG 0x002c
  47. #define A7_PDMA_IN_BUF_CFG 0x0030
  48. #define A7_PDMA_IN_CMD 0x0034
  49. #define A7_PDMA_IN_STATQ_PUSH 0x003c
  50. #define A7_PDMA_IN_DESCRQ_PUSH 0x0048
  51. #define A7_PDMA_IN_DESCRQ_STAT 0x004C
  52. #define A7_PDMA_INTR_MASK 0x006c
  53. #define A7_PDMA_ACK_INTR 0x0070
  54. #define A7_PDMA_MASKED_INTR 0x0078
  55. #define PDMA_OUT_CFG_EN BIT(0)
  56. #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
  57. #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
  58. #define PDMA_OUT_CMD_START BIT(0)
  59. #define A6_PDMA_OUT_CMD_STOP BIT(3)
  60. #define A7_PDMA_OUT_CMD_STOP BIT(2)
  61. #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
  62. #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
  63. #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
  64. #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
  65. #define PDMA_IN_CFG_EN BIT(0)
  66. #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
  67. #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
  68. #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
  69. #define PDMA_IN_CMD_START BIT(0)
  70. #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
  71. #define A6_PDMA_IN_CMD_STOP BIT(3)
  72. #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
  73. #define A7_PDMA_IN_CMD_STOP BIT(2)
  74. #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
  75. #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
  76. #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
  77. #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
  78. #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
  79. #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
  80. #define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
  81. #define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
  82. #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
  83. #define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
  84. #define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
  85. #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
  86. #define A6_CRY_MD_OPER GENMASK(19, 16)
  87. #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
  88. #define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
  89. #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
  90. #define A6_CRY_MD_CIPHER_DECR BIT(22)
  91. #define A6_CRY_MD_CIPHER_TWEAK BIT(23)
  92. #define A6_CRY_MD_CIPHER_DSEQ BIT(24)
  93. #define A7_CRY_MD_OPER GENMASK(11, 8)
  94. #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
  95. #define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
  96. #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
  97. #define A7_CRY_MD_CIPHER_DECR BIT(14)
  98. #define A7_CRY_MD_CIPHER_TWEAK BIT(15)
  99. #define A7_CRY_MD_CIPHER_DSEQ BIT(16)
  100. /* DMA metadata constants */
  101. #define regk_crypto_aes_cbc 0x00000002
  102. #define regk_crypto_aes_ctr 0x00000003
  103. #define regk_crypto_aes_ecb 0x00000001
  104. #define regk_crypto_aes_gcm 0x00000004
  105. #define regk_crypto_aes_xts 0x00000005
  106. #define regk_crypto_cache 0x00000002
  107. #define a6_regk_crypto_dlkey 0x0000000a
  108. #define a7_regk_crypto_dlkey 0x0000000e
  109. #define regk_crypto_ext 0x00000001
  110. #define regk_crypto_hmac_sha1 0x00000007
  111. #define regk_crypto_hmac_sha256 0x00000009
  112. #define regk_crypto_hmac_sha384 0x0000000b
  113. #define regk_crypto_hmac_sha512 0x0000000d
  114. #define regk_crypto_init 0x00000000
  115. #define regk_crypto_key_128 0x00000000
  116. #define regk_crypto_key_192 0x00000001
  117. #define regk_crypto_key_256 0x00000002
  118. #define regk_crypto_null 0x00000000
  119. #define regk_crypto_sha1 0x00000006
  120. #define regk_crypto_sha256 0x00000008
  121. #define regk_crypto_sha384 0x0000000a
  122. #define regk_crypto_sha512 0x0000000c
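/*
 * Illustrative sketch (not part of the driver): the 32-bit metadata word that
 * prefixes each out packet is assembled from the field masks and constants
 * above with FIELD_PREP(). For example, an AES-CBC decryption with a 128-bit
 * key on ARTPEC-6 would use a metadata word along the lines of:
 *
 *	u32 cipher_md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
 *			FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
 *			A6_CRY_MD_CIPHER_DECR;
 */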
  123. /* DMA descriptor structures */
  124. struct pdma_descr_ctrl {
  125. unsigned char short_descr : 1;
  126. unsigned char pad1 : 1;
  127. unsigned char eop : 1;
  128. unsigned char intr : 1;
  129. unsigned char short_len : 3;
  130. unsigned char pad2 : 1;
  131. } __packed;
  132. struct pdma_data_descr {
  133. unsigned int len : 24;
  134. unsigned int buf : 32;
  135. } __packed;
  136. struct pdma_short_descr {
  137. unsigned char data[7];
  138. } __packed;
  139. struct pdma_descr {
  140. struct pdma_descr_ctrl ctrl;
  141. union {
  142. struct pdma_data_descr data;
  143. struct pdma_short_descr shrt;
  144. };
  145. };
  146. struct pdma_stat_descr {
  147. unsigned char pad1 : 1;
  148. unsigned char pad2 : 1;
  149. unsigned char eop : 1;
  150. unsigned char pad3 : 5;
  151. unsigned int len : 24;
  152. };
  153. /* Each descriptor array can hold max 64 entries */
  154. #define PDMA_DESCR_COUNT 64
  155. #define MODULE_NAME "Artpec-6 CA"
  156. /* Hash modes (including HMAC variants) */
  157. #define ARTPEC6_CRYPTO_HASH_SHA1 1
  158. #define ARTPEC6_CRYPTO_HASH_SHA256 2
  159. #define ARTPEC6_CRYPTO_HASH_SHA384 3
  160. #define ARTPEC6_CRYPTO_HASH_SHA512 4
  161. /* Crypto modes */
  162. #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
  163. #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
  164. #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
  165. #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
  166. /* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
  167. * It operates on a descriptor array with up to 64 descriptor entries.
  168. * The arrays must be 64 byte aligned in memory.
  169. *
  170. * The ciphering unit has no registers and is completely controlled by
  171. * a 4-byte metadata word that is inserted at the beginning of each DMA packet.
  172. *
  173. * A dma packet is a sequence of descriptors terminated by setting the .eop
  174. * field in the final descriptor of the packet.
  175. *
  176. * Multiple packets are used for providing context data, key data and
  177. * the plain/ciphertext.
  178. *
  179. * PDMA Descriptors (Array)
  180. * +------+------+------+~~+-------+------+----
  181. * |  0   |  1   |  2   |~~| 11 EOP| 12   |  ....
  182. * +--+---+--+---+----+-+~~+-------+----+-+----
  183. *    |      |      |           |        |
  184. *    |      |      |           |        |
  185. *  __|__  +-------++-------++-------+ +----+
  186. * | MD  | |Payload||Payload||Payload| | MD |
  187. * +-----+ +-------++-------++-------+ +----+
  188. */
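/*
 * A minimal sketch (assuming a prepared req_common and an initialized
 * scatterlist walk) of how one out packet is typically composed with the
 * helpers below: a descriptor carrying the 4-byte metadata word, payload
 * descriptors covering the scatterlist, and finally the eop flag set on the
 * last descriptor of the request:
 *
 *	artpec6_crypto_setup_out_descr(common, &cipher_md, sizeof(cipher_md),
 *				       false, false);
 *	artpec6_crypto_setup_sg_descrs_out(common, &walk, req->cryptlen);
 *	artpec6_crypto_terminate_out_descrs(common);
 */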
  189. struct artpec6_crypto_bounce_buffer {
  190. struct list_head list;
  191. size_t length;
  192. struct scatterlist *sg;
  193. size_t offset;
  194. /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
  195. * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
  196. */
  197. void *buf;
  198. };
  199. struct artpec6_crypto_dma_map {
  200. dma_addr_t dma_addr;
  201. size_t size;
  202. enum dma_data_direction dir;
  203. };
  204. struct artpec6_crypto_dma_descriptors {
  205. struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
  206. struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
  207. u32 stat[PDMA_DESCR_COUNT] __aligned(64);
  208. struct list_head bounce_buffers;
  209. /* Enough maps for all out/in buffers, and all three descr. arrays */
  210. struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
  211. dma_addr_t out_dma_addr;
  212. dma_addr_t in_dma_addr;
  213. dma_addr_t stat_dma_addr;
  214. size_t out_cnt;
  215. size_t in_cnt;
  216. size_t map_count;
  217. };
  218. enum artpec6_crypto_variant {
  219. ARTPEC6_CRYPTO,
  220. ARTPEC7_CRYPTO,
  221. };
  222. struct artpec6_crypto {
  223. void __iomem *base;
  224. spinlock_t queue_lock;
  225. struct list_head queue; /* waiting for pdma fifo space */
  226. struct list_head pending; /* submitted to pdma fifo */
  227. struct tasklet_struct task;
  228. struct kmem_cache *dma_cache;
  229. int pending_count;
  230. struct timer_list timer;
  231. enum artpec6_crypto_variant variant;
  232. void *pad_buffer; /* cache-aligned block padding buffer */
  233. void *zero_buffer;
  234. };
  235. enum artpec6_crypto_hash_flags {
  236. HASH_FLAG_INIT_CTX = 2,
  237. HASH_FLAG_UPDATE = 4,
  238. HASH_FLAG_FINALIZE = 8,
  239. HASH_FLAG_HMAC = 16,
  240. HASH_FLAG_UPDATE_KEY = 32,
  241. };
  242. struct artpec6_crypto_req_common {
  243. struct list_head list;
  244. struct artpec6_crypto_dma_descriptors *dma;
  245. struct crypto_async_request *req;
  246. void (*complete)(struct crypto_async_request *req);
  247. gfp_t gfp_flags;
  248. };
  249. struct artpec6_hash_request_context {
  250. char partial_buffer[SHA512_BLOCK_SIZE];
  251. char partial_buffer_out[SHA512_BLOCK_SIZE];
  252. char key_buffer[SHA512_BLOCK_SIZE];
  253. char pad_buffer[SHA512_BLOCK_SIZE + 32];
  254. unsigned char digeststate[SHA512_DIGEST_SIZE];
  255. size_t partial_bytes;
  256. u64 digcnt;
  257. u32 key_md;
  258. u32 hash_md;
  259. enum artpec6_crypto_hash_flags hash_flags;
  260. struct artpec6_crypto_req_common common;
  261. };
  262. struct artpec6_hash_export_state {
  263. char partial_buffer[SHA512_BLOCK_SIZE];
  264. unsigned char digeststate[SHA512_DIGEST_SIZE];
  265. size_t partial_bytes;
  266. u64 digcnt;
  267. int oper;
  268. unsigned int hash_flags;
  269. };
  270. struct artpec6_hashalg_context {
  271. char hmac_key[SHA512_BLOCK_SIZE];
  272. size_t hmac_key_length;
  273. struct crypto_shash *child_hash;
  274. };
  275. struct artpec6_crypto_request_context {
  276. u32 cipher_md;
  277. bool decrypt;
  278. struct artpec6_crypto_req_common common;
  279. };
  280. struct artpec6_cryptotfm_context {
  281. unsigned char aes_key[2*AES_MAX_KEY_SIZE];
  282. size_t key_length;
  283. u32 key_md;
  284. int crypto_type;
  285. struct crypto_skcipher *fallback;
  286. };
  287. struct artpec6_crypto_aead_hw_ctx {
  288. __be64 aad_length_bits;
  289. __be64 text_length_bits;
  290. __u8 J0[AES_BLOCK_SIZE];
  291. };
  292. struct artpec6_crypto_aead_req_ctx {
  293. struct artpec6_crypto_aead_hw_ctx hw_ctx;
  294. u32 cipher_md;
  295. bool decrypt;
  296. struct artpec6_crypto_req_common common;
  297. __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
  298. };
  299. /* The crypto framework makes it hard to avoid this global. */
  300. static struct device *artpec6_crypto_dev;
  301. #ifdef CONFIG_FAULT_INJECTION
  302. static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
  303. static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
  304. #endif
  305. enum {
  306. ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
  307. ARTPEC6_CRYPTO_PREPARE_HASH_START,
  308. };
  309. static int artpec6_crypto_prepare_aead(struct aead_request *areq);
  310. static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
  311. static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
  312. static void
  313. artpec6_crypto_complete_crypto(struct crypto_async_request *req);
  314. static void
  315. artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
  316. static void
  317. artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
  318. static void
  319. artpec6_crypto_complete_aead(struct crypto_async_request *req);
  320. static void
  321. artpec6_crypto_complete_hash(struct crypto_async_request *req);
  322. static int
  323. artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
  324. static void
  325. artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
  326. struct artpec6_crypto_walk {
  327. struct scatterlist *sg;
  328. size_t offset;
  329. };
  330. static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
  331. struct scatterlist *sg)
  332. {
  333. awalk->sg = sg;
  334. awalk->offset = 0;
  335. }
  336. static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
  337. size_t nbytes)
  338. {
  339. while (nbytes && awalk->sg) {
  340. size_t piece;
  341. WARN_ON(awalk->offset > awalk->sg->length);
  342. piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
  343. nbytes -= piece;
  344. awalk->offset += piece;
  345. if (awalk->offset == awalk->sg->length) {
  346. awalk->sg = sg_next(awalk->sg);
  347. awalk->offset = 0;
  348. }
  349. }
  350. return nbytes;
  351. }
  352. static size_t
  353. artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
  354. {
  355. WARN_ON(awalk->sg->length == awalk->offset);
  356. return awalk->sg->length - awalk->offset;
  357. }
  358. static dma_addr_t
  359. artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
  360. {
  361. return sg_phys(awalk->sg) + awalk->offset;
  362. }
  363. static void
  364. artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
  365. {
  366. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  367. struct artpec6_crypto_bounce_buffer *b;
  368. struct artpec6_crypto_bounce_buffer *next;
  369. list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
  370. pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
  371. b, b->length, b->offset, b->buf);
  372. sg_pcopy_from_buffer(b->sg,
  373. 1,
  374. b->buf,
  375. b->length,
  376. b->offset);
  377. list_del(&b->list);
  378. kfree(b);
  379. }
  380. }
  381. static inline bool artpec6_crypto_busy(void)
  382. {
  383. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  384. int fifo_count = ac->pending_count;
  385. return fifo_count > 6;
  386. }
  387. static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
  388. {
  389. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  390. int ret = -EBUSY;
  391. spin_lock_bh(&ac->queue_lock);
  392. if (!artpec6_crypto_busy()) {
  393. list_add_tail(&req->list, &ac->pending);
  394. artpec6_crypto_start_dma(req);
  395. ret = -EINPROGRESS;
  396. } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
  397. list_add_tail(&req->list, &ac->queue);
  398. } else {
  399. artpec6_crypto_common_destroy(req);
  400. }
  401. spin_unlock_bh(&ac->queue_lock);
  402. return ret;
  403. }
  404. static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
  405. {
  406. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  407. enum artpec6_crypto_variant variant = ac->variant;
  408. void __iomem *base = ac->base;
  409. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  410. u32 ind, statd, outd;
  411. /* Make descriptor content visible to the DMA before starting it. */
  412. wmb();
  413. ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
  414. FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
  415. statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
  416. FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
  417. outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
  418. FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
  419. if (variant == ARTPEC6_CRYPTO) {
  420. writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
  421. writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
  422. writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
  423. } else {
  424. writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
  425. writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
  426. writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
  427. }
  428. writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
  429. writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
  430. ac->pending_count++;
  431. }
  432. static void
  433. artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
  434. {
  435. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  436. dma->out_cnt = 0;
  437. dma->in_cnt = 0;
  438. dma->map_count = 0;
  439. INIT_LIST_HEAD(&dma->bounce_buffers);
  440. }
  441. static bool fault_inject_dma_descr(void)
  442. {
  443. #ifdef CONFIG_FAULT_INJECTION
  444. return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
  445. #else
  446. return false;
  447. #endif
  448. }
  449. /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
  450. * physical address
  451. *
  452. * @addr: The physical address of the data buffer
  453. * @len: The length of the data buffer
  454. * @eop: True if this is the last buffer in the packet
  455. *
  456. * @return 0 on success or -ENOSPC if there are no more descriptors available
  457. */
  458. static int
  459. artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
  460. dma_addr_t addr, size_t len, bool eop)
  461. {
  462. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  463. struct pdma_descr *d;
  464. if (dma->out_cnt >= PDMA_DESCR_COUNT ||
  465. fault_inject_dma_descr()) {
  466. pr_err("No free OUT DMA descriptors available!\n");
  467. return -ENOSPC;
  468. }
  469. d = &dma->out[dma->out_cnt++];
  470. memset(d, 0, sizeof(*d));
  471. d->ctrl.short_descr = 0;
  472. d->ctrl.eop = eop;
  473. d->data.len = len;
  474. d->data.buf = addr;
  475. return 0;
  476. }
  477. /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
  478. *
  479. * @dst: The virtual address of the data
  480. * @len: The length of the data, must be between 1 and 7 bytes
  481. * @eop: True if this is the last buffer in the packet
  482. *
  483. * @return 0 on success
  484. * -ENOSPC if no more descriptors are available
  485. * -EINVAL if the data length exceeds 7 bytes
  486. */
  487. static int
  488. artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
  489. void *dst, unsigned int len, bool eop)
  490. {
  491. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  492. struct pdma_descr *d;
  493. if (dma->out_cnt >= PDMA_DESCR_COUNT ||
  494. fault_inject_dma_descr()) {
  495. pr_err("No free OUT DMA descriptors available!\n");
  496. return -ENOSPC;
  497. } else if (len > 7 || len < 1) {
  498. return -EINVAL;
  499. }
  500. d = &dma->out[dma->out_cnt++];
  501. memset(d, 0, sizeof(*d));
  502. d->ctrl.short_descr = 1;
  503. d->ctrl.short_len = len;
  504. d->ctrl.eop = eop;
  505. memcpy(d->shrt.data, dst, len);
  506. return 0;
  507. }
  508. static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
  509. struct page *page, size_t offset,
  510. size_t size,
  511. enum dma_data_direction dir,
  512. dma_addr_t *dma_addr_out)
  513. {
  514. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  515. struct device *dev = artpec6_crypto_dev;
  516. struct artpec6_crypto_dma_map *map;
  517. dma_addr_t dma_addr;
  518. *dma_addr_out = 0;
  519. if (dma->map_count >= ARRAY_SIZE(dma->maps))
  520. return -ENOMEM;
  521. dma_addr = dma_map_page(dev, page, offset, size, dir);
  522. if (dma_mapping_error(dev, dma_addr))
  523. return -ENOMEM;
  524. map = &dma->maps[dma->map_count++];
  525. map->size = size;
  526. map->dma_addr = dma_addr;
  527. map->dir = dir;
  528. *dma_addr_out = dma_addr;
  529. return 0;
  530. }
  531. static int
  532. artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
  533. void *ptr, size_t size,
  534. enum dma_data_direction dir,
  535. dma_addr_t *dma_addr_out)
  536. {
  537. struct page *page = virt_to_page(ptr);
  538. size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
  539. return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
  540. dma_addr_out);
  541. }
  542. static int
  543. artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
  544. {
  545. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  546. int ret;
  547. ret = artpec6_crypto_dma_map_single(common, dma->in,
  548. sizeof(dma->in[0]) * dma->in_cnt,
  549. DMA_TO_DEVICE, &dma->in_dma_addr);
  550. if (ret)
  551. return ret;
  552. ret = artpec6_crypto_dma_map_single(common, dma->out,
  553. sizeof(dma->out[0]) * dma->out_cnt,
  554. DMA_TO_DEVICE, &dma->out_dma_addr);
  555. if (ret)
  556. return ret;
  557. /* We only read one stat descriptor */
  558. dma->stat[dma->in_cnt - 1] = 0;
  559. /*
  560. * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
  561. * to be written.
  562. */
  563. return artpec6_crypto_dma_map_single(common,
  564. dma->stat + dma->in_cnt - 1,
  565. sizeof(dma->stat[0]),
  566. DMA_BIDIRECTIONAL,
  567. &dma->stat_dma_addr);
  568. }
  569. static void
  570. artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
  571. {
  572. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  573. struct device *dev = artpec6_crypto_dev;
  574. int i;
  575. for (i = 0; i < dma->map_count; i++) {
  576. struct artpec6_crypto_dma_map *map = &dma->maps[i];
  577. dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
  578. }
  579. dma->map_count = 0;
  580. }
  581. /** artpec6_crypto_setup_out_descr - Setup an out descriptor
  582. *
  583. * @dst: The virtual address of the data
  584. * @len: The length of the data
  585. * @eop: True if this is the last buffer in the packet
  586. * @use_short: If this is true and the data length is 7 bytes or less then
  587. * a short descriptor will be used
  588. *
  589. * @return 0 on success
  590. * Any errors from artpec6_crypto_setup_out_descr_short() or
  591. * setup_out_descr_phys()
  592. */
  593. static int
  594. artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
  595. void *dst, unsigned int len, bool eop,
  596. bool use_short)
  597. {
  598. if (use_short && len < 7) {
  599. return artpec6_crypto_setup_out_descr_short(common, dst, len,
  600. eop);
  601. } else {
  602. int ret;
  603. dma_addr_t dma_addr;
  604. ret = artpec6_crypto_dma_map_single(common, dst, len,
  605. DMA_TO_DEVICE,
  606. &dma_addr);
  607. if (ret)
  608. return ret;
  609. return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
  610. len, eop);
  611. }
  612. }
  613. /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
  614. * physical address
  615. *
  616. * @addr: The physical address of the data buffer
  617. * @len: The length of the data buffer
  618. * @intr: True if an interrupt should be fired after HW processing of this
  619. * descriptor
  620. *
  621. */
  622. static int
  623. artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
  624. dma_addr_t addr, unsigned int len, bool intr)
  625. {
  626. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  627. struct pdma_descr *d;
  628. if (dma->in_cnt >= PDMA_DESCR_COUNT ||
  629. fault_inject_dma_descr()) {
  630. pr_err("No free IN DMA descriptors available!\n");
  631. return -ENOSPC;
  632. }
  633. d = &dma->in[dma->in_cnt++];
  634. memset(d, 0, sizeof(*d));
  635. d->ctrl.intr = intr;
  636. d->data.len = len;
  637. d->data.buf = addr;
  638. return 0;
  639. }
  640. /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
  641. *
  642. * @buffer: The virtual address of the data buffer
  643. * @len: The length of the data buffer
  644. * @last: If this is the last data buffer in the request (i.e. an interrupt
  645. * is needed)
  646. *
  647. * Short descriptors are not used for the in channel
  648. */
  649. static int
  650. artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
  651. void *buffer, unsigned int len, bool last)
  652. {
  653. dma_addr_t dma_addr;
  654. int ret;
  655. ret = artpec6_crypto_dma_map_single(common, buffer, len,
  656. DMA_FROM_DEVICE, &dma_addr);
  657. if (ret)
  658. return ret;
  659. return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
  660. }
  661. static struct artpec6_crypto_bounce_buffer *
  662. artpec6_crypto_alloc_bounce(gfp_t flags)
  663. {
  664. void *base;
  665. size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
  666. 2 * ARTPEC_CACHE_LINE_MAX;
  667. struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
  668. if (!bbuf)
  669. return NULL;
  670. base = bbuf + 1;
  671. bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
  672. return bbuf;
  673. }
  674. static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
  675. struct artpec6_crypto_walk *walk, size_t size)
  676. {
  677. struct artpec6_crypto_bounce_buffer *bbuf;
  678. int ret;
  679. bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
  680. if (!bbuf)
  681. return -ENOMEM;
  682. bbuf->length = size;
  683. bbuf->sg = walk->sg;
  684. bbuf->offset = walk->offset;
  685. ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
  686. if (ret) {
  687. kfree(bbuf);
  688. return ret;
  689. }
  690. pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
  691. list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
  692. return 0;
  693. }
  694. static int
  695. artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
  696. struct artpec6_crypto_walk *walk,
  697. size_t count)
  698. {
  699. size_t chunk;
  700. int ret;
  701. dma_addr_t addr;
  702. while (walk->sg && count) {
  703. chunk = min(count, artpec6_crypto_walk_chunklen(walk));
  704. addr = artpec6_crypto_walk_chunk_phys(walk);
  705. /* When destination buffers are not aligned to the cache line
  706. * size we need bounce buffers. The DMA-API requires that the
  707. * entire cache line is owned by the DMA buffer, and this also holds
  708. * when coherent DMA is used.
  709. */
  710. if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
  711. chunk = min_t(dma_addr_t, chunk,
  712. ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
  713. addr);
  714. pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
  715. ret = setup_bounce_buffer_in(common, walk, chunk);
  716. } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
  717. pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
  718. ret = setup_bounce_buffer_in(common, walk, chunk);
  719. } else {
  720. dma_addr_t dma_addr;
  721. chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
  722. pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
  723. ret = artpec6_crypto_dma_map_page(common,
  724. sg_page(walk->sg),
  725. walk->sg->offset +
  726. walk->offset,
  727. chunk,
  728. DMA_FROM_DEVICE,
  729. &dma_addr);
  730. if (ret)
  731. return ret;
  732. ret = artpec6_crypto_setup_in_descr_phys(common,
  733. dma_addr,
  734. chunk, false);
  735. }
  736. if (ret)
  737. return ret;
  738. count = count - chunk;
  739. artpec6_crypto_walk_advance(walk, chunk);
  740. }
  741. if (count)
  742. pr_err("EOL unexpected %zu bytes left\n", count);
  743. return count ? -EINVAL : 0;
  744. }
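/*
 * Worked example of the splitting above (illustrative numbers): with
 * ARTPEC_CACHE_LINE_MAX = 32, a destination chunk starting at physical
 * address 0x1014 is not cache-line aligned, so its first
 * ALIGN(0x1014, 32) - 0x1014 = 12 bytes go through a bounce buffer. The walk
 * then continues at 0x1020; a following 100-byte chunk is truncated to
 * 96 bytes (100 & ~31) for direct DMA, and the 4-byte tail is bounced as
 * well since it is shorter than a cache line.
 */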
  745. static int
  746. artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
  747. struct artpec6_crypto_walk *walk,
  748. size_t count)
  749. {
  750. size_t chunk;
  751. int ret;
  752. dma_addr_t addr;
  753. while (walk->sg && count) {
  754. chunk = min(count, artpec6_crypto_walk_chunklen(walk));
  755. addr = artpec6_crypto_walk_chunk_phys(walk);
  756. pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
  757. if (addr & 3) {
  758. char buf[3];
  759. chunk = min_t(size_t, chunk, (4-(addr&3)));
  760. sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
  761. walk->offset);
  762. ret = artpec6_crypto_setup_out_descr_short(common, buf,
  763. chunk,
  764. false);
  765. } else {
  766. dma_addr_t dma_addr;
  767. ret = artpec6_crypto_dma_map_page(common,
  768. sg_page(walk->sg),
  769. walk->sg->offset +
  770. walk->offset,
  771. chunk,
  772. DMA_TO_DEVICE,
  773. &dma_addr);
  774. if (ret)
  775. return ret;
  776. ret = artpec6_crypto_setup_out_descr_phys(common,
  777. dma_addr,
  778. chunk, false);
  779. }
  780. if (ret)
  781. return ret;
  782. count = count - chunk;
  783. artpec6_crypto_walk_advance(walk, chunk);
  784. }
  785. if (count)
  786. pr_err("EOL unexpected %zu bytes left\n", count);
  787. return count ? -EINVAL : 0;
  788. }
  789. /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
  790. *
  791. * If the out descriptor list is non-empty, then the eop flag on the
  792. * last used out descriptor will be set.
  793. *
  794. * @return 0 on success
  795. * -EINVAL if the out descriptor list is empty or has overflowed
  796. */
  797. static int
  798. artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
  799. {
  800. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  801. struct pdma_descr *d;
  802. if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
  803. pr_err("%s: OUT descriptor list is %s\n",
  804. MODULE_NAME, dma->out_cnt ? "full" : "empty");
  805. return -EINVAL;
  806. }
  807. d = &dma->out[dma->out_cnt-1];
  808. d->ctrl.eop = 1;
  809. return 0;
  810. }
  811. /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
  812. * in descriptor
  813. *
  814. * See artpec6_crypto_terminate_out_descrs() for return values
  815. */
  816. static int
  817. artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
  818. {
  819. struct artpec6_crypto_dma_descriptors *dma = common->dma;
  820. struct pdma_descr *d;
  821. if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
  822. pr_err("%s: IN descriptor list is %s\n",
  823. MODULE_NAME, dma->in_cnt ? "full" : "empty");
  824. return -EINVAL;
  825. }
  826. d = &dma->in[dma->in_cnt-1];
  827. d->ctrl.intr = 1;
  828. return 0;
  829. }
  830. /** create_hash_pad - Create a Secure Hash conformant pad
  831. *
  832. * @dst: The destination buffer for the pad; must hold at least SHA512_BLOCK_SIZE + 16 bytes
  833. * @dgstlen: The number of bytes hashed so far, used to compute the pad length
  834. * @bitcount: The total length of the hashed data in bits, stored at the end of the pad
  835. *
  836. * @return The total number of padding bytes written to @dst
  837. */
  838. static size_t
  839. create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
  840. {
  841. unsigned int mod, target, diff, pad_bytes, size_bytes;
  842. __be64 bits = __cpu_to_be64(bitcount);
  843. switch (oper) {
  844. case regk_crypto_sha1:
  845. case regk_crypto_sha256:
  846. case regk_crypto_hmac_sha1:
  847. case regk_crypto_hmac_sha256:
  848. target = 448 / 8;
  849. mod = 512 / 8;
  850. size_bytes = 8;
  851. break;
  852. default:
  853. target = 896 / 8;
  854. mod = 1024 / 8;
  855. size_bytes = 16;
  856. break;
  857. }
  858. target -= 1;
  859. diff = dgstlen & (mod - 1);
  860. pad_bytes = diff > target ? target + mod - diff : target - diff;
  861. memset(dst + 1, 0, pad_bytes);
  862. dst[0] = 0x80;
  863. if (size_bytes == 16) {
  864. memset(dst + 1 + pad_bytes, 0, 8);
  865. memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
  866. } else {
  867. memcpy(dst + 1 + pad_bytes, &bits, 8);
  868. }
  869. return pad_bytes + size_bytes + 1;
  870. }
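/*
 * Worked example (illustrative): finalizing a plain SHA-256 hash after 3
 * bytes of input gives oper = regk_crypto_sha256, dgstlen = 3 and
 * bitcount = 24. Then mod = 64, target = 56 - 1 = 55, diff = 3 and
 * pad_bytes = 55 - 3 = 52, so the function writes 0x80, 52 zero bytes and
 * the 8-byte big-endian bit count, returning 61. Together with the 3 data
 * bytes this fills exactly one 64-byte SHA-256 block.
 */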
  871. static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
  872. struct crypto_async_request *parent,
  873. void (*complete)(struct crypto_async_request *req),
  874. struct scatterlist *dstsg, unsigned int nbytes)
  875. {
  876. gfp_t flags;
  877. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  878. flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  879. GFP_KERNEL : GFP_ATOMIC;
  880. common->gfp_flags = flags;
  881. common->dma = kmem_cache_alloc(ac->dma_cache, flags);
  882. if (!common->dma)
  883. return -ENOMEM;
  884. common->req = parent;
  885. common->complete = complete;
  886. return 0;
  887. }
  888. static void
  889. artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
  890. {
  891. struct artpec6_crypto_bounce_buffer *b;
  892. struct artpec6_crypto_bounce_buffer *next;
  893. list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
  894. kfree(b);
  895. }
  896. }
  897. static int
  898. artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
  899. {
  900. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  901. artpec6_crypto_dma_unmap_all(common);
  902. artpec6_crypto_bounce_destroy(common->dma);
  903. kmem_cache_free(ac->dma_cache, common->dma);
  904. common->dma = NULL;
  905. return 0;
  906. }
  907. /*
  908. * Ciphering functions.
  909. */
  910. static int artpec6_crypto_encrypt(struct skcipher_request *req)
  911. {
  912. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
  913. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  914. struct artpec6_crypto_request_context *req_ctx = NULL;
  915. void (*complete)(struct crypto_async_request *req);
  916. int ret;
  917. req_ctx = skcipher_request_ctx(req);
  918. switch (ctx->crypto_type) {
  919. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  920. case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
  921. case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
  922. req_ctx->decrypt = 0;
  923. break;
  924. default:
  925. break;
  926. }
  927. switch (ctx->crypto_type) {
  928. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  929. complete = artpec6_crypto_complete_cbc_encrypt;
  930. break;
  931. default:
  932. complete = artpec6_crypto_complete_crypto;
  933. break;
  934. }
  935. ret = artpec6_crypto_common_init(&req_ctx->common,
  936. &req->base,
  937. complete,
  938. req->dst, req->cryptlen);
  939. if (ret)
  940. return ret;
  941. ret = artpec6_crypto_prepare_crypto(req);
  942. if (ret) {
  943. artpec6_crypto_common_destroy(&req_ctx->common);
  944. return ret;
  945. }
  946. return artpec6_crypto_submit(&req_ctx->common);
  947. }
  948. static int artpec6_crypto_decrypt(struct skcipher_request *req)
  949. {
  950. int ret;
  951. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
  952. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  953. struct artpec6_crypto_request_context *req_ctx = NULL;
  954. void (*complete)(struct crypto_async_request *req);
  955. req_ctx = skcipher_request_ctx(req);
  956. switch (ctx->crypto_type) {
  957. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  958. case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
  959. case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
  960. req_ctx->decrypt = 1;
  961. break;
  962. default:
  963. break;
  964. }
  965. switch (ctx->crypto_type) {
  966. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  967. complete = artpec6_crypto_complete_cbc_decrypt;
  968. break;
  969. default:
  970. complete = artpec6_crypto_complete_crypto;
  971. break;
  972. }
  973. ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
  974. complete,
  975. req->dst, req->cryptlen);
  976. if (ret)
  977. return ret;
  978. ret = artpec6_crypto_prepare_crypto(req);
  979. if (ret) {
  980. artpec6_crypto_common_destroy(&req_ctx->common);
  981. return ret;
  982. }
  983. return artpec6_crypto_submit(&req_ctx->common);
  984. }
  985. static int
  986. artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
  987. {
  988. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
  989. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  990. size_t iv_len = crypto_skcipher_ivsize(cipher);
  991. unsigned int counter = be32_to_cpup((__be32 *)
  992. (req->iv + iv_len - 4));
  993. unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
  994. AES_BLOCK_SIZE;
  995. /*
  996. * The hardware uses only the last 32-bits as the counter while the
  997. * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
  998. * the whole IV is a counter. So fall back if the counter is going to
  999. * overflow.
  1000. */
  1001. if (counter + nblks < counter) {
  1002. int ret;
  1003. pr_debug("counter %x will overflow (nblks %u), falling back\n",
  1004. counter, counter + nblks);
  1005. ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
  1006. ctx->key_length);
  1007. if (ret)
  1008. return ret;
  1009. {
  1010. SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
  1011. skcipher_request_set_tfm(subreq, ctx->fallback);
  1012. skcipher_request_set_callback(subreq, req->base.flags,
  1013. NULL, NULL);
  1014. skcipher_request_set_crypt(subreq, req->src, req->dst,
  1015. req->cryptlen, req->iv);
  1016. ret = encrypt ? crypto_skcipher_encrypt(subreq)
  1017. : crypto_skcipher_decrypt(subreq);
  1018. skcipher_request_zero(subreq);
  1019. }
  1020. return ret;
  1021. }
  1022. return encrypt ? artpec6_crypto_encrypt(req)
  1023. : artpec6_crypto_decrypt(req);
  1024. }
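/*
 * Example of the overflow check above (illustrative numbers): a 4096-byte
 * request gives nblks = 256. If the last 32 bits of the IV are 0xffffff80,
 * counter + nblks wraps to 0x80, which is smaller than counter, so the
 * request is handed to the software fallback; with a low counter value the
 * hardware path is taken directly.
 */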
  1025. static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
  1026. {
  1027. return artpec6_crypto_ctr_crypt(req, true);
  1028. }
  1029. static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
  1030. {
  1031. return artpec6_crypto_ctr_crypt(req, false);
  1032. }
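/*
 * A minimal sketch of how these skcipher implementations are driven through
 * the kernel crypto API (hypothetical caller code, not part of this driver):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */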
  1033. /*
  1034. * AEAD functions
  1035. */
  1036. static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
  1037. {
  1038. struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
  1039. memset(tfm_ctx, 0, sizeof(*tfm_ctx));
  1040. crypto_aead_set_reqsize(tfm,
  1041. sizeof(struct artpec6_crypto_aead_req_ctx));
  1042. return 0;
  1043. }
  1044. static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
  1045. unsigned int len)
  1046. {
  1047. struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
  1048. if (len != 16 && len != 24 && len != 32) {
  1049. crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1050. return -EINVAL;
  1051. }
  1052. ctx->key_length = len;
  1053. memcpy(ctx->aes_key, key, len);
  1054. return 0;
  1055. }
  1056. static int artpec6_crypto_aead_encrypt(struct aead_request *req)
  1057. {
  1058. int ret;
  1059. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
  1060. req_ctx->decrypt = false;
  1061. ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
  1062. artpec6_crypto_complete_aead,
  1063. NULL, 0);
  1064. if (ret)
  1065. return ret;
  1066. ret = artpec6_crypto_prepare_aead(req);
  1067. if (ret) {
  1068. artpec6_crypto_common_destroy(&req_ctx->common);
  1069. return ret;
  1070. }
  1071. return artpec6_crypto_submit(&req_ctx->common);
  1072. }
  1073. static int artpec6_crypto_aead_decrypt(struct aead_request *req)
  1074. {
  1075. int ret;
  1076. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
  1077. req_ctx->decrypt = true;
  1078. if (req->cryptlen < AES_BLOCK_SIZE)
  1079. return -EINVAL;
  1080. ret = artpec6_crypto_common_init(&req_ctx->common,
  1081. &req->base,
  1082. artpec6_crypto_complete_aead,
  1083. NULL, 0);
  1084. if (ret)
  1085. return ret;
  1086. ret = artpec6_crypto_prepare_aead(req);
  1087. if (ret) {
  1088. artpec6_crypto_common_destroy(&req_ctx->common);
  1089. return ret;
  1090. }
  1091. return artpec6_crypto_submit(&req_ctx->common);
  1092. }
  1093. static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
  1094. {
  1095. struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
  1096. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
  1097. size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
  1098. size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
  1099. SHA512_DIGEST_SIZE : digestsize;
  1100. size_t blocksize = crypto_tfm_alg_blocksize(
  1101. crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
  1102. struct artpec6_crypto_req_common *common = &req_ctx->common;
  1103. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1104. enum artpec6_crypto_variant variant = ac->variant;
  1105. u32 sel_ctx;
  1106. bool ext_ctx = false;
  1107. bool run_hw = false;
  1108. int error = 0;
  1109. artpec6_crypto_init_dma_operation(common);
  1110. /* Upload HMAC key, must be the first packet */
  1111. if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
  1112. if (variant == ARTPEC6_CRYPTO) {
  1113. req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
  1114. a6_regk_crypto_dlkey);
  1115. } else {
  1116. req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
  1117. a7_regk_crypto_dlkey);
  1118. }
  1119. /* Copy and pad up the key */
  1120. memcpy(req_ctx->key_buffer, ctx->hmac_key,
  1121. ctx->hmac_key_length);
  1122. memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
  1123. blocksize - ctx->hmac_key_length);
  1124. error = artpec6_crypto_setup_out_descr(common,
  1125. (void *)&req_ctx->key_md,
  1126. sizeof(req_ctx->key_md), false, false);
  1127. if (error)
  1128. return error;
  1129. error = artpec6_crypto_setup_out_descr(common,
  1130. req_ctx->key_buffer, blocksize,
  1131. true, false);
  1132. if (error)
  1133. return error;
  1134. }
  1135. if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
  1136. /* Restore context */
  1137. sel_ctx = regk_crypto_ext;
  1138. ext_ctx = true;
  1139. } else {
  1140. sel_ctx = regk_crypto_init;
  1141. }
  1142. if (variant == ARTPEC6_CRYPTO) {
  1143. req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
  1144. req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
  1145. /* If this is the final round, set the final flag */
  1146. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
  1147. req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
  1148. } else {
  1149. req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
  1150. req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
  1151. /* If this is the final round, set the final flag */
  1152. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
  1153. req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
  1154. }
  1155. /* Set up the metadata descriptors */
  1156. error = artpec6_crypto_setup_out_descr(common,
  1157. (void *)&req_ctx->hash_md,
  1158. sizeof(req_ctx->hash_md), false, false);
  1159. if (error)
  1160. return error;
  1161. error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1162. if (error)
  1163. return error;
  1164. if (ext_ctx) {
  1165. error = artpec6_crypto_setup_out_descr(common,
  1166. req_ctx->digeststate,
  1167. contextsize, false, false);
  1168. if (error)
  1169. return error;
  1170. }
  1171. if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
  1172. size_t done_bytes = 0;
  1173. size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
  1174. size_t ready_bytes = round_down(total_bytes, blocksize);
  1175. struct artpec6_crypto_walk walk;
  1176. run_hw = ready_bytes > 0;
  1177. if (req_ctx->partial_bytes && ready_bytes) {
  1178. /* We have a partial buffer and will send at least some bytes
  1179. * to the HW. Empty this partial buffer before tackling
  1180. * the SG lists
  1181. */
  1182. memcpy(req_ctx->partial_buffer_out,
  1183. req_ctx->partial_buffer,
  1184. req_ctx->partial_bytes);
  1185. error = artpec6_crypto_setup_out_descr(common,
  1186. req_ctx->partial_buffer_out,
  1187. req_ctx->partial_bytes,
  1188. false, true);
  1189. if (error)
  1190. return error;
  1191. /* Reset partial buffer */
  1192. done_bytes += req_ctx->partial_bytes;
  1193. req_ctx->partial_bytes = 0;
  1194. }
  1195. artpec6_crypto_walk_init(&walk, areq->src);
  1196. error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
  1197. ready_bytes -
  1198. done_bytes);
  1199. if (error)
  1200. return error;
  1201. if (walk.sg) {
  1202. size_t sg_skip = ready_bytes - done_bytes;
  1203. size_t sg_rem = areq->nbytes - sg_skip;
  1204. sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
  1205. req_ctx->partial_buffer +
  1206. req_ctx->partial_bytes,
  1207. sg_rem, sg_skip);
  1208. req_ctx->partial_bytes += sg_rem;
  1209. }
  1210. req_ctx->digcnt += ready_bytes;
  1211. req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
  1212. }
  1213. /* Finalize */
  1214. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
  1215. bool needtrim = contextsize != digestsize;
  1216. size_t hash_pad_len;
  1217. u64 digest_bits;
  1218. u32 oper;
  1219. if (variant == ARTPEC6_CRYPTO)
  1220. oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
  1221. else
  1222. oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
  1223. /* Write out the partial buffer if present */
  1224. if (req_ctx->partial_bytes) {
  1225. memcpy(req_ctx->partial_buffer_out,
  1226. req_ctx->partial_buffer,
  1227. req_ctx->partial_bytes);
  1228. error = artpec6_crypto_setup_out_descr(common,
  1229. req_ctx->partial_buffer_out,
  1230. req_ctx->partial_bytes,
  1231. false, true);
  1232. if (error)
  1233. return error;
  1234. req_ctx->digcnt += req_ctx->partial_bytes;
  1235. req_ctx->partial_bytes = 0;
  1236. }
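/* The length encoded in the final padding must cover everything the
 * hardware has hashed. For HMAC this includes the block-sized key
 * material sent at the start of the job, hence the extra blocksize.
 */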
  1237. if (req_ctx->hash_flags & HASH_FLAG_HMAC)
  1238. digest_bits = 8 * (req_ctx->digcnt + blocksize);
  1239. else
  1240. digest_bits = 8 * req_ctx->digcnt;
  1241. /* Add the hash pad */
  1242. hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
  1243. req_ctx->digcnt, digest_bits);
  1244. error = artpec6_crypto_setup_out_descr(common,
  1245. req_ctx->pad_buffer,
  1246. hash_pad_len, false,
  1247. true);
  1248. req_ctx->digcnt = 0;
  1249. if (error)
  1250. return error;
  1251. /* Descriptor for the final result */
  1252. error = artpec6_crypto_setup_in_descr(common, areq->result,
  1253. digestsize,
  1254. !needtrim);
  1255. if (error)
  1256. return error;
  1257. if (needtrim) {
  1258. /* Discard the extra context bytes for SHA-384 */
  1259. error = artpec6_crypto_setup_in_descr(common,
  1260. req_ctx->partial_buffer,
  1261. digestsize - contextsize, true);
  1262. if (error)
  1263. return error;
  1264. }
  1265. } else { /* This is not the final operation for this request */
  1266. if (!run_hw)
  1267. return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
  1268. /* Save the result to the context */
  1269. error = artpec6_crypto_setup_in_descr(common,
  1270. req_ctx->digeststate,
  1271. contextsize, false);
  1272. if (error)
  1273. return error;
  1274. /* fall through */
  1275. }
  1276. req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
  1277. HASH_FLAG_FINALIZE);
  1278. error = artpec6_crypto_terminate_in_descrs(common);
  1279. if (error)
  1280. return error;
  1281. error = artpec6_crypto_terminate_out_descrs(common);
  1282. if (error)
  1283. return error;
  1284. error = artpec6_crypto_dma_map_descs(common);
  1285. if (error)
  1286. return error;
  1287. return ARTPEC6_CRYPTO_PREPARE_HASH_START;
  1288. }
  1289. static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
  1290. {
  1291. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1292. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1293. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
  1294. return 0;
  1295. }
  1296. static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
  1297. {
  1298. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1299. ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
  1300. 0,
  1301. CRYPTO_ALG_ASYNC |
  1302. CRYPTO_ALG_NEED_FALLBACK);
  1303. if (IS_ERR(ctx->fallback))
  1304. return PTR_ERR(ctx->fallback);
  1305. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1306. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
  1307. return 0;
  1308. }
  1309. static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
  1310. {
  1311. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1312. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1313. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
  1314. return 0;
  1315. }
  1316. static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
  1317. {
  1318. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1319. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1320. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
  1321. return 0;
  1322. }
  1323. static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
  1324. {
  1325. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1326. memset(ctx, 0, sizeof(*ctx));
  1327. }
  1328. static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
  1329. {
  1330. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1331. crypto_free_skcipher(ctx->fallback);
  1332. artpec6_crypto_aes_exit(tfm);
  1333. }
  1334. static int
  1335. artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1336. unsigned int keylen)
  1337. {
  1338. struct artpec6_cryptotfm_context *ctx =
  1339. crypto_skcipher_ctx(cipher);
  1340. switch (keylen) {
  1341. case 16:
  1342. case 24:
  1343. case 32:
  1344. break;
  1345. default:
  1346. crypto_skcipher_set_flags(cipher,
  1347. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1348. return -EINVAL;
  1349. }
  1350. memcpy(ctx->aes_key, key, keylen);
  1351. ctx->key_length = keylen;
  1352. return 0;
  1353. }
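/* XTS uses two AES keys of equal size, so the accepted key lengths are
 * twice those of plain AES; artpec6_crypto_prepare_crypto() halves the
 * length again when selecting the key-size metadata.
 */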
  1354. static int
  1355. artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1356. unsigned int keylen)
  1357. {
  1358. struct artpec6_cryptotfm_context *ctx =
  1359. crypto_skcipher_ctx(cipher);
  1360. int ret;
  1361. ret = xts_check_key(&cipher->base, key, keylen);
  1362. if (ret)
  1363. return ret;
  1364. switch (keylen) {
  1365. case 32:
  1366. case 48:
  1367. case 64:
  1368. break;
  1369. default:
  1370. crypto_skcipher_set_flags(cipher,
  1371. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1372. return -EINVAL;
  1373. }
  1374. memcpy(ctx->aes_key, key, keylen);
  1375. ctx->key_length = keylen;
  1376. return 0;
  1377. }
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @req: The asynchronous request to process
 *
 * @return 0 if the DMA job was successfully prepared
 *         <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
  1396. static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
  1397. {
  1398. int ret;
  1399. struct artpec6_crypto_walk walk;
  1400. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
  1401. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  1402. struct artpec6_crypto_request_context *req_ctx = NULL;
  1403. size_t iv_len = crypto_skcipher_ivsize(cipher);
  1404. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1405. enum artpec6_crypto_variant variant = ac->variant;
  1406. struct artpec6_crypto_req_common *common;
  1407. bool cipher_decr = false;
  1408. size_t cipher_klen;
  1409. u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
  1410. u32 oper;
  1411. req_ctx = skcipher_request_ctx(areq);
  1412. common = &req_ctx->common;
  1413. artpec6_crypto_init_dma_operation(common);
  1414. if (variant == ARTPEC6_CRYPTO)
  1415. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
  1416. else
  1417. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
  1418. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1419. sizeof(ctx->key_md), false, false);
  1420. if (ret)
  1421. return ret;
  1422. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1423. ctx->key_length, true, false);
  1424. if (ret)
  1425. return ret;
  1426. req_ctx->cipher_md = 0;
  1427. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
  1428. cipher_klen = ctx->key_length/2;
  1429. else
  1430. cipher_klen = ctx->key_length;
  1431. /* Metadata */
  1432. switch (cipher_klen) {
  1433. case 16:
  1434. cipher_len = regk_crypto_key_128;
  1435. break;
  1436. case 24:
  1437. cipher_len = regk_crypto_key_192;
  1438. break;
  1439. case 32:
  1440. cipher_len = regk_crypto_key_256;
  1441. break;
  1442. default:
  1443. pr_err("%s: Invalid key length %d!\n",
  1444. MODULE_NAME, ctx->key_length);
  1445. return -EINVAL;
  1446. }
  1447. switch (ctx->crypto_type) {
  1448. case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
  1449. oper = regk_crypto_aes_ecb;
  1450. cipher_decr = req_ctx->decrypt;
  1451. break;
  1452. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  1453. oper = regk_crypto_aes_cbc;
  1454. cipher_decr = req_ctx->decrypt;
  1455. break;
  1456. case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
  1457. oper = regk_crypto_aes_ctr;
  1458. cipher_decr = false;
  1459. break;
  1460. case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
  1461. oper = regk_crypto_aes_xts;
  1462. cipher_decr = req_ctx->decrypt;
  1463. if (variant == ARTPEC6_CRYPTO)
  1464. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
  1465. else
  1466. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
  1467. break;
  1468. default:
  1469. pr_err("%s: Invalid cipher mode %d!\n",
  1470. MODULE_NAME, ctx->crypto_type);
  1471. return -EINVAL;
  1472. }
  1473. if (variant == ARTPEC6_CRYPTO) {
  1474. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
  1475. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1476. cipher_len);
  1477. if (cipher_decr)
  1478. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1479. } else {
  1480. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
  1481. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1482. cipher_len);
  1483. if (cipher_decr)
  1484. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1485. }
  1486. ret = artpec6_crypto_setup_out_descr(common,
  1487. &req_ctx->cipher_md,
  1488. sizeof(req_ctx->cipher_md),
  1489. false, false);
  1490. if (ret)
  1491. return ret;
  1492. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1493. if (ret)
  1494. return ret;
  1495. if (iv_len) {
  1496. ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
  1497. false, false);
  1498. if (ret)
  1499. return ret;
  1500. }
  1501. /* Data out */
  1502. artpec6_crypto_walk_init(&walk, areq->src);
  1503. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
  1504. if (ret)
  1505. return ret;
  1506. /* Data in */
  1507. artpec6_crypto_walk_init(&walk, areq->dst);
  1508. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
  1509. if (ret)
  1510. return ret;
/* CTR-mode and XTS-mode padding of partial blocks, required by the HW. */
  1512. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
  1513. ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
  1514. size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
  1515. areq->cryptlen;
  1516. if (pad) {
  1517. ret = artpec6_crypto_setup_out_descr(common,
  1518. ac->pad_buffer,
  1519. pad, false, false);
  1520. if (ret)
  1521. return ret;
  1522. ret = artpec6_crypto_setup_in_descr(common,
  1523. ac->pad_buffer, pad,
  1524. false);
  1525. if (ret)
  1526. return ret;
  1527. }
  1528. }
  1529. ret = artpec6_crypto_terminate_out_descrs(common);
  1530. if (ret)
  1531. return ret;
  1532. ret = artpec6_crypto_terminate_in_descrs(common);
  1533. if (ret)
  1534. return ret;
  1535. return artpec6_crypto_dma_map_descs(common);
  1536. }
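/* Prepare an AES-GCM AEAD request.
 *
 * A sketch of the descriptor layout built below, derived from the code
 * rather than from hardware documentation:
 *
 * OUT: [KEY_MD][KEY][CIPHER_MD][CTX: lengths, J0][AAD][aad_pad][data][data_pad]
 * IN:  [CIPHER_MD][data][data_pad][TAG]
 *
 * For decryption the tag is captured in a context buffer and compared
 * against the input tag in artpec6_crypto_complete_aead().
 */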
  1537. static int artpec6_crypto_prepare_aead(struct aead_request *areq)
  1538. {
  1539. size_t count;
  1540. int ret;
  1541. size_t input_length;
  1542. struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
  1543. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1544. struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
  1545. struct artpec6_crypto_req_common *common = &req_ctx->common;
  1546. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1547. enum artpec6_crypto_variant variant = ac->variant;
  1548. u32 md_cipher_len;
  1549. artpec6_crypto_init_dma_operation(common);
  1550. /* Key */
  1551. if (variant == ARTPEC6_CRYPTO) {
  1552. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
  1553. a6_regk_crypto_dlkey);
  1554. } else {
  1555. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
  1556. a7_regk_crypto_dlkey);
  1557. }
  1558. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1559. sizeof(ctx->key_md), false, false);
  1560. if (ret)
  1561. return ret;
  1562. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1563. ctx->key_length, true, false);
  1564. if (ret)
  1565. return ret;
  1566. req_ctx->cipher_md = 0;
  1567. switch (ctx->key_length) {
  1568. case 16:
  1569. md_cipher_len = regk_crypto_key_128;
  1570. break;
  1571. case 24:
  1572. md_cipher_len = regk_crypto_key_192;
  1573. break;
  1574. case 32:
  1575. md_cipher_len = regk_crypto_key_256;
  1576. break;
  1577. default:
  1578. return -EINVAL;
  1579. }
  1580. if (variant == ARTPEC6_CRYPTO) {
  1581. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
  1582. regk_crypto_aes_gcm);
  1583. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1584. md_cipher_len);
  1585. if (req_ctx->decrypt)
  1586. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1587. } else {
  1588. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
  1589. regk_crypto_aes_gcm);
  1590. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1591. md_cipher_len);
  1592. if (req_ctx->decrypt)
  1593. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1594. }
  1595. ret = artpec6_crypto_setup_out_descr(common,
  1596. (void *) &req_ctx->cipher_md,
  1597. sizeof(req_ctx->cipher_md), false,
  1598. false);
  1599. if (ret)
  1600. return ret;
  1601. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1602. if (ret)
  1603. return ret;
/* For decryption, cryptlen includes the tag. */
  1605. input_length = areq->cryptlen;
  1606. if (req_ctx->decrypt)
  1607. input_length -= AES_BLOCK_SIZE;
  1608. /* Prepare the context buffer */
  1609. req_ctx->hw_ctx.aad_length_bits =
  1610. __cpu_to_be64(8*areq->assoclen);
  1611. req_ctx->hw_ctx.text_length_bits =
  1612. __cpu_to_be64(8*input_length);
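/* For a 96-bit IV, GCM defines J0 as IV || 0^31 || 1. */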
  1613. memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
  1614. // The HW omits the initial increment of the counter field.
  1615. memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
  1616. ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
  1617. sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
  1618. if (ret)
  1619. return ret;
  1620. {
  1621. struct artpec6_crypto_walk walk;
  1622. artpec6_crypto_walk_init(&walk, areq->src);
  1623. /* Associated data */
  1624. count = areq->assoclen;
  1625. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1626. if (ret)
  1627. return ret;
  1628. if (!IS_ALIGNED(areq->assoclen, 16)) {
  1629. size_t assoc_pad = 16 - (areq->assoclen % 16);
  1630. /* The HW mandates zero padding here */
  1631. ret = artpec6_crypto_setup_out_descr(common,
  1632. ac->zero_buffer,
  1633. assoc_pad, false,
  1634. false);
  1635. if (ret)
  1636. return ret;
  1637. }
  1638. /* Data to crypto */
  1639. count = input_length;
  1640. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1641. if (ret)
  1642. return ret;
  1643. if (!IS_ALIGNED(input_length, 16)) {
  1644. size_t crypto_pad = 16 - (input_length % 16);
  1645. /* The HW mandates zero padding here */
  1646. ret = artpec6_crypto_setup_out_descr(common,
  1647. ac->zero_buffer,
  1648. crypto_pad,
  1649. false,
  1650. false);
  1651. if (ret)
  1652. return ret;
  1653. }
  1654. }
  1655. /* Data from crypto */
  1656. {
  1657. struct artpec6_crypto_walk walk;
  1658. size_t output_len = areq->cryptlen;
  1659. if (req_ctx->decrypt)
  1660. output_len -= AES_BLOCK_SIZE;
  1661. artpec6_crypto_walk_init(&walk, areq->dst);
  1662. /* skip associated data in the output */
  1663. count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
  1664. if (count)
  1665. return -EINVAL;
  1666. count = output_len;
  1667. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
  1668. if (ret)
  1669. return ret;
/* Put padding between the ciphertext and the auth tag */
  1671. if (!IS_ALIGNED(output_len, 16)) {
  1672. size_t crypto_pad = 16 - (output_len % 16);
  1673. ret = artpec6_crypto_setup_in_descr(common,
  1674. ac->pad_buffer,
  1675. crypto_pad, false);
  1676. if (ret)
  1677. return ret;
  1678. }
/* The authentication tag shall follow immediately after
 * the output ciphertext. For decryption it is put in a context
 * buffer for later comparison with the input tag.
 */
  1683. count = AES_BLOCK_SIZE;
  1684. if (req_ctx->decrypt) {
  1685. ret = artpec6_crypto_setup_in_descr(common,
  1686. req_ctx->decryption_tag, count, false);
  1687. if (ret)
  1688. return ret;
  1689. } else {
  1690. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
  1691. count);
  1692. if (ret)
  1693. return ret;
  1694. }
  1695. }
  1696. ret = artpec6_crypto_terminate_in_descrs(common);
  1697. if (ret)
  1698. return ret;
  1699. ret = artpec6_crypto_terminate_out_descrs(common);
  1700. if (ret)
  1701. return ret;
  1702. return artpec6_crypto_dma_map_descs(common);
  1703. }
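/* Move queued requests over to the pending list and start their DMA
 * jobs for as long as the hardware can accept more work, then (re)arm
 * the timer that rechecks job status in case the flush interrupt
 * arrives before the status write.
 */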
  1704. static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
  1705. {
  1706. struct artpec6_crypto_req_common *req;
  1707. while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
  1708. req = list_first_entry(&ac->queue,
  1709. struct artpec6_crypto_req_common,
  1710. list);
  1711. list_move_tail(&req->list, &ac->pending);
  1712. artpec6_crypto_start_dma(req);
  1713. req->req->complete(req->req, -EINPROGRESS);
  1714. }
/*
 * In some cases, the hardware can raise an in_eop_flush interrupt
 * before actually updating the status, so we have a timer which will
 * recheck the status on timeout. Since the cases are expected to be
 * very rare, we use a relatively large timeout value. There should be
 * no noticeable negative effect if we time out spuriously.
 */
  1722. if (ac->pending_count)
  1723. mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
  1724. else
  1725. del_timer(&ac->timer);
  1726. }
  1727. static void artpec6_crypto_timeout(struct timer_list *t)
  1728. {
  1729. struct artpec6_crypto *ac = from_timer(ac, t, timer);
  1730. dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
  1731. tasklet_schedule(&ac->task);
  1732. }
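/* Tasklet run on the in_eop_flush interrupt (or the recheck timer).
 * Walk the pending list in submission order; a non-zero final status
 * descriptor marks a finished job, which is then unmapped, has its
 * bounce buffers copied back and its completion callback invoked.
 */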
  1733. static void artpec6_crypto_task(unsigned long data)
  1734. {
  1735. struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
  1736. struct artpec6_crypto_req_common *req;
  1737. struct artpec6_crypto_req_common *n;
  1738. if (list_empty(&ac->pending)) {
  1739. pr_debug("Spurious IRQ\n");
  1740. return;
  1741. }
  1742. spin_lock_bh(&ac->queue_lock);
  1743. list_for_each_entry_safe(req, n, &ac->pending, list) {
  1744. struct artpec6_crypto_dma_descriptors *dma = req->dma;
  1745. u32 stat;
  1746. dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
  1747. sizeof(dma->stat[0]),
  1748. DMA_BIDIRECTIONAL);
  1749. stat = req->dma->stat[req->dma->in_cnt-1];
  1750. /* A non-zero final status descriptor indicates
  1751. * this job has finished.
  1752. */
  1753. pr_debug("Request %p status is %X\n", req, stat);
  1754. if (!stat)
  1755. break;
  1756. /* Allow testing of timeout handling with fault injection */
  1757. #ifdef CONFIG_FAULT_INJECTION
  1758. if (should_fail(&artpec6_crypto_fail_status_read, 1))
  1759. continue;
  1760. #endif
  1761. pr_debug("Completing request %p\n", req);
  1762. list_del(&req->list);
  1763. artpec6_crypto_dma_unmap_all(req);
  1764. artpec6_crypto_copy_bounce_buffers(req);
  1765. ac->pending_count--;
  1766. artpec6_crypto_common_destroy(req);
  1767. req->complete(req->req);
  1768. }
  1769. artpec6_crypto_process_queue(ac);
  1770. spin_unlock_bh(&ac->queue_lock);
  1771. }
  1772. static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
  1773. {
  1774. req->complete(req, 0);
  1775. }
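/* For CBC the IV of a chained request is the last ciphertext block:
 * the two completions below copy it from the source (decrypt) or
 * destination (encrypt) scatterlist into the request IV.
 */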
  1776. static void
  1777. artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
  1778. {
  1779. struct skcipher_request *cipher_req = container_of(req,
  1780. struct skcipher_request, base);
  1781. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
  1782. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1783. AES_BLOCK_SIZE, 0);
  1784. req->complete(req, 0);
  1785. }
  1786. static void
  1787. artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
  1788. {
  1789. struct skcipher_request *cipher_req = container_of(req,
  1790. struct skcipher_request, base);
  1791. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
  1792. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1793. AES_BLOCK_SIZE, 0);
  1794. req->complete(req, 0);
  1795. }
  1796. static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
  1797. {
  1798. int result = 0;
  1799. /* Verify GCM hashtag. */
  1800. struct aead_request *areq = container_of(req,
  1801. struct aead_request, base);
  1802. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1803. if (req_ctx->decrypt) {
  1804. u8 input_tag[AES_BLOCK_SIZE];
  1805. sg_pcopy_to_buffer(areq->src,
  1806. sg_nents(areq->src),
  1807. input_tag,
  1808. AES_BLOCK_SIZE,
  1809. areq->assoclen + areq->cryptlen -
  1810. AES_BLOCK_SIZE);
  1811. if (memcmp(req_ctx->decryption_tag,
  1812. input_tag,
  1813. AES_BLOCK_SIZE)) {
  1814. pr_debug("***EBADMSG:\n");
  1815. print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
  1816. input_tag, AES_BLOCK_SIZE, true);
  1817. print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
  1818. req_ctx->decryption_tag,
  1819. AES_BLOCK_SIZE, true);
  1820. result = -EBADMSG;
  1821. }
  1822. }
  1823. req->complete(req, result);
  1824. }
  1825. static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
  1826. {
  1827. req->complete(req, 0);
  1828. }
  1829. /*------------------- Hash functions -----------------------------------------*/
  1830. static int
  1831. artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
  1832. const u8 *key, unsigned int keylen)
  1833. {
  1834. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
  1835. size_t blocksize;
  1836. int ret;
  1837. if (!keylen) {
  1838. pr_err("Invalid length (%d) of HMAC key\n",
  1839. keylen);
  1840. return -EINVAL;
  1841. }
  1842. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  1843. blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
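/* Per HMAC, keys longer than the block size are replaced by their
 * digest, computed here with the fallback shash.
 */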
  1844. if (keylen > blocksize) {
  1845. SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
  1846. hdesc->tfm = tfm_ctx->child_hash;
  1847. hdesc->flags = crypto_ahash_get_flags(tfm) &
  1848. CRYPTO_TFM_REQ_MAY_SLEEP;
  1849. tfm_ctx->hmac_key_length = blocksize;
  1850. ret = crypto_shash_digest(hdesc, key, keylen,
  1851. tfm_ctx->hmac_key);
  1852. if (ret)
  1853. return ret;
  1854. } else {
  1855. memcpy(tfm_ctx->hmac_key, key, keylen);
  1856. tfm_ctx->hmac_key_length = keylen;
  1857. }
  1858. return 0;
  1859. }
  1860. static int
  1861. artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
  1862. {
  1863. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1864. enum artpec6_crypto_variant variant = ac->variant;
  1865. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1866. u32 oper;
  1867. memset(req_ctx, 0, sizeof(*req_ctx));
  1868. req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
  1869. if (hmac)
  1870. req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
  1871. switch (type) {
  1872. case ARTPEC6_CRYPTO_HASH_SHA1:
  1873. oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
  1874. break;
  1875. case ARTPEC6_CRYPTO_HASH_SHA256:
  1876. oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
  1877. break;
  1878. case ARTPEC6_CRYPTO_HASH_SHA384:
  1879. oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
  1880. break;
  1881. case ARTPEC6_CRYPTO_HASH_SHA512:
  1882. oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
  1883. break;
  1884. default:
  1885. pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
  1886. return -EINVAL;
  1887. }
  1888. if (variant == ARTPEC6_CRYPTO)
  1889. req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
  1890. else
  1891. req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
  1892. return 0;
  1893. }
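/* Allocate the DMA state on first use, build the hash descriptors and
 * submit the job. If there is nothing for the hardware to do yet
 * (NO_START), or preparation fails, the DMA state is torn down again.
 */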
  1894. static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
  1895. {
  1896. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1897. int ret;
  1898. if (!req_ctx->common.dma) {
  1899. ret = artpec6_crypto_common_init(&req_ctx->common,
  1900. &req->base,
  1901. artpec6_crypto_complete_hash,
  1902. NULL, 0);
  1903. if (ret)
  1904. return ret;
  1905. }
  1906. ret = artpec6_crypto_prepare_hash(req);
  1907. switch (ret) {
  1908. case ARTPEC6_CRYPTO_PREPARE_HASH_START:
  1909. ret = artpec6_crypto_submit(&req_ctx->common);
  1910. break;
  1911. case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
  1912. ret = 0;
  1913. /* Fallthrough */
  1914. default:
  1915. artpec6_crypto_common_destroy(&req_ctx->common);
  1916. break;
  1917. }
  1918. return ret;
  1919. }
  1920. static int artpec6_crypto_hash_final(struct ahash_request *req)
  1921. {
  1922. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1923. req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
  1924. return artpec6_crypto_prepare_submit_hash(req);
  1925. }
  1926. static int artpec6_crypto_hash_update(struct ahash_request *req)
  1927. {
  1928. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1929. req_ctx->hash_flags |= HASH_FLAG_UPDATE;
  1930. return artpec6_crypto_prepare_submit_hash(req);
  1931. }
  1932. static int artpec6_crypto_sha1_init(struct ahash_request *req)
  1933. {
  1934. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1935. }
  1936. static int artpec6_crypto_sha1_digest(struct ahash_request *req)
  1937. {
  1938. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1939. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1940. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1941. return artpec6_crypto_prepare_submit_hash(req);
  1942. }
  1943. static int artpec6_crypto_sha256_init(struct ahash_request *req)
  1944. {
  1945. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1946. }
  1947. static int artpec6_crypto_sha256_digest(struct ahash_request *req)
  1948. {
  1949. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1950. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1951. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1952. return artpec6_crypto_prepare_submit_hash(req);
  1953. }
  1954. static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
  1955. {
  1956. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1957. }
  1958. static int __maybe_unused
  1959. artpec6_crypto_sha384_digest(struct ahash_request *req)
  1960. {
  1961. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1962. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1963. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1964. return artpec6_crypto_prepare_submit_hash(req);
  1965. }
  1966. static int artpec6_crypto_sha512_init(struct ahash_request *req)
  1967. {
  1968. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1969. }
  1970. static int artpec6_crypto_sha512_digest(struct ahash_request *req)
  1971. {
  1972. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1973. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1974. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1975. return artpec6_crypto_prepare_submit_hash(req);
  1976. }
  1977. static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
  1978. {
  1979. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1980. }
  1981. static int __maybe_unused
  1982. artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
  1983. {
  1984. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  1985. }
  1986. static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
  1987. {
  1988. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  1989. }
  1990. static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
  1991. {
  1992. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1993. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1994. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1995. return artpec6_crypto_prepare_submit_hash(req);
  1996. }
  1997. static int __maybe_unused
  1998. artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
  1999. {
  2000. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2001. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  2002. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2003. return artpec6_crypto_prepare_submit_hash(req);
  2004. }
  2005. static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
  2006. {
  2007. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2008. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  2009. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2010. return artpec6_crypto_prepare_submit_hash(req);
  2011. }
  2012. static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
  2013. const char *base_hash_name)
  2014. {
  2015. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2016. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  2017. sizeof(struct artpec6_hash_request_context));
  2018. memset(tfm_ctx, 0, sizeof(*tfm_ctx));
  2019. if (base_hash_name) {
  2020. struct crypto_shash *child;
  2021. child = crypto_alloc_shash(base_hash_name, 0,
  2022. CRYPTO_ALG_NEED_FALLBACK);
  2023. if (IS_ERR(child))
  2024. return PTR_ERR(child);
  2025. tfm_ctx->child_hash = child;
  2026. }
  2027. return 0;
  2028. }
  2029. static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
  2030. {
  2031. return artpec6_crypto_ahash_init_common(tfm, NULL);
  2032. }
  2033. static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
  2034. {
  2035. return artpec6_crypto_ahash_init_common(tfm, "sha256");
  2036. }
  2037. static int __maybe_unused
  2038. artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
  2039. {
  2040. return artpec6_crypto_ahash_init_common(tfm, "sha384");
  2041. }
  2042. static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
  2043. {
  2044. return artpec6_crypto_ahash_init_common(tfm, "sha512");
  2045. }
  2046. static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
  2047. {
  2048. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2049. if (tfm_ctx->child_hash)
  2050. crypto_free_shash(tfm_ctx->child_hash);
  2051. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  2052. tfm_ctx->hmac_key_length = 0;
  2053. }
  2054. static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
  2055. {
  2056. const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2057. struct artpec6_hash_export_state *state = out;
  2058. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2059. enum artpec6_crypto_variant variant = ac->variant;
  2060. BUILD_BUG_ON(sizeof(state->partial_buffer) !=
  2061. sizeof(ctx->partial_buffer));
  2062. BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
  2063. state->digcnt = ctx->digcnt;
  2064. state->partial_bytes = ctx->partial_bytes;
  2065. state->hash_flags = ctx->hash_flags;
  2066. if (variant == ARTPEC6_CRYPTO)
  2067. state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
  2068. else
  2069. state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
  2070. memcpy(state->partial_buffer, ctx->partial_buffer,
  2071. sizeof(state->partial_buffer));
  2072. memcpy(state->digeststate, ctx->digeststate,
  2073. sizeof(state->digeststate));
  2074. return 0;
  2075. }
  2076. static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
  2077. {
  2078. struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2079. const struct artpec6_hash_export_state *state = in;
  2080. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2081. enum artpec6_crypto_variant variant = ac->variant;
  2082. memset(ctx, 0, sizeof(*ctx));
  2083. ctx->digcnt = state->digcnt;
  2084. ctx->partial_bytes = state->partial_bytes;
  2085. ctx->hash_flags = state->hash_flags;
  2086. if (variant == ARTPEC6_CRYPTO)
  2087. ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
  2088. else
  2089. ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
  2090. memcpy(ctx->partial_buffer, state->partial_buffer,
  2091. sizeof(state->partial_buffer));
  2092. memcpy(ctx->digeststate, state->digeststate,
  2093. sizeof(state->digeststate));
  2094. return 0;
  2095. }
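/* Configure the PDMA elastic buffers and enable the OUT and IN
 * channels and their interrupt masks for the detected SoC variant.
 */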
  2096. static int init_crypto_hw(struct artpec6_crypto *ac)
  2097. {
  2098. enum artpec6_crypto_variant variant = ac->variant;
  2099. void __iomem *base = ac->base;
  2100. u32 out_descr_buf_size;
  2101. u32 out_data_buf_size;
  2102. u32 in_data_buf_size;
  2103. u32 in_descr_buf_size;
  2104. u32 in_stat_buf_size;
  2105. u32 in, out;
/*
 * The PDMA unit contains 1984 bytes of internal memory for the OUT
 * channels and 1024 bytes for the IN channel. This is an elastic
 * memory used to internally store the descriptors and data. The values
 * are specified in 64-byte increments. TrustZone buffers are not
 * used at this stage.
 */
  2113. out_data_buf_size = 16; /* 1024 bytes for data */
  2114. out_descr_buf_size = 15; /* 960 bytes for descriptors */
  2115. in_data_buf_size = 8; /* 512 bytes for data */
  2116. in_descr_buf_size = 4; /* 256 bytes for descriptors */
  2117. in_stat_buf_size = 4; /* 256 bytes for stat descrs */
  2118. BUILD_BUG_ON_MSG((out_data_buf_size
  2119. + out_descr_buf_size) * 64 > 1984,
  2120. "Invalid OUT configuration");
  2121. BUILD_BUG_ON_MSG((in_data_buf_size
  2122. + in_descr_buf_size
  2123. + in_stat_buf_size) * 64 > 1024,
  2124. "Invalid IN configuration");
  2125. in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
  2126. FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
  2127. FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
  2128. out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
  2129. FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
  2130. writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
  2131. writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
  2132. if (variant == ARTPEC6_CRYPTO) {
  2133. writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
  2134. writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
  2135. writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
  2136. A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2137. base + A6_PDMA_INTR_MASK);
  2138. } else {
  2139. writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
  2140. writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
  2141. writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
  2142. A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2143. base + A7_PDMA_INTR_MASK);
  2144. }
  2145. return 0;
  2146. }
  2147. static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
  2148. {
  2149. enum artpec6_crypto_variant variant = ac->variant;
  2150. void __iomem *base = ac->base;
  2151. if (variant == ARTPEC6_CRYPTO) {
  2152. writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
  2153. writel_relaxed(0, base + A6_PDMA_IN_CFG);
  2154. writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2155. } else {
  2156. writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
  2157. writel_relaxed(0, base + A7_PDMA_IN_CFG);
  2158. writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2159. }
  2160. writel_relaxed(0, base + PDMA_OUT_CFG);
  2161. }
  2162. static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
  2163. {
  2164. struct artpec6_crypto *ac = dev_id;
  2165. enum artpec6_crypto_variant variant = ac->variant;
  2166. void __iomem *base = ac->base;
  2167. u32 mask_in_data, mask_in_eop_flush;
  2168. u32 in_cmd_flush_stat, in_cmd_reg;
  2169. u32 ack_intr_reg;
  2170. u32 ack = 0;
  2171. u32 intr;
  2172. if (variant == ARTPEC6_CRYPTO) {
  2173. intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
  2174. mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
  2175. mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2176. in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
  2177. in_cmd_reg = A6_PDMA_IN_CMD;
  2178. ack_intr_reg = A6_PDMA_ACK_INTR;
  2179. } else {
  2180. intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
  2181. mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
  2182. mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2183. in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
  2184. in_cmd_reg = A7_PDMA_IN_CMD;
  2185. ack_intr_reg = A7_PDMA_ACK_INTR;
  2186. }
/* We get two interrupt notifications from each job.
 * The in_data interrupt means that all data has been written to
 * memory; we then issue a status flush command to write the per-job
 * status to its status vector. This ensures that the tasklet can
 * detect exactly how many submitted jobs have finished.
 */
  2194. if (intr & mask_in_data)
  2195. ack |= mask_in_data;
  2196. if (intr & mask_in_eop_flush)
  2197. ack |= mask_in_eop_flush;
  2198. else
  2199. writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
  2200. writel_relaxed(ack, base + ack_intr_reg);
  2201. if (intr & mask_in_eop_flush)
  2202. tasklet_schedule(&ac->task);
  2203. return IRQ_HANDLED;
  2204. }
  2205. /*------------------- Algorithm definitions ----------------------------------*/
  2206. /* Hashes */
  2207. static struct ahash_alg hash_algos[] = {
  2208. /* SHA-1 */
  2209. {
  2210. .init = artpec6_crypto_sha1_init,
  2211. .update = artpec6_crypto_hash_update,
  2212. .final = artpec6_crypto_hash_final,
  2213. .digest = artpec6_crypto_sha1_digest,
  2214. .import = artpec6_crypto_hash_import,
  2215. .export = artpec6_crypto_hash_export,
  2216. .halg.digestsize = SHA1_DIGEST_SIZE,
  2217. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2218. .halg.base = {
  2219. .cra_name = "sha1",
  2220. .cra_driver_name = "artpec-sha1",
  2221. .cra_priority = 300,
  2222. .cra_flags = CRYPTO_ALG_ASYNC,
  2223. .cra_blocksize = SHA1_BLOCK_SIZE,
  2224. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2225. .cra_alignmask = 3,
  2226. .cra_module = THIS_MODULE,
  2227. .cra_init = artpec6_crypto_ahash_init,
  2228. .cra_exit = artpec6_crypto_ahash_exit,
  2229. }
  2230. },
  2231. /* SHA-256 */
  2232. {
  2233. .init = artpec6_crypto_sha256_init,
  2234. .update = artpec6_crypto_hash_update,
  2235. .final = artpec6_crypto_hash_final,
  2236. .digest = artpec6_crypto_sha256_digest,
  2237. .import = artpec6_crypto_hash_import,
  2238. .export = artpec6_crypto_hash_export,
  2239. .halg.digestsize = SHA256_DIGEST_SIZE,
  2240. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2241. .halg.base = {
  2242. .cra_name = "sha256",
  2243. .cra_driver_name = "artpec-sha256",
  2244. .cra_priority = 300,
  2245. .cra_flags = CRYPTO_ALG_ASYNC,
  2246. .cra_blocksize = SHA256_BLOCK_SIZE,
  2247. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2248. .cra_alignmask = 3,
  2249. .cra_module = THIS_MODULE,
  2250. .cra_init = artpec6_crypto_ahash_init,
  2251. .cra_exit = artpec6_crypto_ahash_exit,
  2252. }
  2253. },
  2254. /* HMAC SHA-256 */
  2255. {
  2256. .init = artpec6_crypto_hmac_sha256_init,
  2257. .update = artpec6_crypto_hash_update,
  2258. .final = artpec6_crypto_hash_final,
  2259. .digest = artpec6_crypto_hmac_sha256_digest,
  2260. .import = artpec6_crypto_hash_import,
  2261. .export = artpec6_crypto_hash_export,
  2262. .setkey = artpec6_crypto_hash_set_key,
  2263. .halg.digestsize = SHA256_DIGEST_SIZE,
  2264. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2265. .halg.base = {
  2266. .cra_name = "hmac(sha256)",
  2267. .cra_driver_name = "artpec-hmac-sha256",
  2268. .cra_priority = 300,
  2269. .cra_flags = CRYPTO_ALG_ASYNC,
  2270. .cra_blocksize = SHA256_BLOCK_SIZE,
  2271. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2272. .cra_alignmask = 3,
  2273. .cra_module = THIS_MODULE,
  2274. .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
  2275. .cra_exit = artpec6_crypto_ahash_exit,
  2276. }
  2277. },
  2278. };
  2279. static struct ahash_alg artpec7_hash_algos[] = {
  2280. /* SHA-384 */
  2281. {
  2282. .init = artpec6_crypto_sha384_init,
  2283. .update = artpec6_crypto_hash_update,
  2284. .final = artpec6_crypto_hash_final,
  2285. .digest = artpec6_crypto_sha384_digest,
  2286. .import = artpec6_crypto_hash_import,
  2287. .export = artpec6_crypto_hash_export,
  2288. .halg.digestsize = SHA384_DIGEST_SIZE,
  2289. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2290. .halg.base = {
  2291. .cra_name = "sha384",
  2292. .cra_driver_name = "artpec-sha384",
  2293. .cra_priority = 300,
  2294. .cra_flags = CRYPTO_ALG_ASYNC,
  2295. .cra_blocksize = SHA384_BLOCK_SIZE,
  2296. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2297. .cra_alignmask = 3,
  2298. .cra_module = THIS_MODULE,
  2299. .cra_init = artpec6_crypto_ahash_init,
  2300. .cra_exit = artpec6_crypto_ahash_exit,
  2301. }
  2302. },
  2303. /* HMAC SHA-384 */
  2304. {
  2305. .init = artpec6_crypto_hmac_sha384_init,
  2306. .update = artpec6_crypto_hash_update,
  2307. .final = artpec6_crypto_hash_final,
  2308. .digest = artpec6_crypto_hmac_sha384_digest,
  2309. .import = artpec6_crypto_hash_import,
  2310. .export = artpec6_crypto_hash_export,
  2311. .setkey = artpec6_crypto_hash_set_key,
  2312. .halg.digestsize = SHA384_DIGEST_SIZE,
  2313. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2314. .halg.base = {
  2315. .cra_name = "hmac(sha384)",
  2316. .cra_driver_name = "artpec-hmac-sha384",
  2317. .cra_priority = 300,
  2318. .cra_flags = CRYPTO_ALG_ASYNC,
  2319. .cra_blocksize = SHA384_BLOCK_SIZE,
  2320. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2321. .cra_alignmask = 3,
  2322. .cra_module = THIS_MODULE,
  2323. .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
  2324. .cra_exit = artpec6_crypto_ahash_exit,
  2325. }
  2326. },
  2327. /* SHA-512 */
  2328. {
  2329. .init = artpec6_crypto_sha512_init,
  2330. .update = artpec6_crypto_hash_update,
  2331. .final = artpec6_crypto_hash_final,
  2332. .digest = artpec6_crypto_sha512_digest,
  2333. .import = artpec6_crypto_hash_import,
  2334. .export = artpec6_crypto_hash_export,
  2335. .halg.digestsize = SHA512_DIGEST_SIZE,
  2336. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2337. .halg.base = {
  2338. .cra_name = "sha512",
  2339. .cra_driver_name = "artpec-sha512",
  2340. .cra_priority = 300,
  2341. .cra_flags = CRYPTO_ALG_ASYNC,
  2342. .cra_blocksize = SHA512_BLOCK_SIZE,
  2343. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2344. .cra_alignmask = 3,
  2345. .cra_module = THIS_MODULE,
  2346. .cra_init = artpec6_crypto_ahash_init,
  2347. .cra_exit = artpec6_crypto_ahash_exit,
  2348. }
  2349. },
  2350. /* HMAC SHA-512 */
  2351. {
  2352. .init = artpec6_crypto_hmac_sha512_init,
  2353. .update = artpec6_crypto_hash_update,
  2354. .final = artpec6_crypto_hash_final,
  2355. .digest = artpec6_crypto_hmac_sha512_digest,
  2356. .import = artpec6_crypto_hash_import,
  2357. .export = artpec6_crypto_hash_export,
  2358. .setkey = artpec6_crypto_hash_set_key,
  2359. .halg.digestsize = SHA512_DIGEST_SIZE,
  2360. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2361. .halg.base = {
  2362. .cra_name = "hmac(sha512)",
  2363. .cra_driver_name = "artpec-hmac-sha512",
  2364. .cra_priority = 300,
  2365. .cra_flags = CRYPTO_ALG_ASYNC,
  2366. .cra_blocksize = SHA512_BLOCK_SIZE,
  2367. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2368. .cra_alignmask = 3,
  2369. .cra_module = THIS_MODULE,
  2370. .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
  2371. .cra_exit = artpec6_crypto_ahash_exit,
  2372. }
  2373. },
  2374. };
  2375. /* Crypto */
  2376. static struct skcipher_alg crypto_algos[] = {
  2377. /* AES - ECB */
  2378. {
  2379. .base = {
  2380. .cra_name = "ecb(aes)",
  2381. .cra_driver_name = "artpec6-ecb-aes",
  2382. .cra_priority = 300,
  2383. .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
  2384. CRYPTO_ALG_ASYNC,
  2385. .cra_blocksize = AES_BLOCK_SIZE,
  2386. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2387. .cra_alignmask = 3,
  2388. .cra_module = THIS_MODULE,
  2389. },
  2390. .min_keysize = AES_MIN_KEY_SIZE,
  2391. .max_keysize = AES_MAX_KEY_SIZE,
  2392. .setkey = artpec6_crypto_cipher_set_key,
  2393. .encrypt = artpec6_crypto_encrypt,
  2394. .decrypt = artpec6_crypto_decrypt,
  2395. .init = artpec6_crypto_aes_ecb_init,
  2396. .exit = artpec6_crypto_aes_exit,
  2397. },
  2398. /* AES - CTR */
  2399. {
  2400. .base = {
  2401. .cra_name = "ctr(aes)",
  2402. .cra_driver_name = "artpec6-ctr-aes",
  2403. .cra_priority = 300,
  2404. .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
  2405. CRYPTO_ALG_ASYNC |
  2406. CRYPTO_ALG_NEED_FALLBACK,
  2407. .cra_blocksize = 1,
  2408. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2409. .cra_alignmask = 3,
  2410. .cra_module = THIS_MODULE,
  2411. },
  2412. .min_keysize = AES_MIN_KEY_SIZE,
  2413. .max_keysize = AES_MAX_KEY_SIZE,
  2414. .ivsize = AES_BLOCK_SIZE,
  2415. .setkey = artpec6_crypto_cipher_set_key,
  2416. .encrypt = artpec6_crypto_ctr_encrypt,
  2417. .decrypt = artpec6_crypto_ctr_decrypt,
  2418. .init = artpec6_crypto_aes_ctr_init,
  2419. .exit = artpec6_crypto_aes_ctr_exit,
  2420. },
  2421. /* AES - CBC */
  2422. {
  2423. .base = {
  2424. .cra_name = "cbc(aes)",
  2425. .cra_driver_name = "artpec6-cbc-aes",
  2426. .cra_priority = 300,
  2427. .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
  2428. CRYPTO_ALG_ASYNC,
  2429. .cra_blocksize = AES_BLOCK_SIZE,
  2430. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2431. .cra_alignmask = 3,
  2432. .cra_module = THIS_MODULE,
  2433. },
  2434. .min_keysize = AES_MIN_KEY_SIZE,
  2435. .max_keysize = AES_MAX_KEY_SIZE,
  2436. .ivsize = AES_BLOCK_SIZE,
  2437. .setkey = artpec6_crypto_cipher_set_key,
  2438. .encrypt = artpec6_crypto_encrypt,
  2439. .decrypt = artpec6_crypto_decrypt,
  2440. .init = artpec6_crypto_aes_cbc_init,
  2441. .exit = artpec6_crypto_aes_exit
  2442. },
  2443. /* AES - XTS */
  2444. {
  2445. .base = {
  2446. .cra_name = "xts(aes)",
  2447. .cra_driver_name = "artpec6-xts-aes",
  2448. .cra_priority = 300,
  2449. .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
  2450. CRYPTO_ALG_ASYNC,
  2451. .cra_blocksize = 1,
  2452. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2453. .cra_alignmask = 3,
  2454. .cra_module = THIS_MODULE,
  2455. },
  2456. .min_keysize = 2*AES_MIN_KEY_SIZE,
  2457. .max_keysize = 2*AES_MAX_KEY_SIZE,
  2458. .ivsize = 16,
  2459. .setkey = artpec6_crypto_xts_set_key,
  2460. .encrypt = artpec6_crypto_encrypt,
  2461. .decrypt = artpec6_crypto_decrypt,
  2462. .init = artpec6_crypto_aes_xts_init,
  2463. .exit = artpec6_crypto_aes_exit,
  2464. },
  2465. };
  2466. static struct aead_alg aead_algos[] = {
  2467. {
  2468. .init = artpec6_crypto_aead_init,
  2469. .setkey = artpec6_crypto_aead_set_key,
  2470. .encrypt = artpec6_crypto_aead_encrypt,
  2471. .decrypt = artpec6_crypto_aead_decrypt,
  2472. .ivsize = GCM_AES_IV_SIZE,
  2473. .maxauthsize = AES_BLOCK_SIZE,
  2474. .base = {
  2475. .cra_name = "gcm(aes)",
  2476. .cra_driver_name = "artpec-gcm-aes",
  2477. .cra_priority = 300,
  2478. .cra_flags = CRYPTO_ALG_ASYNC |
  2479. CRYPTO_ALG_KERN_DRIVER_ONLY,
  2480. .cra_blocksize = 1,
  2481. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2482. .cra_alignmask = 3,
  2483. .cra_module = THIS_MODULE,
  2484. },
  2485. }
  2486. };
  2487. #ifdef CONFIG_DEBUG_FS
  2488. struct dbgfs_u32 {
  2489. char *name;
  2490. mode_t mode;
  2491. u32 *flag;
  2492. char *desc;
  2493. };
  2494. static struct dentry *dbgfs_root;
  2495. static void artpec6_crypto_init_debugfs(void)
  2496. {
  2497. dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
  2498. if (!dbgfs_root || IS_ERR(dbgfs_root)) {
  2499. dbgfs_root = NULL;
  2500. pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
  2501. return;
  2502. }
  2503. #ifdef CONFIG_FAULT_INJECTION
  2504. fault_create_debugfs_attr("fail_status_read", dbgfs_root,
  2505. &artpec6_crypto_fail_status_read);
  2506. fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
  2507. &artpec6_crypto_fail_dma_array_full);
  2508. #endif
  2509. }
  2510. static void artpec6_crypto_free_debugfs(void)
  2511. {
  2512. if (!dbgfs_root)
  2513. return;
  2514. debugfs_remove_recursive(dbgfs_root);
  2515. dbgfs_root = NULL;
  2516. }
  2517. #endif
  2518. static const struct of_device_id artpec6_crypto_of_match[] = {
  2519. { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
  2520. { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
  2521. {}
  2522. };
  2523. MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
  2524. static int artpec6_crypto_probe(struct platform_device *pdev)
  2525. {
  2526. const struct of_device_id *match;
  2527. enum artpec6_crypto_variant variant;
  2528. struct artpec6_crypto *ac;
  2529. struct device *dev = &pdev->dev;
  2530. void __iomem *base;
  2531. struct resource *res;
  2532. int irq;
  2533. int err;
  2534. if (artpec6_crypto_dev)
  2535. return -ENODEV;
  2536. match = of_match_node(artpec6_crypto_of_match, dev->of_node);
  2537. if (!match)
  2538. return -EINVAL;
  2539. variant = (enum artpec6_crypto_variant)match->data;
  2540. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2541. base = devm_ioremap_resource(&pdev->dev, res);
  2542. if (IS_ERR(base))
  2543. return PTR_ERR(base);
  2544. irq = platform_get_irq(pdev, 0);
  2545. if (irq < 0)
  2546. return -ENODEV;
  2547. ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
  2548. GFP_KERNEL);
  2549. if (!ac)
  2550. return -ENOMEM;
  2551. platform_set_drvdata(pdev, ac);
  2552. ac->variant = variant;
  2553. spin_lock_init(&ac->queue_lock);
  2554. INIT_LIST_HEAD(&ac->queue);
  2555. INIT_LIST_HEAD(&ac->pending);
  2556. timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
  2557. ac->base = base;
  2558. ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
  2559. sizeof(struct artpec6_crypto_dma_descriptors),
  2560. 64,
  2561. 0,
  2562. NULL);
  2563. if (!ac->dma_cache)
  2564. return -ENOMEM;
  2565. #ifdef CONFIG_DEBUG_FS
  2566. artpec6_crypto_init_debugfs();
  2567. #endif
  2568. tasklet_init(&ac->task, artpec6_crypto_task,
  2569. (unsigned long)ac);
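/* Allocate twice the maximum cache line size so the pad and zero
 * buffers can be aligned to a full cache line below.
 */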
  2570. ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2571. GFP_KERNEL);
  2572. if (!ac->pad_buffer)
  2573. return -ENOMEM;
  2574. ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
  2575. ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2576. GFP_KERNEL);
  2577. if (!ac->zero_buffer)
  2578. return -ENOMEM;
  2579. ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
  2580. err = init_crypto_hw(ac);
  2581. if (err)
  2582. goto free_cache;
  2583. err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
  2584. "artpec6-crypto", ac);
  2585. if (err)
  2586. goto disable_hw;
  2587. artpec6_crypto_dev = &pdev->dev;
  2588. err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2589. if (err) {
  2590. dev_err(dev, "Failed to register ahashes\n");
  2591. goto disable_hw;
  2592. }
  2593. if (variant != ARTPEC6_CRYPTO) {
  2594. err = crypto_register_ahashes(artpec7_hash_algos,
  2595. ARRAY_SIZE(artpec7_hash_algos));
  2596. if (err) {
  2597. dev_err(dev, "Failed to register ahashes\n");
  2598. goto unregister_ahashes;
  2599. }
  2600. }
  2601. err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2602. if (err) {
  2603. dev_err(dev, "Failed to register ciphers\n");
  2604. goto unregister_a7_ahashes;
  2605. }
  2606. err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2607. if (err) {
  2608. dev_err(dev, "Failed to register aeads\n");
  2609. goto unregister_algs;
  2610. }
  2611. return 0;
  2612. unregister_algs:
  2613. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2614. unregister_a7_ahashes:
  2615. if (variant != ARTPEC6_CRYPTO)
  2616. crypto_unregister_ahashes(artpec7_hash_algos,
  2617. ARRAY_SIZE(artpec7_hash_algos));
  2618. unregister_ahashes:
  2619. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2620. disable_hw:
  2621. artpec6_crypto_disable_hw(ac);
  2622. free_cache:
  2623. kmem_cache_destroy(ac->dma_cache);
  2624. return err;
  2625. }
  2626. static int artpec6_crypto_remove(struct platform_device *pdev)
  2627. {
  2628. struct artpec6_crypto *ac = platform_get_drvdata(pdev);
  2629. int irq = platform_get_irq(pdev, 0);
  2630. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2631. if (ac->variant != ARTPEC6_CRYPTO)
  2632. crypto_unregister_ahashes(artpec7_hash_algos,
  2633. ARRAY_SIZE(artpec7_hash_algos));
  2634. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2635. crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2636. tasklet_disable(&ac->task);
  2637. devm_free_irq(&pdev->dev, irq, ac);
  2638. tasklet_kill(&ac->task);
  2639. del_timer_sync(&ac->timer);
  2640. artpec6_crypto_disable_hw(ac);
  2641. kmem_cache_destroy(ac->dma_cache);
  2642. #ifdef CONFIG_DEBUG_FS
  2643. artpec6_crypto_free_debugfs();
  2644. #endif
  2645. return 0;
  2646. }
  2647. static struct platform_driver artpec6_crypto_driver = {
  2648. .probe = artpec6_crypto_probe,
  2649. .remove = artpec6_crypto_remove,
  2650. .driver = {
  2651. .name = "artpec6-crypto",
  2652. .owner = THIS_MODULE,
  2653. .of_match_table = artpec6_crypto_of_match,
  2654. },
  2655. };
  2656. module_platform_driver(artpec6_crypto_driver);
  2657. MODULE_AUTHOR("Axis Communications AB");
  2658. MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
  2659. MODULE_LICENSE("GPL");