/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX 32

#define PDMA_OUT_CFG 0x0000
#define PDMA_OUT_BUF_CFG 0x0004
#define PDMA_OUT_CMD 0x0008
#define PDMA_OUT_DESCRQ_PUSH 0x0010
#define PDMA_OUT_DESCRQ_STAT 0x0014

#define A6_PDMA_IN_CFG 0x0028
#define A6_PDMA_IN_BUF_CFG 0x002c
#define A6_PDMA_IN_CMD 0x0030
#define A6_PDMA_IN_STATQ_PUSH 0x0038
#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
#define A6_PDMA_IN_DESCRQ_STAT 0x0048
#define A6_PDMA_INTR_MASK 0x0068
#define A6_PDMA_ACK_INTR 0x006c
#define A6_PDMA_MASKED_INTR 0x0074

#define A7_PDMA_IN_CFG 0x002c
#define A7_PDMA_IN_BUF_CFG 0x0030
#define A7_PDMA_IN_CMD 0x0034
#define A7_PDMA_IN_STATQ_PUSH 0x003c
#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
#define A7_PDMA_IN_DESCRQ_STAT 0x004C
#define A7_PDMA_INTR_MASK 0x006c
#define A7_PDMA_ACK_INTR 0x0070
#define A7_PDMA_MASKED_INTR 0x0078

#define PDMA_OUT_CFG_EN BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)

#define PDMA_OUT_CMD_START BIT(0)
#define A6_PDMA_OUT_CMD_STOP BIT(3)
#define A7_PDMA_OUT_CMD_STOP BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define PDMA_IN_CFG_EN BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)

#define PDMA_IN_CMD_START BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
#define A6_PDMA_IN_CMD_STOP BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
#define A7_PDMA_IN_CMD_STOP BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)

#define A6_CRY_MD_OPER GENMASK(19, 16)
#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ BIT(24)

#define A7_CRY_MD_OPER GENMASK(11, 8)
#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ BIT(16)
/* DMA metadata constants */
#define regk_crypto_aes_cbc 0x00000002
#define regk_crypto_aes_ctr 0x00000003
#define regk_crypto_aes_ecb 0x00000001
#define regk_crypto_aes_gcm 0x00000004
#define regk_crypto_aes_xts 0x00000005
#define regk_crypto_cache 0x00000002
#define a6_regk_crypto_dlkey 0x0000000a
#define a7_regk_crypto_dlkey 0x0000000e
#define regk_crypto_ext 0x00000001
#define regk_crypto_hmac_sha1 0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_hmac_sha384 0x0000000b
#define regk_crypto_hmac_sha512 0x0000000d
#define regk_crypto_init 0x00000000
#define regk_crypto_key_128 0x00000000
#define regk_crypto_key_192 0x00000001
#define regk_crypto_key_256 0x00000002
#define regk_crypto_null 0x00000000
#define regk_crypto_sha1 0x00000006
#define regk_crypto_sha256 0x00000008
#define regk_crypto_sha384 0x0000000a
#define regk_crypto_sha512 0x0000000c
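
/*
 * Illustration only (not part of the driver): the 4-byte metadata word that
 * heads each DMA packet is built from the variant-specific fields above with
 * FIELD_PREP(). A sketch of what an ARTPEC-6 AES-CBC decrypt metadata word
 * could look like, assuming a 128-bit key (md is a placeholder name):
 *
 *	u32 md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
 *		 FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
 *		 A6_CRY_MD_CIPHER_DECR;
 *
 * The prepare functions later in this file build the real metadata words;
 * the field combinations used there are authoritative.
 */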

/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT 64

#define MODULE_NAME "Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1 1
#define ARTPEC6_CRYPTO_HASH_SHA256 2
#define ARTPEC6_CRYPTO_HASH_SHA384 3
#define ARTPEC6_CRYPTO_HASH_SHA512 4

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP| 12   |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
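
/*
 * Illustration only: a minimal sketch of how one packet is emitted with the
 * descriptor helpers defined below. The packet opens with the 4-byte metadata
 * word and is closed by the descriptor that has eop set (the second call
 * passes eop = true). md, key and keylen are placeholders and error handling
 * is omitted:
 *
 *	artpec6_crypto_setup_out_descr(common, &md, sizeof(md), false, false);
 *	artpec6_crypto_setup_out_descr(common, key, keylen, true, false);
 *
 * This mirrors how artpec6_crypto_prepare_hash() uploads the HMAC key as its
 * own packet further down in this file.
 */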

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA512_BLOCK_SIZE];
	char partial_buffer_out[SHA512_BLOCK_SIZE];
	char key_buffer[SHA512_BLOCK_SIZE];
	char pad_buffer[SHA512_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA512_BLOCK_SIZE];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA512_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
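
/*
 * Illustration only: typical use of the walk helpers above. The walk keeps an
 * (sg, offset) cursor over a scatterlist; chunklen/chunk_phys describe the
 * remainder of the current entry and advance moves the cursor. src and count
 * are placeholders and descriptor setup is elided:
 *
 *	struct artpec6_crypto_walk walk;
 *
 *	artpec6_crypto_walk_init(&walk, src);
 *	while (walk.sg && count) {
 *		size_t chunk = min(count, artpec6_crypto_walk_chunklen(&walk));
 *		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&walk);
 *
 *		// ...queue a descriptor covering (addr, chunk)...
 *		count -= chunk;
 *		artpec6_crypto_walk_advance(&walk, chunk);
 *	}
 *
 * This is the pattern used by artpec6_crypto_setup_sg_descrs_in()/_out()
 * below.
 */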

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);
		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat + dma->in_cnt - 1,
				sizeof(dma->stat[0]),
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));
	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
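
/*
 * Worked example (illustrative numbers): for an IN buffer at physical address
 * 0x1010 with count = 100 and ARTPEC_CACHE_LINE_MAX = 32, the loop above first
 * sees an unaligned address and bounces ALIGN(0x1010, 32) - 0x1010 = 16 bytes.
 * The next chunk starts at the aligned address 0x1020 with 84 bytes left,
 * which is rounded down to 64 bytes and DMA-mapped directly. The remaining
 * 20 bytes are smaller than a cache line and go through a bounce buffer as
 * well. Bounced data is copied back into the scatterlist by
 * artpec6_crypto_copy_bounce_buffers() when the request completes.
 */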

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
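
/*
 * Worked example (illustrative): for a plain SHA-256 digest of a 3-byte
 * message, create_hash_pad() is called with dgstlen = 3 and bitcount = 24.
 * The switch selects target = 56, mod = 64 and size_bytes = 8; after
 * target -= 1 we get diff = 3 and pad_bytes = 55 - 3 = 52. The buffer then
 * holds 0x80, 52 zero bytes and the 64-bit big-endian bit count, and the
 * function returns 52 + 8 + 1 = 61, bringing the padded message to exactly
 * one 64-byte block.
 */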

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
					     ctx->key_length);
		if (ret)
			return ret;

		{
			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
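
/*
 * Worked example (illustrative): with a 16-byte IV whose last four bytes are
 * ff ff ff f0, counter = 0xfffffff0. A 1024-byte request gives nblks = 64, so
 * counter + nblks wraps the 32-bit counter and the request is handed to the
 * fallback skcipher instead of the hardware.
 */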

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
		SHA512_DIGEST_SIZE : digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, must be in the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up the metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}
  1213. /* Finalize */
  1214. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
  1215. bool needtrim = contextsize != digestsize;
  1216. size_t hash_pad_len;
  1217. u64 digest_bits;
  1218. u32 oper;
  1219. if (variant == ARTPEC6_CRYPTO)
  1220. oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
  1221. else
  1222. oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
  1223. /* Write out the partial buffer if present */
  1224. if (req_ctx->partial_bytes) {
  1225. memcpy(req_ctx->partial_buffer_out,
  1226. req_ctx->partial_buffer,
  1227. req_ctx->partial_bytes);
  1228. error = artpec6_crypto_setup_out_descr(common,
  1229. req_ctx->partial_buffer_out,
  1230. req_ctx->partial_bytes,
  1231. false, true);
  1232. if (error)
  1233. return error;
  1234. req_ctx->digcnt += req_ctx->partial_bytes;
  1235. req_ctx->partial_bytes = 0;
  1236. }
  1237. if (req_ctx->hash_flags & HASH_FLAG_HMAC)
  1238. digest_bits = 8 * (req_ctx->digcnt + blocksize);
  1239. else
  1240. digest_bits = 8 * req_ctx->digcnt;
  1241. /* Add the hash pad */
  1242. hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
  1243. req_ctx->digcnt, digest_bits);
  1244. error = artpec6_crypto_setup_out_descr(common,
  1245. req_ctx->pad_buffer,
  1246. hash_pad_len, false,
  1247. true);
  1248. req_ctx->digcnt = 0;
  1249. if (error)
  1250. return error;
  1251. /* Descriptor for the final result */
  1252. error = artpec6_crypto_setup_in_descr(common, areq->result,
  1253. digestsize,
  1254. !needtrim);
  1255. if (error)
  1256. return error;
  1257. if (needtrim) {
  1258. /* Discard the extra context bytes for SHA-384 */
  1259. error = artpec6_crypto_setup_in_descr(common,
  1260. req_ctx->partial_buffer,
  1261. digestsize - contextsize, true);
  1262. if (error)
  1263. return error;
  1264. }
  1265. } else { /* This is not the final operation for this request */
  1266. if (!run_hw)
  1267. return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
  1268. /* Save the result to the context */
  1269. error = artpec6_crypto_setup_in_descr(common,
  1270. req_ctx->digeststate,
  1271. contextsize, false);
  1272. if (error)
  1273. return error;
  1274. /* fall through */
  1275. }
  1276. req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
  1277. HASH_FLAG_FINALIZE);
  1278. error = artpec6_crypto_terminate_in_descrs(common);
  1279. if (error)
  1280. return error;
  1281. error = artpec6_crypto_terminate_out_descrs(common);
  1282. if (error)
  1283. return error;
  1284. error = artpec6_crypto_dma_map_descs(common);
  1285. if (error)
  1286. return error;
  1287. return ARTPEC6_CRYPTO_PREPARE_HASH_START;
  1288. }
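
/*
 * Worked example of the update path above (illustrative only): for a
 * sha256 request (64-byte block size) with 10 bytes already buffered and
 * an update of 100 bytes, total_bytes is 110 and ready_bytes is
 * round_down(110, 64) = 64. The 10 buffered bytes plus 54 bytes from the
 * SG list go to the HW, digcnt grows by 64, and the remaining 46 bytes
 * are copied back into partial_buffer for the next update or the
 * finalization round.
 */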
  1289. static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
  1290. {
  1291. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1292. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1293. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
  1294. return 0;
  1295. }
  1296. static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
  1297. {
  1298. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1299. ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
  1300. 0,
  1301. CRYPTO_ALG_ASYNC |
  1302. CRYPTO_ALG_NEED_FALLBACK);
  1303. if (IS_ERR(ctx->fallback))
  1304. return PTR_ERR(ctx->fallback);
  1305. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1306. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
  1307. return 0;
  1308. }
  1309. static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
  1310. {
  1311. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1312. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1313. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
  1314. return 0;
  1315. }
  1316. static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
  1317. {
  1318. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1319. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1320. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
  1321. return 0;
  1322. }
  1323. static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
  1324. {
  1325. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1326. memset(ctx, 0, sizeof(*ctx));
  1327. }
  1328. static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
  1329. {
  1330. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1331. crypto_free_skcipher(ctx->fallback);
  1332. artpec6_crypto_aes_exit(tfm);
  1333. }
  1334. static int
  1335. artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1336. unsigned int keylen)
  1337. {
  1338. struct artpec6_cryptotfm_context *ctx =
  1339. crypto_skcipher_ctx(cipher);
  1340. switch (keylen) {
  1341. case 16:
  1342. case 24:
  1343. case 32:
  1344. break;
  1345. default:
  1346. crypto_skcipher_set_flags(cipher,
  1347. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1348. return -EINVAL;
  1349. }
  1350. memcpy(ctx->aes_key, key, keylen);
  1351. ctx->key_length = keylen;
  1352. return 0;
  1353. }
  1354. static int
  1355. artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1356. unsigned int keylen)
  1357. {
  1358. struct artpec6_cryptotfm_context *ctx =
  1359. crypto_skcipher_ctx(cipher);
  1360. int ret;
  1361. ret = xts_check_key(&cipher->base, key, keylen);
  1362. if (ret)
  1363. return ret;
  1364. switch (keylen) {
  1365. case 32:
  1366. case 48:
  1367. case 64:
  1368. break;
  1369. default:
  1370. crypto_skcipher_set_flags(cipher,
  1371. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1372. return -EINVAL;
  1373. }
  1374. memcpy(ctx->aes_key, key, keylen);
  1375. ctx->key_length = keylen;
  1376. return 0;
  1377. }
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @req: The async request to process
 *
 * @return 0 if the DMA job was successfully prepared
 *         <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %d!\n",
		       MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
		       MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}

	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
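
/*
 * Example of the CTR/XTS padding above (illustrative only): for a 20-byte
 * areq->cryptlen, ALIGN(20, AES_BLOCK_SIZE) is 32, so 12 bytes of
 * ac->pad_buffer are queued on the OUT list and the matching 12 bytes of
 * HW output are discarded into ac->pad_buffer on the IN list, keeping
 * both DMA streams a whole number of AES blocks long.
 */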
static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= AES_BLOCK_SIZE;

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);

	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
	if (ret)
		return ret;

	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     crypto_pad,
							     false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= AES_BLOCK_SIZE;

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the ciphertext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later comparison against the input tag.
		 */
		count = AES_BLOCK_SIZE;

		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, count, false);
			if (ret)
				return ret;
		} else {
			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								count);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
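
/*
 * Summary of the AES-GCM descriptor layout built above (derived from the
 * code, not from a HW reference): the OUT list carries the key metadata,
 * the key, the cipher metadata, the hw_ctx block (AAD and text lengths in
 * bits plus J0), the associated data zero-padded to 16 bytes and the
 * plaintext/ciphertext zero-padded to 16 bytes. The IN list receives the
 * ciphertext/plaintext, any pad bytes, and finally the 16-byte tag, either
 * appended to the destination SG list (encrypt) or captured in
 * req_ctx->decryption_tag for verification (decrypt). With the 12-byte GCM
 * IV used here, J0 is simply IV || 0x00000001, which is the pre-increment
 * counter value the HW expects.
 */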
static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
{
	struct artpec6_crypto_req_common *req;

	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
		req = list_first_entry(&ac->queue,
				       struct artpec6_crypto_req_common,
				       list);
		list_move_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);

		req->req->complete(req->req, -EINPROGRESS);
	}

	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we time out spuriously.
	 */
	if (ac->pending_count)
		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
	else
		del_timer(&ac->timer);
}

static void artpec6_crypto_timeout(struct timer_list *t)
{
	struct artpec6_crypto *ac = from_timer(ac, t, timer);

	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");

	tasklet_schedule(&ac->task);
}

static void artpec6_crypto_task(unsigned long data)
{
	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
	struct artpec6_crypto_req_common *req;
	struct artpec6_crypto_req_common *n;

	if (list_empty(&ac->pending)) {
		pr_debug("Spurious IRQ\n");
		return;
	}

	spin_lock_bh(&ac->queue_lock);

	list_for_each_entry_safe(req, n, &ac->pending, list) {
		struct artpec6_crypto_dma_descriptors *dma = req->dma;
		u32 stat;

		dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
					sizeof(dma->stat[0]),
					DMA_BIDIRECTIONAL);

		stat = req->dma->stat[req->dma->in_cnt-1];

		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
		pr_debug("Request %p status is %X\n", req, stat);
		if (!stat)
			break;

		/* Allow testing of timeout handling with fault injection */
#ifdef CONFIG_FAULT_INJECTION
		if (should_fail(&artpec6_crypto_fail_status_read, 1))
			continue;
#endif

		pr_debug("Completing request %p\n", req);

		list_del(&req->list);
		artpec6_crypto_dma_unmap_all(req);
		artpec6_crypto_copy_bounce_buffers(req);

		ac->pending_count--;
		artpec6_crypto_common_destroy(req);
		req->complete(req->req);
	}

	artpec6_crypto_process_queue(ac);

	spin_unlock_bh(&ac->queue_lock);
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}
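
/*
 * Note on the two CBC completions above: the skcipher API expects req->iv
 * to hold the last ciphertext block when the request completes, so that a
 * follow-up request can continue the chain. For encryption that block sits
 * at the end of the destination buffer; for decryption it must be taken
 * from the source buffer (the destination now holds plaintext), which is
 * why the two copies differ only in src vs dst.
 */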
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
{
	int result = 0;

	/* Verify the GCM authentication tag. */
	struct aead_request *areq = container_of(req,
						 struct aead_request, base);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);

	if (req_ctx->decrypt) {
		u8 input_tag[AES_BLOCK_SIZE];

		sg_pcopy_to_buffer(areq->src,
				   sg_nents(areq->src),
				   input_tag,
				   AES_BLOCK_SIZE,
				   areq->assoclen + areq->cryptlen -
				   AES_BLOCK_SIZE);

		if (memcmp(req_ctx->decryption_tag,
			   input_tag,
			   AES_BLOCK_SIZE)) {
			pr_debug("***EBADMSG:\n");
			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
					     input_tag, AES_BLOCK_SIZE, true);
			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
					     req_ctx->decryption_tag,
					     AES_BLOCK_SIZE, true);

			result = -EBADMSG;
		}
	}

	req->complete(req, result);
}

static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
	req->complete(req, 0);
}
/*------------------- Hash functions -----------------------------------------*/
static int
artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
	size_t blocksize;
	int ret;

	if (!keylen) {
		pr_err("Invalid length (%d) of HMAC key\n",
		       keylen);
		return -EINVAL;
	}

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	if (keylen > blocksize) {
		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);

		hdesc->tfm = tfm_ctx->child_hash;
		hdesc->flags = crypto_ahash_get_flags(tfm) &
			       CRYPTO_TFM_REQ_MAY_SLEEP;

		tfm_ctx->hmac_key_length = blocksize;
		ret = crypto_shash_digest(hdesc, key, keylen,
					  tfm_ctx->hmac_key);
		if (ret)
			return ret;
	} else {
		memcpy(tfm_ctx->hmac_key, key, keylen);
		tfm_ctx->hmac_key_length = keylen;
	}

	return 0;
}
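
/*
 * Example of the key handling above (illustrative only): a 100-byte key
 * for hmac(sha256) exceeds the 64-byte block size, so it is replaced by
 * its 32-byte sha256 digest written into the zeroed hmac_key buffer while
 * hmac_key_length is set to the full block size, i.e. the digest is
 * implicitly zero-padded to 64 bytes as HMAC requires. A 20-byte key is
 * simply copied and used as-is.
 */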
  1860. static int
  1861. artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
  1862. {
  1863. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1864. enum artpec6_crypto_variant variant = ac->variant;
  1865. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1866. u32 oper;
  1867. memset(req_ctx, 0, sizeof(*req_ctx));
  1868. req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
  1869. if (hmac)
  1870. req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
  1871. switch (type) {
  1872. case ARTPEC6_CRYPTO_HASH_SHA1:
  1873. oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
  1874. break;
  1875. case ARTPEC6_CRYPTO_HASH_SHA256:
  1876. oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
  1877. break;
  1878. case ARTPEC6_CRYPTO_HASH_SHA384:
  1879. oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
  1880. break;
  1881. case ARTPEC6_CRYPTO_HASH_SHA512:
  1882. oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
  1883. break;
  1884. default:
  1885. pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
  1886. return -EINVAL;
  1887. }
  1888. if (variant == ARTPEC6_CRYPTO)
  1889. req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
  1890. else
  1891. req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
  1892. return 0;
  1893. }
  1894. static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
  1895. {
  1896. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1897. int ret;
  1898. if (!req_ctx->common.dma) {
  1899. ret = artpec6_crypto_common_init(&req_ctx->common,
  1900. &req->base,
  1901. artpec6_crypto_complete_hash,
  1902. NULL, 0);
  1903. if (ret)
  1904. return ret;
  1905. }
  1906. ret = artpec6_crypto_prepare_hash(req);
  1907. switch (ret) {
  1908. case ARTPEC6_CRYPTO_PREPARE_HASH_START:
  1909. ret = artpec6_crypto_submit(&req_ctx->common);
  1910. break;
  1911. case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
  1912. ret = 0;
  1913. /* Fallthrough */
  1914. default:
  1915. artpec6_crypto_common_destroy(&req_ctx->common);
  1916. break;
  1917. }
  1918. return ret;
  1919. }
  1920. static int artpec6_crypto_hash_final(struct ahash_request *req)
  1921. {
  1922. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1923. req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
  1924. return artpec6_crypto_prepare_submit_hash(req);
  1925. }
  1926. static int artpec6_crypto_hash_update(struct ahash_request *req)
  1927. {
  1928. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1929. req_ctx->hash_flags |= HASH_FLAG_UPDATE;
  1930. return artpec6_crypto_prepare_submit_hash(req);
  1931. }
  1932. static int artpec6_crypto_sha1_init(struct ahash_request *req)
  1933. {
  1934. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1935. }
  1936. static int artpec6_crypto_sha1_digest(struct ahash_request *req)
  1937. {
  1938. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1939. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1940. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1941. return artpec6_crypto_prepare_submit_hash(req);
  1942. }
  1943. static int artpec6_crypto_sha256_init(struct ahash_request *req)
  1944. {
  1945. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1946. }
  1947. static int artpec6_crypto_sha256_digest(struct ahash_request *req)
  1948. {
  1949. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1950. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1951. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1952. return artpec6_crypto_prepare_submit_hash(req);
  1953. }
  1954. static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
  1955. {
  1956. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1957. }
  1958. static int __maybe_unused
  1959. artpec6_crypto_sha384_digest(struct ahash_request *req)
  1960. {
  1961. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1962. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1963. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1964. return artpec6_crypto_prepare_submit_hash(req);
  1965. }
  1966. static int artpec6_crypto_sha512_init(struct ahash_request *req)
  1967. {
  1968. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1969. }
  1970. static int artpec6_crypto_sha512_digest(struct ahash_request *req)
  1971. {
  1972. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1973. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1974. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1975. return artpec6_crypto_prepare_submit_hash(req);
  1976. }
  1977. static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
  1978. {
  1979. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1980. }
  1981. static int __maybe_unused
  1982. artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
  1983. {
  1984. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  1985. }
  1986. static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
  1987. {
  1988. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  1989. }
  1990. static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
  1991. {
  1992. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1993. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1994. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1995. return artpec6_crypto_prepare_submit_hash(req);
  1996. }
  1997. static int __maybe_unused
  1998. artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
  1999. {
  2000. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2001. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  2002. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2003. return artpec6_crypto_prepare_submit_hash(req);
  2004. }
  2005. static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
  2006. {
  2007. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2008. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  2009. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2010. return artpec6_crypto_prepare_submit_hash(req);
  2011. }
  2012. static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
  2013. const char *base_hash_name)
  2014. {
  2015. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2016. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  2017. sizeof(struct artpec6_hash_request_context));
  2018. memset(tfm_ctx, 0, sizeof(*tfm_ctx));
  2019. if (base_hash_name) {
  2020. struct crypto_shash *child;
  2021. child = crypto_alloc_shash(base_hash_name, 0,
  2022. CRYPTO_ALG_NEED_FALLBACK);
  2023. if (IS_ERR(child))
  2024. return PTR_ERR(child);
  2025. tfm_ctx->child_hash = child;
  2026. }
  2027. return 0;
  2028. }
  2029. static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
  2030. {
  2031. return artpec6_crypto_ahash_init_common(tfm, NULL);
  2032. }
  2033. static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
  2034. {
  2035. return artpec6_crypto_ahash_init_common(tfm, "sha256");
  2036. }
  2037. static int __maybe_unused
  2038. artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
  2039. {
  2040. return artpec6_crypto_ahash_init_common(tfm, "sha384");
  2041. }
  2042. static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
  2043. {
  2044. return artpec6_crypto_ahash_init_common(tfm, "sha512");
  2045. }
  2046. static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
  2047. {
  2048. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2049. if (tfm_ctx->child_hash)
  2050. crypto_free_shash(tfm_ctx->child_hash);
  2051. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  2052. tfm_ctx->hmac_key_length = 0;
  2053. }
  2054. static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
  2055. {
  2056. const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2057. struct artpec6_hash_export_state *state = out;
  2058. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2059. enum artpec6_crypto_variant variant = ac->variant;
  2060. BUILD_BUG_ON(sizeof(state->partial_buffer) !=
  2061. sizeof(ctx->partial_buffer));
  2062. BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
  2063. state->digcnt = ctx->digcnt;
  2064. state->partial_bytes = ctx->partial_bytes;
  2065. state->hash_flags = ctx->hash_flags;
  2066. if (variant == ARTPEC6_CRYPTO)
  2067. state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
  2068. else
  2069. state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
  2070. memcpy(state->partial_buffer, ctx->partial_buffer,
  2071. sizeof(state->partial_buffer));
  2072. memcpy(state->digeststate, ctx->digeststate,
  2073. sizeof(state->digeststate));
  2074. return 0;
  2075. }
  2076. static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
  2077. {
  2078. struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2079. const struct artpec6_hash_export_state *state = in;
  2080. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2081. enum artpec6_crypto_variant variant = ac->variant;
  2082. memset(ctx, 0, sizeof(*ctx));
  2083. ctx->digcnt = state->digcnt;
  2084. ctx->partial_bytes = state->partial_bytes;
  2085. ctx->hash_flags = state->hash_flags;
  2086. if (variant == ARTPEC6_CRYPTO)
  2087. ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
  2088. else
  2089. ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
  2090. memcpy(ctx->partial_buffer, state->partial_buffer,
  2091. sizeof(state->partial_buffer));
  2092. memcpy(ctx->digeststate, state->digeststate,
  2093. sizeof(state->digeststate));
  2094. return 0;
  2095. }
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");

	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
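
/*
 * Sanity check of the sizes chosen above: OUT uses (16 + 15) * 64 = 1984
 * bytes, exactly the available OUT memory, and IN uses (8 + 4 + 4) * 64 =
 * 1024 bytes, exactly the available IN memory, which is what the two
 * BUILD_BUG_ON_MSG() checks assert at compile time.
 */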
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}

static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data means all data was sent to memory and then
	 * we request a status flush command to write the per-job
	 * status to its status vector. This ensures that the
	 * tasklet can detect exactly how many submitted jobs
	 * have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
  2205. /*------------------- Algorithm definitions ----------------------------------*/
  2206. /* Hashes */
  2207. static struct ahash_alg hash_algos[] = {
  2208. /* SHA-1 */
  2209. {
  2210. .init = artpec6_crypto_sha1_init,
  2211. .update = artpec6_crypto_hash_update,
  2212. .final = artpec6_crypto_hash_final,
  2213. .digest = artpec6_crypto_sha1_digest,
  2214. .import = artpec6_crypto_hash_import,
  2215. .export = artpec6_crypto_hash_export,
  2216. .halg.digestsize = SHA1_DIGEST_SIZE,
  2217. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2218. .halg.base = {
  2219. .cra_name = "sha1",
  2220. .cra_driver_name = "artpec-sha1",
  2221. .cra_priority = 300,
  2222. .cra_flags = CRYPTO_ALG_ASYNC,
  2223. .cra_blocksize = SHA1_BLOCK_SIZE,
  2224. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2225. .cra_alignmask = 3,
  2226. .cra_module = THIS_MODULE,
  2227. .cra_init = artpec6_crypto_ahash_init,
  2228. .cra_exit = artpec6_crypto_ahash_exit,
  2229. }
  2230. },
  2231. /* SHA-256 */
  2232. {
  2233. .init = artpec6_crypto_sha256_init,
  2234. .update = artpec6_crypto_hash_update,
  2235. .final = artpec6_crypto_hash_final,
  2236. .digest = artpec6_crypto_sha256_digest,
  2237. .import = artpec6_crypto_hash_import,
  2238. .export = artpec6_crypto_hash_export,
  2239. .halg.digestsize = SHA256_DIGEST_SIZE,
  2240. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2241. .halg.base = {
  2242. .cra_name = "sha256",
  2243. .cra_driver_name = "artpec-sha256",
  2244. .cra_priority = 300,
  2245. .cra_flags = CRYPTO_ALG_ASYNC,
  2246. .cra_blocksize = SHA256_BLOCK_SIZE,
  2247. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2248. .cra_alignmask = 3,
  2249. .cra_module = THIS_MODULE,
  2250. .cra_init = artpec6_crypto_ahash_init,
  2251. .cra_exit = artpec6_crypto_ahash_exit,
  2252. }
  2253. },
  2254. /* HMAC SHA-256 */
  2255. {
  2256. .init = artpec6_crypto_hmac_sha256_init,
  2257. .update = artpec6_crypto_hash_update,
  2258. .final = artpec6_crypto_hash_final,
  2259. .digest = artpec6_crypto_hmac_sha256_digest,
  2260. .import = artpec6_crypto_hash_import,
  2261. .export = artpec6_crypto_hash_export,
  2262. .setkey = artpec6_crypto_hash_set_key,
  2263. .halg.digestsize = SHA256_DIGEST_SIZE,
  2264. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2265. .halg.base = {
  2266. .cra_name = "hmac(sha256)",
  2267. .cra_driver_name = "artpec-hmac-sha256",
  2268. .cra_priority = 300,
  2269. .cra_flags = CRYPTO_ALG_ASYNC,
  2270. .cra_blocksize = SHA256_BLOCK_SIZE,
  2271. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2272. .cra_alignmask = 3,
  2273. .cra_module = THIS_MODULE,
  2274. .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
  2275. .cra_exit = artpec6_crypto_ahash_exit,
  2276. }
  2277. },
  2278. };
  2279. static struct ahash_alg artpec7_hash_algos[] = {
  2280. /* SHA-384 */
  2281. {
  2282. .init = artpec6_crypto_sha384_init,
  2283. .update = artpec6_crypto_hash_update,
  2284. .final = artpec6_crypto_hash_final,
  2285. .digest = artpec6_crypto_sha384_digest,
  2286. .import = artpec6_crypto_hash_import,
  2287. .export = artpec6_crypto_hash_export,
  2288. .halg.digestsize = SHA384_DIGEST_SIZE,
  2289. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2290. .halg.base = {
  2291. .cra_name = "sha384",
  2292. .cra_driver_name = "artpec-sha384",
  2293. .cra_priority = 300,
  2294. .cra_flags = CRYPTO_ALG_ASYNC,
  2295. .cra_blocksize = SHA384_BLOCK_SIZE,
  2296. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2297. .cra_alignmask = 3,
  2298. .cra_module = THIS_MODULE,
  2299. .cra_init = artpec6_crypto_ahash_init,
  2300. .cra_exit = artpec6_crypto_ahash_exit,
  2301. }
  2302. },
  2303. /* HMAC SHA-384 */
  2304. {
  2305. .init = artpec6_crypto_hmac_sha384_init,
  2306. .update = artpec6_crypto_hash_update,
  2307. .final = artpec6_crypto_hash_final,
  2308. .digest = artpec6_crypto_hmac_sha384_digest,
  2309. .import = artpec6_crypto_hash_import,
  2310. .export = artpec6_crypto_hash_export,
  2311. .setkey = artpec6_crypto_hash_set_key,
  2312. .halg.digestsize = SHA384_DIGEST_SIZE,
  2313. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2314. .halg.base = {
  2315. .cra_name = "hmac(sha384)",
  2316. .cra_driver_name = "artpec-hmac-sha384",
  2317. .cra_priority = 300,
  2318. .cra_flags = CRYPTO_ALG_ASYNC,
  2319. .cra_blocksize = SHA384_BLOCK_SIZE,
  2320. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2321. .cra_alignmask = 3,
  2322. .cra_module = THIS_MODULE,
  2323. .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
  2324. .cra_exit = artpec6_crypto_ahash_exit,
  2325. }
  2326. },
  2327. /* SHA-512 */
  2328. {
  2329. .init = artpec6_crypto_sha512_init,
  2330. .update = artpec6_crypto_hash_update,
  2331. .final = artpec6_crypto_hash_final,
  2332. .digest = artpec6_crypto_sha512_digest,
  2333. .import = artpec6_crypto_hash_import,
  2334. .export = artpec6_crypto_hash_export,
  2335. .halg.digestsize = SHA512_DIGEST_SIZE,
  2336. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2337. .halg.base = {
  2338. .cra_name = "sha512",
  2339. .cra_driver_name = "artpec-sha512",
  2340. .cra_priority = 300,
  2341. .cra_flags = CRYPTO_ALG_ASYNC,
  2342. .cra_blocksize = SHA512_BLOCK_SIZE,
  2343. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2344. .cra_alignmask = 3,
  2345. .cra_module = THIS_MODULE,
  2346. .cra_init = artpec6_crypto_ahash_init,
  2347. .cra_exit = artpec6_crypto_ahash_exit,
  2348. }
  2349. },
  2350. /* HMAC SHA-512 */
  2351. {
  2352. .init = artpec6_crypto_hmac_sha512_init,
  2353. .update = artpec6_crypto_hash_update,
  2354. .final = artpec6_crypto_hash_final,
  2355. .digest = artpec6_crypto_hmac_sha512_digest,
  2356. .import = artpec6_crypto_hash_import,
  2357. .export = artpec6_crypto_hash_export,
  2358. .setkey = artpec6_crypto_hash_set_key,
  2359. .halg.digestsize = SHA512_DIGEST_SIZE,
  2360. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2361. .halg.base = {
  2362. .cra_name = "hmac(sha512)",
  2363. .cra_driver_name = "artpec-hmac-sha512",
  2364. .cra_priority = 300,
  2365. .cra_flags = CRYPTO_ALG_ASYNC,
  2366. .cra_blocksize = SHA512_BLOCK_SIZE,
  2367. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2368. .cra_alignmask = 3,
  2369. .cra_module = THIS_MODULE,
  2370. .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
  2371. .cra_exit = artpec6_crypto_ahash_exit,
  2372. }
  2373. },
  2374. };
  2375. /* Crypto */
  2376. static struct skcipher_alg crypto_algos[] = {
  2377. /* AES - ECB */
  2378. {
  2379. .base = {
  2380. .cra_name = "ecb(aes)",
  2381. .cra_driver_name = "artpec6-ecb-aes",
  2382. .cra_priority = 300,
  2383. .cra_flags = CRYPTO_ALG_ASYNC,
  2384. .cra_blocksize = AES_BLOCK_SIZE,
  2385. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2386. .cra_alignmask = 3,
  2387. .cra_module = THIS_MODULE,
  2388. },
  2389. .min_keysize = AES_MIN_KEY_SIZE,
  2390. .max_keysize = AES_MAX_KEY_SIZE,
  2391. .setkey = artpec6_crypto_cipher_set_key,
  2392. .encrypt = artpec6_crypto_encrypt,
  2393. .decrypt = artpec6_crypto_decrypt,
  2394. .init = artpec6_crypto_aes_ecb_init,
  2395. .exit = artpec6_crypto_aes_exit,
  2396. },
  2397. /* AES - CTR */
  2398. {
  2399. .base = {
  2400. .cra_name = "ctr(aes)",
  2401. .cra_driver_name = "artpec6-ctr-aes",
  2402. .cra_priority = 300,
  2403. .cra_flags = CRYPTO_ALG_ASYNC |
  2404. CRYPTO_ALG_NEED_FALLBACK,
  2405. .cra_blocksize = 1,
  2406. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2407. .cra_alignmask = 3,
  2408. .cra_module = THIS_MODULE,
  2409. },
  2410. .min_keysize = AES_MIN_KEY_SIZE,
  2411. .max_keysize = AES_MAX_KEY_SIZE,
  2412. .ivsize = AES_BLOCK_SIZE,
  2413. .setkey = artpec6_crypto_cipher_set_key,
  2414. .encrypt = artpec6_crypto_ctr_encrypt,
  2415. .decrypt = artpec6_crypto_ctr_decrypt,
  2416. .init = artpec6_crypto_aes_ctr_init,
  2417. .exit = artpec6_crypto_aes_ctr_exit,
  2418. },
  2419. /* AES - CBC */
  2420. {
  2421. .base = {
  2422. .cra_name = "cbc(aes)",
  2423. .cra_driver_name = "artpec6-cbc-aes",
  2424. .cra_priority = 300,
  2425. .cra_flags = CRYPTO_ALG_ASYNC,
  2426. .cra_blocksize = AES_BLOCK_SIZE,
  2427. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2428. .cra_alignmask = 3,
  2429. .cra_module = THIS_MODULE,
  2430. },
  2431. .min_keysize = AES_MIN_KEY_SIZE,
  2432. .max_keysize = AES_MAX_KEY_SIZE,
  2433. .ivsize = AES_BLOCK_SIZE,
  2434. .setkey = artpec6_crypto_cipher_set_key,
  2435. .encrypt = artpec6_crypto_encrypt,
  2436. .decrypt = artpec6_crypto_decrypt,
  2437. .init = artpec6_crypto_aes_cbc_init,
  2438. .exit = artpec6_crypto_aes_exit
  2439. },
  2440. /* AES - XTS */
  2441. {
  2442. .base = {
  2443. .cra_name = "xts(aes)",
  2444. .cra_driver_name = "artpec6-xts-aes",
  2445. .cra_priority = 300,
  2446. .cra_flags = CRYPTO_ALG_ASYNC,
  2447. .cra_blocksize = 1,
  2448. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2449. .cra_alignmask = 3,
  2450. .cra_module = THIS_MODULE,
  2451. },
  2452. .min_keysize = 2*AES_MIN_KEY_SIZE,
  2453. .max_keysize = 2*AES_MAX_KEY_SIZE,
  2454. .ivsize = 16,
  2455. .setkey = artpec6_crypto_xts_set_key,
  2456. .encrypt = artpec6_crypto_encrypt,
  2457. .decrypt = artpec6_crypto_decrypt,
  2458. .init = artpec6_crypto_aes_xts_init,
  2459. .exit = artpec6_crypto_aes_exit,
  2460. },
  2461. };
  2462. static struct aead_alg aead_algos[] = {
  2463. {
  2464. .init = artpec6_crypto_aead_init,
  2465. .setkey = artpec6_crypto_aead_set_key,
  2466. .encrypt = artpec6_crypto_aead_encrypt,
  2467. .decrypt = artpec6_crypto_aead_decrypt,
  2468. .ivsize = GCM_AES_IV_SIZE,
  2469. .maxauthsize = AES_BLOCK_SIZE,
  2470. .base = {
  2471. .cra_name = "gcm(aes)",
  2472. .cra_driver_name = "artpec-gcm-aes",
  2473. .cra_priority = 300,
  2474. .cra_flags = CRYPTO_ALG_ASYNC |
  2475. CRYPTO_ALG_KERN_DRIVER_ONLY,
  2476. .cra_blocksize = 1,
  2477. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2478. .cra_alignmask = 3,
  2479. .cra_module = THIS_MODULE,
  2480. },
  2481. }
  2482. };
  2483. #ifdef CONFIG_DEBUG_FS
  2484. struct dbgfs_u32 {
  2485. char *name;
  2486. mode_t mode;
  2487. u32 *flag;
  2488. char *desc;
  2489. };
  2490. static struct dentry *dbgfs_root;
  2491. static void artpec6_crypto_init_debugfs(void)
  2492. {
  2493. dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
  2494. if (!dbgfs_root || IS_ERR(dbgfs_root)) {
  2495. dbgfs_root = NULL;
  2496. pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
  2497. return;
  2498. }
  2499. #ifdef CONFIG_FAULT_INJECTION
  2500. fault_create_debugfs_attr("fail_status_read", dbgfs_root,
  2501. &artpec6_crypto_fail_status_read);
  2502. fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
  2503. &artpec6_crypto_fail_dma_array_full);
  2504. #endif
  2505. }
  2506. static void artpec6_crypto_free_debugfs(void)
  2507. {
  2508. if (!dbgfs_root)
  2509. return;
  2510. debugfs_remove_recursive(dbgfs_root);
  2511. dbgfs_root = NULL;
  2512. }
  2513. #endif
  2514. static const struct of_device_id artpec6_crypto_of_match[] = {
  2515. { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
  2516. { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
  2517. {}
  2518. };
  2519. MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
  2520. static int artpec6_crypto_probe(struct platform_device *pdev)
  2521. {
  2522. const struct of_device_id *match;
  2523. enum artpec6_crypto_variant variant;
  2524. struct artpec6_crypto *ac;
  2525. struct device *dev = &pdev->dev;
  2526. void __iomem *base;
  2527. struct resource *res;
  2528. int irq;
  2529. int err;
  2530. if (artpec6_crypto_dev)
  2531. return -ENODEV;
  2532. match = of_match_node(artpec6_crypto_of_match, dev->of_node);
  2533. if (!match)
  2534. return -EINVAL;
  2535. variant = (enum artpec6_crypto_variant)match->data;
  2536. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2537. base = devm_ioremap_resource(&pdev->dev, res);
  2538. if (IS_ERR(base))
  2539. return PTR_ERR(base);
  2540. irq = platform_get_irq(pdev, 0);
  2541. if (irq < 0)
  2542. return -ENODEV;
  2543. ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
  2544. GFP_KERNEL);
  2545. if (!ac)
  2546. return -ENOMEM;
  2547. platform_set_drvdata(pdev, ac);
  2548. ac->variant = variant;
  2549. spin_lock_init(&ac->queue_lock);
  2550. INIT_LIST_HEAD(&ac->queue);
  2551. INIT_LIST_HEAD(&ac->pending);
  2552. timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
  2553. ac->base = base;
  2554. ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
  2555. sizeof(struct artpec6_crypto_dma_descriptors),
  2556. 64,
  2557. 0,
  2558. NULL);
  2559. if (!ac->dma_cache)
  2560. return -ENOMEM;
  2561. #ifdef CONFIG_DEBUG_FS
  2562. artpec6_crypto_init_debugfs();
  2563. #endif
  2564. tasklet_init(&ac->task, artpec6_crypto_task,
  2565. (unsigned long)ac);
  2566. ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2567. GFP_KERNEL);
  2568. if (!ac->pad_buffer)
  2569. return -ENOMEM;
  2570. ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
  2571. ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2572. GFP_KERNEL);
  2573. if (!ac->zero_buffer)
  2574. return -ENOMEM;
  2575. ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
  2576. err = init_crypto_hw(ac);
  2577. if (err)
  2578. goto free_cache;
  2579. err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
  2580. "artpec6-crypto", ac);
  2581. if (err)
  2582. goto disable_hw;
  2583. artpec6_crypto_dev = &pdev->dev;
  2584. err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2585. if (err) {
  2586. dev_err(dev, "Failed to register ahashes\n");
  2587. goto disable_hw;
  2588. }
  2589. if (variant != ARTPEC6_CRYPTO) {
  2590. err = crypto_register_ahashes(artpec7_hash_algos,
  2591. ARRAY_SIZE(artpec7_hash_algos));
  2592. if (err) {
  2593. dev_err(dev, "Failed to register ahashes\n");
  2594. goto unregister_ahashes;
  2595. }
  2596. }
  2597. err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2598. if (err) {
  2599. dev_err(dev, "Failed to register ciphers\n");
  2600. goto unregister_a7_ahashes;
  2601. }
  2602. err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2603. if (err) {
  2604. dev_err(dev, "Failed to register aeads\n");
  2605. goto unregister_algs;
  2606. }
  2607. return 0;
  2608. unregister_algs:
  2609. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2610. unregister_a7_ahashes:
  2611. if (variant != ARTPEC6_CRYPTO)
  2612. crypto_unregister_ahashes(artpec7_hash_algos,
  2613. ARRAY_SIZE(artpec7_hash_algos));
  2614. unregister_ahashes:
  2615. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2616. disable_hw:
  2617. artpec6_crypto_disable_hw(ac);
  2618. free_cache:
  2619. kmem_cache_destroy(ac->dma_cache);
  2620. return err;
  2621. }
  2622. static int artpec6_crypto_remove(struct platform_device *pdev)
  2623. {
  2624. struct artpec6_crypto *ac = platform_get_drvdata(pdev);
  2625. int irq = platform_get_irq(pdev, 0);
  2626. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2627. if (ac->variant != ARTPEC6_CRYPTO)
  2628. crypto_unregister_ahashes(artpec7_hash_algos,
  2629. ARRAY_SIZE(artpec7_hash_algos));
  2630. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2631. crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2632. tasklet_disable(&ac->task);
  2633. devm_free_irq(&pdev->dev, irq, ac);
  2634. tasklet_kill(&ac->task);
  2635. del_timer_sync(&ac->timer);
  2636. artpec6_crypto_disable_hw(ac);
  2637. kmem_cache_destroy(ac->dma_cache);
  2638. #ifdef CONFIG_DEBUG_FS
  2639. artpec6_crypto_free_debugfs();
  2640. #endif
  2641. return 0;
  2642. }
  2643. static struct platform_driver artpec6_crypto_driver = {
  2644. .probe = artpec6_crypto_probe,
  2645. .remove = artpec6_crypto_remove,
  2646. .driver = {
  2647. .name = "artpec6-crypto",
  2648. .owner = THIS_MODULE,
  2649. .of_match_table = artpec6_crypto_of_match,
  2650. },
  2651. };
  2652. module_platform_driver(artpec6_crypto_driver);
  2653. MODULE_AUTHOR("Axis Communications AB");
  2654. MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
  2655. MODULE_LICENSE("GPL");