talitos.c

/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
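
/*
 * fill in a h/w descriptor pointer: SEC1 keeps the length in len1,
 * while SEC2+ uses len and carries the upper address bits in eptr
 */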
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
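
/*
 * reset a single channel, then restore its 36-bit addressing, done
 * writeback and done-interrupt configuration
 */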
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
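
/*
 * return the header of a queued descriptor: SEC2+ reads hdr directly;
 * SEC1 uses hdr1, and for chained requests (next_desc set) the header of
 * interest lives in the second descriptor placed after the link table
 */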
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;

		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
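
/*
 * top-half interrupt handlers: acknowledge the interrupt, hand channel
 * errors to talitos_error(), otherwise mask further done interrupts and
 * defer completion processing to the per-IRQ done tasklet
 */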
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
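
/*
 * aead_setkey - split the authenc() key blob into authentication and
 * encryption keys, store them back to back in ctx->key and DMA-map the
 * combined key for use by the descriptors
 */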
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
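
/*
 * unmap the src/dst scatterlists of a completed request; on SEC1 a
 * multi-entry destination was serviced through the bounce buffer, so
 * the result is copied back to the destination scatterlist first
 */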
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
				     authsize, areq->assoclen + areq->cryptlen);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		char icvdata[SHA512_DIGEST_SIZE];
		int nents = edesc->dst_nents ? : 1;
		unsigned int len = req->assoclen + req->cryptlen;

		/* auth check */
		if (nents > 1) {
			sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
					   len - authsize);
			icv = icvdata;
		} else {
			icv = (char *)sg_virt(req->dst) + len - authsize;
		}

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 *   stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}
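
/*
 * fill a descriptor pointer from a scatterlist: use the DMA address
 * directly when a single segment suffices, otherwise build a link table
 * chain and flag the pointer as a jump; elen accounts for extra bytes
 * (e.g. the ICV) appended to the mapped data
 */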
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed*/
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
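
/*
 * Worked example (not from the original driver source): for a SEC2-class
 * device handling an AEAD request with src_nents = 3, dst_nents = 3 and a
 * 20-byte SHA-1 ICV, the sizing above gives
 *
 *	dma_len   = (3 + 3 + 2) * sizeof(struct talitos_ptr) + 2 * 20
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len + ivsize
 *
 * i.e. eight link-table entries (the src and dst chains plus one spare
 * entry each for the associated data and the generated ICV), room for the
 * stashed and generated ICVs, and the IV copy tacked onto the very end of
 * the allocation.
 */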

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
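
/*
 * Illustrative note (not from the original driver source): decrypt can take
 * two routes.  When the SEC can decrypt and verify in a single pass
 * (TALITOS_FTR_HW_AUTH_CHECK, with either flat buffers or link tables whose
 * length field may include the ICV extent), DESC_HDR_MODE1_MDEU_CICV asks
 * the MDEU to check the ICV itself and the outcome is examined in
 * ipsec_esp_decrypt_hwauth_done().  Otherwise the incoming ICV is stashed
 * behind the link table and ipsec_esp_decrypt_swauth_done() compares it
 * against the ICV the hardware regenerated.
 */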

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return ablkcipher_setkey(cipher, key, keylen);

	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return -EINVAL;
}
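
/*
 * Illustrative note (not from the original driver source): only the three
 * standard AES key lengths are meaningful, so the wrapper above rejects
 * anything other than AES_KEYSIZE_128/192/256 (16, 24 or 32 bytes) before
 * the key is copied into the context and DMA-mapped by ablkcipher_setkey().
 */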

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->info, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
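
/*
 * Summary (not from the original driver source): the pointer slots filled
 * in by common_nonsnoop() for a COMMON_NONSNOOP_NO_AFEU descriptor end up
 * as
 *
 *	ptr[0]	unused			ptr[4]	cipher data out
 *	ptr[1]	IV in			ptr[5]	IV out (next IV for CBC)
 *	ptr[2]	cipher key		ptr[6]	unused
 *	ptr[3]	cipher data in
 *
 * Only ptr[3] and ptr[4] may go through a link table, which is why the
 * dma_sync above is needed solely when one of the talitos_sg_map() calls
 * returned more than one entry.
 */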

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
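
/*
 * Rough usage sketch, not part of the driver.  Ciphers registered here as
 * ablkciphers (e.g. "cbc(aes)" / "cbc-aes-talitos") are normally consumed
 * from other kernel code through the generic skcipher API; the hypothetical
 * helper below would land in ablkcipher_encrypt() above whenever this
 * driver wins the algorithm priority selection.
 */
#if 0
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[16] = {}, iv[16] = {};	/* all-zero key/IV, demo only */
	struct scatterlist sg;
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* sg data must be DMA-able */
	if (!req || !buf) {
		err = -ENOMEM;
		goto out_free;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* completes asynchronously via ablkcipher_done() */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free:
	kfree(buf);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
#endif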

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
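
/*
 * Illustrative note (not from the original driver source): the 64-byte
 * block above is exactly the Merkle-Damgard padding of an empty message
 * for the 512-bit-block digests handled here (MD5/SHA-1/SHA-224/SHA-256):
 * a 0x80 marker byte followed by zeros, with the trailing 64-bit length
 * field also zero.  Because the block is already padded,
 * DESC_HDR_MODE0_MDEU_PAD is cleared so the MDEU does not pad it a second
 * time.
 */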

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = (struct talitos_desc *)
					     (edesc->buf + edesc->dma_len);
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
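
/*
 * Illustrative note (not from the original driver source): on SEC1, when a
 * request has both previously buffered bytes and fresh scatterlist data,
 * the block above builds a second descriptor (desc2) right behind the edesc
 * buffer and chains it through desc->next_desc.  The first descriptor
 * hashes the buffered partial block with CONT set and PAD/DONE_NOTIFY
 * cleared, while desc2 (with INIT cleared, since the context is already
 * loaded) hashes the remaining data and delivers the final digest or the
 * updated context through its ptr[5].
 */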

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w from initializing the context with sha256 values */

	return 0;
}
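
/*
 * Illustrative note (not from the original driver source): SHA-224 is
 * SHA-256 with a different initial hash value and a truncated digest, so
 * seeding hw_context[0..7] with SHA224_H0..H7, zeroing the 64-bit bit
 * count in words 8-9, and then running the SHA-256 engine with swinit set
 * (so the hardware does not overwrite the context with its SHA-256
 * constants) yields a correct SHA-224 state; the 28-byte output length is
 * taken from crypto_ahash_digestsize() when the result is written out.
 */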

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		int offset;

		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
						 offset);
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}
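
/*
 * Worked example (not from the original driver source): with a 64-byte
 * block size, 10 bytes already buffered and a 100-byte update,
 * nbytes_to_hash = 110 and to_hash_later = 110 & 63 = 46, so 64 bytes are
 * hashed in this pass and the trailing 46 bytes are copied into the other
 * buf[] slot for the next update/final/finup.  If the total lands exactly
 * on a block boundary (and this is not the final request), one full block
 * is deliberately held back so there is still data to hash when the final
 * request arrives.
 */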

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}

static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
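
/*
 * Illustrative note (an interpretation, not from the original driver
 * source): the back-to-back dma_map_single()/dma_unmap_single() pairs in
 * ahash_init(), ahash_export() and ahash_import() act as portable cache
 * maintenance.  hw_context is handed to the hardware through the _nosync
 * pointer helpers, so a FROM_DEVICE map/unmap cycle makes the device's
 * writes visible to the CPU before the state is copied out, and a
 * TO_DEVICE cycle pushes CPU-written state back out before the next
 * descriptor consumes it.
 */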

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}
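
/*
 * Rough usage sketch, not part of the driver.  The hypothetical helper
 * below shows how another kernel user would drive the "hmac(sha256)" ahash
 * registered by this driver (setkey then digest); it is essentially the
 * same synchronous-wait pattern keyhash() above uses internally.
 */
#if 0
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       const void *data, unsigned int len,
			       u8 *out /* SHA256_DIGEST_SIZE bytes */)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keys longer than the block size are pre-hashed by ahash_setkey() */
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);	/* data must be DMA-able (kmalloc'd) */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
#endif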

struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
  1934. static struct talitos_alg_template driver_algs[] = {
  1935. /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
  1936. { .type = CRYPTO_ALG_TYPE_AEAD,
  1937. .alg.aead = {
  1938. .base = {
  1939. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1940. .cra_driver_name = "authenc-hmac-sha1-"
  1941. "cbc-aes-talitos",
  1942. .cra_blocksize = AES_BLOCK_SIZE,
  1943. .cra_flags = CRYPTO_ALG_ASYNC,
  1944. },
  1945. .ivsize = AES_BLOCK_SIZE,
  1946. .maxauthsize = SHA1_DIGEST_SIZE,
  1947. },
  1948. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  1949. DESC_HDR_SEL0_AESU |
  1950. DESC_HDR_MODE0_AESU_CBC |
  1951. DESC_HDR_SEL1_MDEUA |
  1952. DESC_HDR_MODE1_MDEU_INIT |
  1953. DESC_HDR_MODE1_MDEU_PAD |
  1954. DESC_HDR_MODE1_MDEU_SHA1_HMAC,
  1955. },
  1956. { .type = CRYPTO_ALG_TYPE_AEAD,
  1957. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  1958. .alg.aead = {
  1959. .base = {
  1960. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1961. .cra_driver_name = "authenc-hmac-sha1-"
  1962. "cbc-aes-talitos-hsna",
  1963. .cra_blocksize = AES_BLOCK_SIZE,
  1964. .cra_flags = CRYPTO_ALG_ASYNC,
  1965. },
  1966. .ivsize = AES_BLOCK_SIZE,
  1967. .maxauthsize = SHA1_DIGEST_SIZE,
  1968. },
  1969. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  1970. DESC_HDR_SEL0_AESU |
  1971. DESC_HDR_MODE0_AESU_CBC |
  1972. DESC_HDR_SEL1_MDEUA |
  1973. DESC_HDR_MODE1_MDEU_INIT |
  1974. DESC_HDR_MODE1_MDEU_PAD |
  1975. DESC_HDR_MODE1_MDEU_SHA1_HMAC,
  1976. },
  1977. { .type = CRYPTO_ALG_TYPE_AEAD,
  1978. .alg.aead = {
  1979. .base = {
  1980. .cra_name = "authenc(hmac(sha1),"
  1981. "cbc(des3_ede))",
  1982. .cra_driver_name = "authenc-hmac-sha1-"
  1983. "cbc-3des-talitos",
  1984. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1985. .cra_flags = CRYPTO_ALG_ASYNC,
  1986. },
  1987. .ivsize = DES3_EDE_BLOCK_SIZE,
  1988. .maxauthsize = SHA1_DIGEST_SIZE,
  1989. },
  1990. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  1991. DESC_HDR_SEL0_DEU |
  1992. DESC_HDR_MODE0_DEU_CBC |
  1993. DESC_HDR_MODE0_DEU_3DES |
  1994. DESC_HDR_SEL1_MDEUA |
  1995. DESC_HDR_MODE1_MDEU_INIT |
  1996. DESC_HDR_MODE1_MDEU_PAD |
  1997. DESC_HDR_MODE1_MDEU_SHA1_HMAC,
  1998. },
  1999. { .type = CRYPTO_ALG_TYPE_AEAD,
  2000. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2001. .alg.aead = {
  2002. .base = {
  2003. .cra_name = "authenc(hmac(sha1),"
  2004. "cbc(des3_ede))",
  2005. .cra_driver_name = "authenc-hmac-sha1-"
  2006. "cbc-3des-talitos-hsna",
  2007. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2008. .cra_flags = CRYPTO_ALG_ASYNC,
  2009. },
  2010. .ivsize = DES3_EDE_BLOCK_SIZE,
  2011. .maxauthsize = SHA1_DIGEST_SIZE,
  2012. },
  2013. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2014. DESC_HDR_SEL0_DEU |
  2015. DESC_HDR_MODE0_DEU_CBC |
  2016. DESC_HDR_MODE0_DEU_3DES |
  2017. DESC_HDR_SEL1_MDEUA |
  2018. DESC_HDR_MODE1_MDEU_INIT |
  2019. DESC_HDR_MODE1_MDEU_PAD |
  2020. DESC_HDR_MODE1_MDEU_SHA1_HMAC,
  2021. },
  2022. { .type = CRYPTO_ALG_TYPE_AEAD,
  2023. .alg.aead = {
  2024. .base = {
  2025. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  2026. .cra_driver_name = "authenc-hmac-sha224-"
  2027. "cbc-aes-talitos",
  2028. .cra_blocksize = AES_BLOCK_SIZE,
  2029. .cra_flags = CRYPTO_ALG_ASYNC,
  2030. },
  2031. .ivsize = AES_BLOCK_SIZE,
  2032. .maxauthsize = SHA224_DIGEST_SIZE,
  2033. },
  2034. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2035. DESC_HDR_SEL0_AESU |
  2036. DESC_HDR_MODE0_AESU_CBC |
  2037. DESC_HDR_SEL1_MDEUA |
  2038. DESC_HDR_MODE1_MDEU_INIT |
  2039. DESC_HDR_MODE1_MDEU_PAD |
  2040. DESC_HDR_MODE1_MDEU_SHA224_HMAC,
  2041. },
  2042. { .type = CRYPTO_ALG_TYPE_AEAD,
  2043. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2044. .alg.aead = {
  2045. .base = {
  2046. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  2047. .cra_driver_name = "authenc-hmac-sha224-"
  2048. "cbc-aes-talitos-hsna",
  2049. .cra_blocksize = AES_BLOCK_SIZE,
  2050. .cra_flags = CRYPTO_ALG_ASYNC,
  2051. },
  2052. .ivsize = AES_BLOCK_SIZE,
  2053. .maxauthsize = SHA224_DIGEST_SIZE,
  2054. },
  2055. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2056. DESC_HDR_SEL0_AESU |
  2057. DESC_HDR_MODE0_AESU_CBC |
  2058. DESC_HDR_SEL1_MDEUA |
  2059. DESC_HDR_MODE1_MDEU_INIT |
  2060. DESC_HDR_MODE1_MDEU_PAD |
  2061. DESC_HDR_MODE1_MDEU_SHA224_HMAC,
  2062. },
  2063. { .type = CRYPTO_ALG_TYPE_AEAD,
  2064. .alg.aead = {
  2065. .base = {
  2066. .cra_name = "authenc(hmac(sha224),"
  2067. "cbc(des3_ede))",
  2068. .cra_driver_name = "authenc-hmac-sha224-"
  2069. "cbc-3des-talitos",
  2070. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2071. .cra_flags = CRYPTO_ALG_ASYNC,
  2072. },
  2073. .ivsize = DES3_EDE_BLOCK_SIZE,
  2074. .maxauthsize = SHA224_DIGEST_SIZE,
  2075. },
  2076. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2077. DESC_HDR_SEL0_DEU |
  2078. DESC_HDR_MODE0_DEU_CBC |
  2079. DESC_HDR_MODE0_DEU_3DES |
  2080. DESC_HDR_SEL1_MDEUA |
  2081. DESC_HDR_MODE1_MDEU_INIT |
  2082. DESC_HDR_MODE1_MDEU_PAD |
  2083. DESC_HDR_MODE1_MDEU_SHA224_HMAC,
  2084. },
  2085. { .type = CRYPTO_ALG_TYPE_AEAD,
  2086. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2087. .alg.aead = {
  2088. .base = {
  2089. .cra_name = "authenc(hmac(sha224),"
  2090. "cbc(des3_ede))",
  2091. .cra_driver_name = "authenc-hmac-sha224-"
  2092. "cbc-3des-talitos-hsna",
  2093. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2094. .cra_flags = CRYPTO_ALG_ASYNC,
  2095. },
  2096. .ivsize = DES3_EDE_BLOCK_SIZE,
  2097. .maxauthsize = SHA224_DIGEST_SIZE,
  2098. },
  2099. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2100. DESC_HDR_SEL0_DEU |
  2101. DESC_HDR_MODE0_DEU_CBC |
  2102. DESC_HDR_MODE0_DEU_3DES |
  2103. DESC_HDR_SEL1_MDEUA |
  2104. DESC_HDR_MODE1_MDEU_INIT |
  2105. DESC_HDR_MODE1_MDEU_PAD |
  2106. DESC_HDR_MODE1_MDEU_SHA224_HMAC,
  2107. },
  2108. { .type = CRYPTO_ALG_TYPE_AEAD,
  2109. .alg.aead = {
  2110. .base = {
  2111. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2112. .cra_driver_name = "authenc-hmac-sha256-"
  2113. "cbc-aes-talitos",
  2114. .cra_blocksize = AES_BLOCK_SIZE,
  2115. .cra_flags = CRYPTO_ALG_ASYNC,
  2116. },
  2117. .ivsize = AES_BLOCK_SIZE,
  2118. .maxauthsize = SHA256_DIGEST_SIZE,
  2119. },
  2120. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2121. DESC_HDR_SEL0_AESU |
  2122. DESC_HDR_MODE0_AESU_CBC |
  2123. DESC_HDR_SEL1_MDEUA |
  2124. DESC_HDR_MODE1_MDEU_INIT |
  2125. DESC_HDR_MODE1_MDEU_PAD |
  2126. DESC_HDR_MODE1_MDEU_SHA256_HMAC,
  2127. },
  2128. { .type = CRYPTO_ALG_TYPE_AEAD,
  2129. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2130. .alg.aead = {
  2131. .base = {
  2132. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2133. .cra_driver_name = "authenc-hmac-sha256-"
  2134. "cbc-aes-talitos-hsna",
  2135. .cra_blocksize = AES_BLOCK_SIZE,
  2136. .cra_flags = CRYPTO_ALG_ASYNC,
  2137. },
  2138. .ivsize = AES_BLOCK_SIZE,
  2139. .maxauthsize = SHA256_DIGEST_SIZE,
  2140. },
  2141. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2142. DESC_HDR_SEL0_AESU |
  2143. DESC_HDR_MODE0_AESU_CBC |
  2144. DESC_HDR_SEL1_MDEUA |
  2145. DESC_HDR_MODE1_MDEU_INIT |
  2146. DESC_HDR_MODE1_MDEU_PAD |
  2147. DESC_HDR_MODE1_MDEU_SHA256_HMAC,
  2148. },
  2149. { .type = CRYPTO_ALG_TYPE_AEAD,
  2150. .alg.aead = {
  2151. .base = {
  2152. .cra_name = "authenc(hmac(sha256),"
  2153. "cbc(des3_ede))",
  2154. .cra_driver_name = "authenc-hmac-sha256-"
  2155. "cbc-3des-talitos",
  2156. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2157. .cra_flags = CRYPTO_ALG_ASYNC,
  2158. },
  2159. .ivsize = DES3_EDE_BLOCK_SIZE,
  2160. .maxauthsize = SHA256_DIGEST_SIZE,
  2161. },
  2162. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2163. DESC_HDR_SEL0_DEU |
  2164. DESC_HDR_MODE0_DEU_CBC |
  2165. DESC_HDR_MODE0_DEU_3DES |
  2166. DESC_HDR_SEL1_MDEUA |
  2167. DESC_HDR_MODE1_MDEU_INIT |
  2168. DESC_HDR_MODE1_MDEU_PAD |
  2169. DESC_HDR_MODE1_MDEU_SHA256_HMAC,
  2170. },
  2171. { .type = CRYPTO_ALG_TYPE_AEAD,
  2172. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2173. .alg.aead = {
  2174. .base = {
  2175. .cra_name = "authenc(hmac(sha256),"
  2176. "cbc(des3_ede))",
  2177. .cra_driver_name = "authenc-hmac-sha256-"
  2178. "cbc-3des-talitos-hsna",
  2179. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2180. .cra_flags = CRYPTO_ALG_ASYNC,
  2181. },
  2182. .ivsize = DES3_EDE_BLOCK_SIZE,
  2183. .maxauthsize = SHA256_DIGEST_SIZE,
  2184. },
  2185. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2186. DESC_HDR_SEL0_DEU |
  2187. DESC_HDR_MODE0_DEU_CBC |
  2188. DESC_HDR_MODE0_DEU_3DES |
  2189. DESC_HDR_SEL1_MDEUA |
  2190. DESC_HDR_MODE1_MDEU_INIT |
  2191. DESC_HDR_MODE1_MDEU_PAD |
  2192. DESC_HDR_MODE1_MDEU_SHA256_HMAC,
  2193. },
  2194. { .type = CRYPTO_ALG_TYPE_AEAD,
  2195. .alg.aead = {
  2196. .base = {
  2197. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  2198. .cra_driver_name = "authenc-hmac-sha384-"
  2199. "cbc-aes-talitos",
  2200. .cra_blocksize = AES_BLOCK_SIZE,
  2201. .cra_flags = CRYPTO_ALG_ASYNC,
  2202. },
  2203. .ivsize = AES_BLOCK_SIZE,
  2204. .maxauthsize = SHA384_DIGEST_SIZE,
  2205. },
  2206. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2207. DESC_HDR_SEL0_AESU |
  2208. DESC_HDR_MODE0_AESU_CBC |
  2209. DESC_HDR_SEL1_MDEUB |
  2210. DESC_HDR_MODE1_MDEU_INIT |
  2211. DESC_HDR_MODE1_MDEU_PAD |
  2212. DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
  2213. },
  2214. { .type = CRYPTO_ALG_TYPE_AEAD,
  2215. .alg.aead = {
  2216. .base = {
  2217. .cra_name = "authenc(hmac(sha384),"
  2218. "cbc(des3_ede))",
  2219. .cra_driver_name = "authenc-hmac-sha384-"
  2220. "cbc-3des-talitos",
  2221. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2222. .cra_flags = CRYPTO_ALG_ASYNC,
  2223. },
  2224. .ivsize = DES3_EDE_BLOCK_SIZE,
  2225. .maxauthsize = SHA384_DIGEST_SIZE,
  2226. },
  2227. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2228. DESC_HDR_SEL0_DEU |
  2229. DESC_HDR_MODE0_DEU_CBC |
  2230. DESC_HDR_MODE0_DEU_3DES |
  2231. DESC_HDR_SEL1_MDEUB |
  2232. DESC_HDR_MODE1_MDEU_INIT |
  2233. DESC_HDR_MODE1_MDEU_PAD |
  2234. DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
  2235. },
  2236. { .type = CRYPTO_ALG_TYPE_AEAD,
  2237. .alg.aead = {
  2238. .base = {
  2239. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  2240. .cra_driver_name = "authenc-hmac-sha512-"
  2241. "cbc-aes-talitos",
  2242. .cra_blocksize = AES_BLOCK_SIZE,
  2243. .cra_flags = CRYPTO_ALG_ASYNC,
  2244. },
  2245. .ivsize = AES_BLOCK_SIZE,
  2246. .maxauthsize = SHA512_DIGEST_SIZE,
  2247. },
  2248. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2249. DESC_HDR_SEL0_AESU |
  2250. DESC_HDR_MODE0_AESU_CBC |
  2251. DESC_HDR_SEL1_MDEUB |
  2252. DESC_HDR_MODE1_MDEU_INIT |
  2253. DESC_HDR_MODE1_MDEU_PAD |
  2254. DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
  2255. },
  2256. { .type = CRYPTO_ALG_TYPE_AEAD,
  2257. .alg.aead = {
  2258. .base = {
  2259. .cra_name = "authenc(hmac(sha512),"
  2260. "cbc(des3_ede))",
  2261. .cra_driver_name = "authenc-hmac-sha512-"
  2262. "cbc-3des-talitos",
  2263. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2264. .cra_flags = CRYPTO_ALG_ASYNC,
  2265. },
  2266. .ivsize = DES3_EDE_BLOCK_SIZE,
  2267. .maxauthsize = SHA512_DIGEST_SIZE,
  2268. },
  2269. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2270. DESC_HDR_SEL0_DEU |
  2271. DESC_HDR_MODE0_DEU_CBC |
  2272. DESC_HDR_MODE0_DEU_3DES |
  2273. DESC_HDR_SEL1_MDEUB |
  2274. DESC_HDR_MODE1_MDEU_INIT |
  2275. DESC_HDR_MODE1_MDEU_PAD |
  2276. DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
  2277. },
  2278. { .type = CRYPTO_ALG_TYPE_AEAD,
  2279. .alg.aead = {
  2280. .base = {
  2281. .cra_name = "authenc(hmac(md5),cbc(aes))",
  2282. .cra_driver_name = "authenc-hmac-md5-"
  2283. "cbc-aes-talitos",
  2284. .cra_blocksize = AES_BLOCK_SIZE,
  2285. .cra_flags = CRYPTO_ALG_ASYNC,
  2286. },
  2287. .ivsize = AES_BLOCK_SIZE,
  2288. .maxauthsize = MD5_DIGEST_SIZE,
  2289. },
  2290. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2291. DESC_HDR_SEL0_AESU |
  2292. DESC_HDR_MODE0_AESU_CBC |
  2293. DESC_HDR_SEL1_MDEUA |
  2294. DESC_HDR_MODE1_MDEU_INIT |
  2295. DESC_HDR_MODE1_MDEU_PAD |
  2296. DESC_HDR_MODE1_MDEU_MD5_HMAC,
  2297. },
  2298. { .type = CRYPTO_ALG_TYPE_AEAD,
  2299. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2300. .alg.aead = {
  2301. .base = {
  2302. .cra_name = "authenc(hmac(md5),cbc(aes))",
  2303. .cra_driver_name = "authenc-hmac-md5-"
  2304. "cbc-aes-talitos-hsna",
  2305. .cra_blocksize = AES_BLOCK_SIZE,
  2306. .cra_flags = CRYPTO_ALG_ASYNC,
  2307. },
  2308. .ivsize = AES_BLOCK_SIZE,
  2309. .maxauthsize = MD5_DIGEST_SIZE,
  2310. },
  2311. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2312. DESC_HDR_SEL0_AESU |
  2313. DESC_HDR_MODE0_AESU_CBC |
  2314. DESC_HDR_SEL1_MDEUA |
  2315. DESC_HDR_MODE1_MDEU_INIT |
  2316. DESC_HDR_MODE1_MDEU_PAD |
  2317. DESC_HDR_MODE1_MDEU_MD5_HMAC,
  2318. },
  2319. { .type = CRYPTO_ALG_TYPE_AEAD,
  2320. .alg.aead = {
  2321. .base = {
  2322. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  2323. .cra_driver_name = "authenc-hmac-md5-"
  2324. "cbc-3des-talitos",
  2325. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2326. .cra_flags = CRYPTO_ALG_ASYNC,
  2327. },
  2328. .ivsize = DES3_EDE_BLOCK_SIZE,
  2329. .maxauthsize = MD5_DIGEST_SIZE,
  2330. },
  2331. .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
  2332. DESC_HDR_SEL0_DEU |
  2333. DESC_HDR_MODE0_DEU_CBC |
  2334. DESC_HDR_MODE0_DEU_3DES |
  2335. DESC_HDR_SEL1_MDEUA |
  2336. DESC_HDR_MODE1_MDEU_INIT |
  2337. DESC_HDR_MODE1_MDEU_PAD |
  2338. DESC_HDR_MODE1_MDEU_MD5_HMAC,
  2339. },
  2340. { .type = CRYPTO_ALG_TYPE_AEAD,
  2341. .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
  2342. .alg.aead = {
  2343. .base = {
  2344. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  2345. .cra_driver_name = "authenc-hmac-md5-"
  2346. "cbc-3des-talitos-hsna",
  2347. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2348. .cra_flags = CRYPTO_ALG_ASYNC,
  2349. },
  2350. .ivsize = DES3_EDE_BLOCK_SIZE,
  2351. .maxauthsize = MD5_DIGEST_SIZE,
  2352. },
  2353. .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
  2354. DESC_HDR_SEL0_DEU |
  2355. DESC_HDR_MODE0_DEU_CBC |
  2356. DESC_HDR_MODE0_DEU_3DES |
  2357. DESC_HDR_SEL1_MDEUA |
  2358. DESC_HDR_MODE1_MDEU_INIT |
  2359. DESC_HDR_MODE1_MDEU_PAD |
  2360. DESC_HDR_MODE1_MDEU_MD5_HMAC,
  2361. },
  2362. /* ABLKCIPHER algorithms. */
  2363. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2364. .alg.crypto = {
  2365. .cra_name = "ecb(aes)",
  2366. .cra_driver_name = "ecb-aes-talitos",
  2367. .cra_blocksize = AES_BLOCK_SIZE,
  2368. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2369. CRYPTO_ALG_ASYNC,
  2370. .cra_ablkcipher = {
  2371. .min_keysize = AES_MIN_KEY_SIZE,
  2372. .max_keysize = AES_MAX_KEY_SIZE,
  2373. .ivsize = AES_BLOCK_SIZE,
  2374. }
  2375. },
  2376. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2377. DESC_HDR_SEL0_AESU,
  2378. },
  2379. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2380. .alg.crypto = {
  2381. .cra_name = "cbc(aes)",
  2382. .cra_driver_name = "cbc-aes-talitos",
  2383. .cra_blocksize = AES_BLOCK_SIZE,
  2384. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2385. CRYPTO_ALG_ASYNC,
  2386. .cra_ablkcipher = {
  2387. .min_keysize = AES_MIN_KEY_SIZE,
  2388. .max_keysize = AES_MAX_KEY_SIZE,
  2389. .ivsize = AES_BLOCK_SIZE,
  2390. .setkey = ablkcipher_aes_setkey,
  2391. }
  2392. },
  2393. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2394. DESC_HDR_SEL0_AESU |
  2395. DESC_HDR_MODE0_AESU_CBC,
  2396. },
  2397. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2398. .alg.crypto = {
  2399. .cra_name = "ctr(aes)",
  2400. .cra_driver_name = "ctr-aes-talitos",
  2401. .cra_blocksize = 1,
  2402. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2403. CRYPTO_ALG_ASYNC,
  2404. .cra_ablkcipher = {
  2405. .min_keysize = AES_MIN_KEY_SIZE,
  2406. .max_keysize = AES_MAX_KEY_SIZE,
  2407. .setkey = ablkcipher_aes_setkey,
  2408. }
  2409. },
  2410. .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
  2411. DESC_HDR_SEL0_AESU |
  2412. DESC_HDR_MODE0_AESU_CTR,
  2413. },
  2414. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2415. .alg.crypto = {
  2416. .cra_name = "ecb(des)",
  2417. .cra_driver_name = "ecb-des-talitos",
  2418. .cra_blocksize = DES_BLOCK_SIZE,
  2419. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2420. CRYPTO_ALG_ASYNC,
  2421. .cra_ablkcipher = {
  2422. .min_keysize = DES_KEY_SIZE,
  2423. .max_keysize = DES_KEY_SIZE,
  2424. .ivsize = DES_BLOCK_SIZE,
  2425. }
  2426. },
  2427. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2428. DESC_HDR_SEL0_DEU,
  2429. },
  2430. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2431. .alg.crypto = {
  2432. .cra_name = "cbc(des)",
  2433. .cra_driver_name = "cbc-des-talitos",
  2434. .cra_blocksize = DES_BLOCK_SIZE,
  2435. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2436. CRYPTO_ALG_ASYNC,
  2437. .cra_ablkcipher = {
  2438. .min_keysize = DES_KEY_SIZE,
  2439. .max_keysize = DES_KEY_SIZE,
  2440. .ivsize = DES_BLOCK_SIZE,
  2441. }
  2442. },
  2443. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2444. DESC_HDR_SEL0_DEU |
  2445. DESC_HDR_MODE0_DEU_CBC,
  2446. },
  2447. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2448. .alg.crypto = {
  2449. .cra_name = "ecb(des3_ede)",
  2450. .cra_driver_name = "ecb-3des-talitos",
  2451. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2452. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2453. CRYPTO_ALG_ASYNC,
  2454. .cra_ablkcipher = {
  2455. .min_keysize = DES3_EDE_KEY_SIZE,
  2456. .max_keysize = DES3_EDE_KEY_SIZE,
  2457. .ivsize = DES3_EDE_BLOCK_SIZE,
  2458. }
  2459. },
  2460. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2461. DESC_HDR_SEL0_DEU |
  2462. DESC_HDR_MODE0_DEU_3DES,
  2463. },
  2464. { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2465. .alg.crypto = {
  2466. .cra_name = "cbc(des3_ede)",
  2467. .cra_driver_name = "cbc-3des-talitos",
  2468. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2469. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  2470. CRYPTO_ALG_ASYNC,
  2471. .cra_ablkcipher = {
  2472. .min_keysize = DES3_EDE_KEY_SIZE,
  2473. .max_keysize = DES3_EDE_KEY_SIZE,
  2474. .ivsize = DES3_EDE_BLOCK_SIZE,
  2475. }
  2476. },
  2477. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2478. DESC_HDR_SEL0_DEU |
  2479. DESC_HDR_MODE0_DEU_CBC |
  2480. DESC_HDR_MODE0_DEU_3DES,
  2481. },
  2482. /* AHASH algorithms. */
  2483. { .type = CRYPTO_ALG_TYPE_AHASH,
  2484. .alg.hash = {
  2485. .halg.digestsize = MD5_DIGEST_SIZE,
  2486. .halg.statesize = sizeof(struct talitos_export_state),
  2487. .halg.base = {
  2488. .cra_name = "md5",
  2489. .cra_driver_name = "md5-talitos",
  2490. .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
  2491. .cra_flags = CRYPTO_ALG_ASYNC,
  2492. }
  2493. },
  2494. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2495. DESC_HDR_SEL0_MDEUA |
  2496. DESC_HDR_MODE0_MDEU_MD5,
  2497. },
  2498. { .type = CRYPTO_ALG_TYPE_AHASH,
  2499. .alg.hash = {
  2500. .halg.digestsize = SHA1_DIGEST_SIZE,
  2501. .halg.statesize = sizeof(struct talitos_export_state),
  2502. .halg.base = {
  2503. .cra_name = "sha1",
  2504. .cra_driver_name = "sha1-talitos",
  2505. .cra_blocksize = SHA1_BLOCK_SIZE,
  2506. .cra_flags = CRYPTO_ALG_ASYNC,
  2507. }
  2508. },
  2509. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2510. DESC_HDR_SEL0_MDEUA |
  2511. DESC_HDR_MODE0_MDEU_SHA1,
  2512. },
  2513. { .type = CRYPTO_ALG_TYPE_AHASH,
  2514. .alg.hash = {
  2515. .halg.digestsize = SHA224_DIGEST_SIZE,
  2516. .halg.statesize = sizeof(struct talitos_export_state),
  2517. .halg.base = {
  2518. .cra_name = "sha224",
  2519. .cra_driver_name = "sha224-talitos",
  2520. .cra_blocksize = SHA224_BLOCK_SIZE,
  2521. .cra_flags = CRYPTO_ALG_ASYNC,
  2522. }
  2523. },
  2524. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2525. DESC_HDR_SEL0_MDEUA |
  2526. DESC_HDR_MODE0_MDEU_SHA224,
  2527. },
  2528. { .type = CRYPTO_ALG_TYPE_AHASH,
  2529. .alg.hash = {
  2530. .halg.digestsize = SHA256_DIGEST_SIZE,
  2531. .halg.statesize = sizeof(struct talitos_export_state),
  2532. .halg.base = {
  2533. .cra_name = "sha256",
  2534. .cra_driver_name = "sha256-talitos",
  2535. .cra_blocksize = SHA256_BLOCK_SIZE,
  2536. .cra_flags = CRYPTO_ALG_ASYNC,
  2537. }
  2538. },
  2539. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2540. DESC_HDR_SEL0_MDEUA |
  2541. DESC_HDR_MODE0_MDEU_SHA256,
  2542. },
  2543. { .type = CRYPTO_ALG_TYPE_AHASH,
  2544. .alg.hash = {
  2545. .halg.digestsize = SHA384_DIGEST_SIZE,
  2546. .halg.statesize = sizeof(struct talitos_export_state),
  2547. .halg.base = {
  2548. .cra_name = "sha384",
  2549. .cra_driver_name = "sha384-talitos",
  2550. .cra_blocksize = SHA384_BLOCK_SIZE,
  2551. .cra_flags = CRYPTO_ALG_ASYNC,
  2552. }
  2553. },
  2554. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2555. DESC_HDR_SEL0_MDEUB |
  2556. DESC_HDR_MODE0_MDEUB_SHA384,
  2557. },
  2558. { .type = CRYPTO_ALG_TYPE_AHASH,
  2559. .alg.hash = {
  2560. .halg.digestsize = SHA512_DIGEST_SIZE,
  2561. .halg.statesize = sizeof(struct talitos_export_state),
  2562. .halg.base = {
  2563. .cra_name = "sha512",
  2564. .cra_driver_name = "sha512-talitos",
  2565. .cra_blocksize = SHA512_BLOCK_SIZE,
  2566. .cra_flags = CRYPTO_ALG_ASYNC,
  2567. }
  2568. },
  2569. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2570. DESC_HDR_SEL0_MDEUB |
  2571. DESC_HDR_MODE0_MDEUB_SHA512,
  2572. },
  2573. { .type = CRYPTO_ALG_TYPE_AHASH,
  2574. .alg.hash = {
  2575. .halg.digestsize = MD5_DIGEST_SIZE,
  2576. .halg.statesize = sizeof(struct talitos_export_state),
  2577. .halg.base = {
  2578. .cra_name = "hmac(md5)",
  2579. .cra_driver_name = "hmac-md5-talitos",
  2580. .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
  2581. .cra_flags = CRYPTO_ALG_ASYNC,
  2582. }
  2583. },
  2584. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2585. DESC_HDR_SEL0_MDEUA |
  2586. DESC_HDR_MODE0_MDEU_MD5,
  2587. },
  2588. { .type = CRYPTO_ALG_TYPE_AHASH,
  2589. .alg.hash = {
  2590. .halg.digestsize = SHA1_DIGEST_SIZE,
  2591. .halg.statesize = sizeof(struct talitos_export_state),
  2592. .halg.base = {
  2593. .cra_name = "hmac(sha1)",
  2594. .cra_driver_name = "hmac-sha1-talitos",
  2595. .cra_blocksize = SHA1_BLOCK_SIZE,
  2596. .cra_flags = CRYPTO_ALG_ASYNC,
  2597. }
  2598. },
  2599. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2600. DESC_HDR_SEL0_MDEUA |
  2601. DESC_HDR_MODE0_MDEU_SHA1,
  2602. },
  2603. { .type = CRYPTO_ALG_TYPE_AHASH,
  2604. .alg.hash = {
  2605. .halg.digestsize = SHA224_DIGEST_SIZE,
  2606. .halg.statesize = sizeof(struct talitos_export_state),
  2607. .halg.base = {
  2608. .cra_name = "hmac(sha224)",
  2609. .cra_driver_name = "hmac-sha224-talitos",
  2610. .cra_blocksize = SHA224_BLOCK_SIZE,
  2611. .cra_flags = CRYPTO_ALG_ASYNC,
  2612. }
  2613. },
  2614. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2615. DESC_HDR_SEL0_MDEUA |
  2616. DESC_HDR_MODE0_MDEU_SHA224,
  2617. },
  2618. { .type = CRYPTO_ALG_TYPE_AHASH,
  2619. .alg.hash = {
  2620. .halg.digestsize = SHA256_DIGEST_SIZE,
  2621. .halg.statesize = sizeof(struct talitos_export_state),
  2622. .halg.base = {
  2623. .cra_name = "hmac(sha256)",
  2624. .cra_driver_name = "hmac-sha256-talitos",
  2625. .cra_blocksize = SHA256_BLOCK_SIZE,
  2626. .cra_flags = CRYPTO_ALG_ASYNC,
  2627. }
  2628. },
  2629. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2630. DESC_HDR_SEL0_MDEUA |
  2631. DESC_HDR_MODE0_MDEU_SHA256,
  2632. },
  2633. { .type = CRYPTO_ALG_TYPE_AHASH,
  2634. .alg.hash = {
  2635. .halg.digestsize = SHA384_DIGEST_SIZE,
  2636. .halg.statesize = sizeof(struct talitos_export_state),
  2637. .halg.base = {
  2638. .cra_name = "hmac(sha384)",
  2639. .cra_driver_name = "hmac-sha384-talitos",
  2640. .cra_blocksize = SHA384_BLOCK_SIZE,
  2641. .cra_flags = CRYPTO_ALG_ASYNC,
  2642. }
  2643. },
  2644. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2645. DESC_HDR_SEL0_MDEUB |
  2646. DESC_HDR_MODE0_MDEUB_SHA384,
  2647. },
  2648. { .type = CRYPTO_ALG_TYPE_AHASH,
  2649. .alg.hash = {
  2650. .halg.digestsize = SHA512_DIGEST_SIZE,
  2651. .halg.statesize = sizeof(struct talitos_export_state),
  2652. .halg.base = {
  2653. .cra_name = "hmac(sha512)",
  2654. .cra_driver_name = "hmac-sha512-talitos",
  2655. .cra_blocksize = SHA512_BLOCK_SIZE,
  2656. .cra_flags = CRYPTO_ALG_ASYNC,
  2657. }
  2658. },
  2659. .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
  2660. DESC_HDR_SEL0_MDEUB |
  2661. DESC_HDR_MODE0_MDEUB_SHA512,
  2662. }
  2663. };
  2664. struct talitos_crypto_alg {
  2665. struct list_head entry;
  2666. struct device *dev;
  2667. struct talitos_alg_template algt;
  2668. };
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

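/*
 * cra_init hook shared by ablkcipher and ahash transforms: recover the
 * owning talitos_crypto_alg from the generic crypto_alg and hand off to
 * talitos_init_common()
 */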
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

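/* AEAD transforms reach the common init path through the aead_alg init hook */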
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

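/*
 * ahash transforms additionally start with no key loaded and reserve room
 * for a talitos_ahash_req_ctx in every request
 */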
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

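/* release the DMA mapping of the key set up by setkey, if a key was loaded */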
static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

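/*
 * tear-down: unregister every algorithm still on alg_list, drop the RNG if
 * it was registered, release both IRQ lines and kill the done tasklets
 */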
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	return 0;
}

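/*
 * build a talitos_crypto_alg from a driver_algs template: wire up the
 * type-specific callbacks, apply per-SoC feature quirks (HMAC support,
 * SHA-224 hardware init), then fill in the common crypto_alg fields
 */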
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;

	if (has_ftr_sec1(priv))
		alg->cra_alignmask = 3;
	else
		alg->cra_alignmask = 0;

	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

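/*
 * map and request the interrupt lines described in the device tree:
 * SEC1 and single-IRQ SEC2+ parts service all four channels from one
 * handler, while dual-IRQ parts split channels 0/2 and 1/3 across the
 * two lines
 */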
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

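/*
 * probe: map the registers, read the SEC capabilities from the device
 * tree, set up per-channel state and done tasklets, reset the hardware,
 * then register the RNG and every algorithm the engine supports
 */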
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						priv->fifo_len,
						sizeof(struct talitos_request),
						GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

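/*
 * device tree match table; SEC1 and SEC2/3 support are selected by
 * separate Kconfig options
 */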
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");