/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
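
/*
 * Space-budget lookup tables used by chcr_sg_ent_in_wr() and
 * chcr_hash_ent_in_wr() below: sgl_ent_len[n] and dsgl_ent_len[n] appear to
 * give the bytes consumed by a source ULPTX SGL and a destination PHYS_DSGL
 * carrying n entries, precomputed so the budgeting loops need not redo the
 * header-and-pair packing arithmetic. round_constant[] holds the AES
 * key-schedule round constants (Rcon), kept in the most-significant byte
 * since the schedule in get_aes_decrypt_key() works on big-endian words.
 */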
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
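
/*
 * Count the hardware SG entries needed to describe `reqlen` bytes of `sg`,
 * starting `skip` bytes in, when one entry may cover at most `entlen` bytes.
 * Worked example: a single 5000-byte DMA segment with entlen == 2048
 * contributes DIV_ROUND_UP(5000, 2048) == 3 entries. (Callers pass
 * CHCR_SRC_SG_SIZE/CHCR_DST_SG_SIZE, believed to be 2048, as entlen.)
 */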
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}
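
/*
 * get_aes_decrypt_key() runs the FIPS-197 key expansion forward to the last
 * round; the final w_ring[] snapshot holds the last nk expanded words, which
 * are copied out most-recent-first. Presumably the hardware uses this
 * "reversed round key" as the starting state for equivalent-inverse-cipher
 * decryption.
 */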
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
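
/*
 * chcr_change_order() byte-swaps the words of an exported shash state
 * (CPU-endian u32/u64) into big-endian, the layout the crypto engine
 * apparently expects when a partial hash is loaded into the key context.
 */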
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
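
/*
 * A struct phys_sge_pairs packs eight {len, addr} pairs, so the dsgl walk
 * helpers below index with j % 8 and advance walk->to to the next pair
 * block once eight entries have been filled.
 */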
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
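
/*
 * ULPTX SGL layout, as reflected in the helpers above and below: the first
 * fragment rides inline in the SGL header (len0/addr0); every later
 * fragment occupies half of an {addr, len} pair, which is why pair_idx
 * toggles and walk->pair advances only on the second entry of each pair.
 */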
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
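
/*
 * chcr_hash_ent_in_wr() returns how many source bytes fit in one hash WR
 * given `space` bytes of budget: each CHCR_SRC_SG_SIZE-sized chunk costs one
 * SGL entry, and sgl_ent_len[srcsg + 1] prices the SGL as if one more entry
 * were added.
 */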
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
							CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
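
/*
 * Cipher-path variant of the budgeting above, with source and destination
 * walked together: destination entries are added only while dstlen trails
 * srclen, and the return value min(srclen, dstlen) is the byte count that
 * fits both the ULPTX SGL and the PHYS_DSGL within `space`.
 */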
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
				CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;

		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
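
/*
 * Software fallback: re-issue the request synchronously on the sw_cipher
 * skcipher (presumably allocated at tfm init time) whenever the hardware
 * path cannot make progress, e.g. when CTR counter-overflow trimming in
 * process_cipher()/chcr_handle_cipher_resp() leaves zero processable bytes.
 */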
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: work-request parameters: the cipher request, the ingress qid
 *	     where the response of this WR should be received, and the number
 *	     of bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
				     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
				CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
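
/*
 * CTR bookkeeping: ctr_add_iv() adds `add` to the big-endian 128-bit counter
 * with carry propagation; adjust_ctr_overflow() trims `bytes` so the low
 * 32-bit counter word cannot wrap inside one WR. Worked example: if that
 * word is 0xfffffffe, then c = ~0xfffffffe + 1 = 2, so at most two AES
 * blocks (32 bytes) are issued now and the remainder goes out in a
 * follow-up WR with a recomputed IV.
 */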
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without
			    * overflowing the 32-bit counter word
			    */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
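
/*
 * XTS tweak fast-forward: after `round` full blocks the tweak equals
 * E_K2(iv) multiplied by x^round in GF(2^128); gf128mul_x8_ble() batches
 * eight doublings per call. For a non-final chunk the result is pushed back
 * through the inverse cipher, seemingly because the hardware expects the
 * un-encrypted tweak seed and re-derives E_K2 itself.
 */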
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because, for RFC 3686, the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant for subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for Decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}
	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					    AES_MIN_KEY_SIZE +
					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min dsgl size */
					    32))) {
		/* Can be sent as Immediate */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
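/*
 * Queue selection in chcr_device_init() spreads contexts across the
 * per-channel queue sets. Illustrative numbers (not from a real adapter):
 * with nchan = 2 and nrxq = 8, rxq_perchan = 4; a context on channel 1
 * created from CPU 6 gets rxq_idx = 1 * 4 + (6 % 4) = 6. The same
 * arithmetic is applied to the TX side.
 */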
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak */
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else {
		ablkctx->aes_generic = NULL;
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
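/*
 * Note on get_alg_config(): result_size is the size of the running hash
 * state the hardware returns, not the truncated digest. SHA-224 is
 * computed as SHA-256 and SHA-384 as SHA-512, so their partial results
 * are 32 and 64 bytes respectively; chcr_handle_ahash_resp() mirrors
 * this with its updated_digestsize logic.
 */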
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: work request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
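/*
 * chcr_ahash_update() only submits whole blocks to the hardware.
 * Example with a 64-byte block size: update(100 bytes) sends 64 bytes
 * and stashes the remaining 36 in reqbfr; a later update(28 bytes)
 * brings the buffered total to 64, which goes out with the next
 * whole-block submission. reqbfr and skbfr are swapped so bytes still
 * referenced by an in-flight work request are not overwritten.
 */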
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				     HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}

static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
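/*
 * create_last_hash_block() writes a standard Merkle-Damgard final block:
 * 0x80, zero padding, then the message length in *bits* in the last
 * 8 bytes (offset 56 for 64-byte blocks, 120 for 128-byte ones).
 * Worked example: scmd1 = 64 (bytes hashed so far) stores
 * cpu_to_be64(512) in the length field, exactly what a software SHA
 * would emit when finalizing on a block boundary.
 */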
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}

static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}

static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
out:
	req->base.complete(&req->base, err);
}
/*
 * chcr_handle_resp - Handle completion of a crypto request: dispatch to
 * the per-algorithm response handler, which unmaps the DMA buffers
 * associated with the request and completes it.
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
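/*
 * chcr_ahash_setkey() precomputes the HMAC inner/outer pads per RFC 2104:
 * ipad = key XOR 0x36.. and opad = key XOR 0x5c.., each padded to one
 * block. IPAD_DATA/OPAD_DATA are expected to be the 32-bit replications
 * of those byte constants, so the word-wise XOR loop below is equivalent
 * to the byte-wise definition.
 */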
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data, opad with the final hash result. ipad
	 * lives in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
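/*
 * For XTS, key_len is the concatenation of the data key and the tweak
 * key, so 32 bytes means two AES-128 keys (hence the seemingly inverted
 * CHCR_KEYCTX_CIPHER_KEY_SIZE_* selection below) and key_len << 2 is the
 * per-key size in bits: 32 << 2 = 128.
 */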
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}
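/*
 * Layout note for the AEAD request context: reqctx->iv is a single
 * buffer holding the IV followed (for CCM) by the B0 block and AAD
 * length field, so scratch_pad = iv + IV and b0_dma = iv_dma + IV.
 * Mapping it once with DMA_BIDIRECTIONAL covers both pieces.
 */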
static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
					  CHCR_SRC_SG_SIZE, 0);
	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
					  CHCR_SRC_SG_SIZE, req->assoclen);
	return 0;
err:
	return error;
}

static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents;
	unsigned int assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
		assoclen = 0;
		reqctx->aad_nents = 0;
	}
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
		req->assoclen);
	dnents += MIN_AUTH_SG; /* For IV */

	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
			: (sgl_len(reqctx->src_nents + reqctx->aad_nents
			+ MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and payload, where the IV should be
	 * included as part of the auth data. All other fields should be
	 * filled according to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
				       assoclen + 1);
	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	/* unmap with the same length the IV buffer was mapped with above */
	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len,
			 DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
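/*
 * For immediate AEAD requests the payload is serialized into the WR in
 * the exact order the hardware parses it: [B0 (CCM only)] [AAD] [IV]
 * [ciphertext/plaintext]. The SGL path below emits the same sequence as
 * ULPTX entries instead of copies.
 */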
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx,
			   unsigned int assoclen)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, assoclen, 0);
		buf += assoclen;
		memcpy(buf, reqctx->iv, IV);
		buf += IV;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen, req->assoclen);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    &reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
				  req->assoclen);
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned int assoclen,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	if (reqctx->b0_len)
		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}
		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    &reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}

int chcr_cipher_dma_map(struct device *dev,
			struct ablkcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct ablkcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}

static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
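/*
 * set_msg_len() stores msglen big-endian in the last csize bytes of a
 * csize-byte field. Example: csize = 3, msglen = 0x012345 fills the
 * field with 01 23 45, while msglen = 0x1234567 (> 2^24) returns
 * -EOVERFLOW. For csize >= 4 only the low four bytes are written, the
 * rest having been zeroed by the memset().
 */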
static int generate_b0(struct aead_request *req,
		       struct chcr_aead_ctx *aeadctx,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, reqctx->iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
	/* propagate -EOVERFLOW from set_msg_len() instead of dropping it */
	return rc;
}
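/*
 * B0 flags worked example: with a 16-byte tag (m = 16) and AAD present,
 * generate_b0() ORs in 8 * ((16 - 2) / 2) = 56 (0x38) for the tag size
 * and 64 (0x40) for the adata bit, on top of the L' value already in
 * iv[0], matching the CCM B0 flags octet from RFC 3610.
 */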
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}

static int ccm_format_packet(struct aead_request *req,
			     struct chcr_aead_ctx *aeadctx,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		reqctx->iv[0] = 3;
		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
		memcpy(reqctx->iv + 4, req->iv, 8);
		memset(reqctx->iv + 12, 0, 4);
	} else {
		memcpy(reqctx->iv, req->iv, 16);
	}
	if (assoclen)
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
				htons(assoclen);

	rc = generate_b0(req, aeadctx, op_type);
	/* zero the ctr value */
	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
	return rc;
}
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					 2, assoclen + 1 + ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD start is always 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}

static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen
				+ (reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_CCM_SG; /* for IV and B0 */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
			 MIN_CCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = (struct chcr_wr *)__skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
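
/*
 * Build the firmware work request for an AES-GCM (or RFC4106) AEAD
 * operation. The SEC_CPL fields are filled inline here rather than in a
 * helper, and the 16-byte counter block (salt | IV | 0x00000001) is
 * assembled in the request context before the source and destination
 * scatter/gather entries are added.
 */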
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_GCM_SG; /* for IV */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
		(sgl_len(reqctx->src_nents +
			 reqctx->aad_nents + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	/* offset of tag from end */
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->dev->rx_channel_id, 2,
					(assoclen + 1));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16-byte IV: S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
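
/*
 * Allocate the software fallback AEAD at init time and size the request
 * context for either path (the hardware reqctx or the fallback's own
 * aead_request), so a request can be handed to the fallback without
 * re-allocation.
 */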
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
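
/*
 * Map the requested ICV length onto the hardware's HMAC truncation
 * controls. Sizes the hardware cannot produce directly fall back to an
 * untruncated digest that is trimmed and verified in software
 * (VERIFY_SW).
 */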
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
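
/*
 * GCM accepts tag lengths of 4, 8, and 12-16 bytes. The odd sizes
 * (13 and 15) have no hardware truncation mode, so for those the full
 * tag is produced and handled in software.
 */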
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
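
/*
 * Common AES key handling for the CCM variants: derive the cipher/MAC
 * key-size fields of the key context header from the AES key length and
 * stash the raw key. CCM uses the same key for encryption and for the
 * CBC-MAC, which is why the key context is sized for two key copies.
 */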
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
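
/*
 * RFC4309 keys carry a 3-byte nonce salt appended to the AES key; split
 * it off into aeadctx->salt before programming the cipher key. The key
 * is mirrored into the software fallback first so both paths stay in
 * sync.
 */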
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
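
/*
 * GCM key programming: strip the 4-byte RFC4106 salt if present, record
 * the AES key, and precompute the GHASH subkey H = E(K, 0^128) with a
 * software AES cipher so it can be loaded into the key context alongside
 * the key.
 */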
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %u\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
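
/*
 * authenc(hmac(shaX), cbc/ctr(aes)) key programming: split the combined
 * blob with crypto_authenc_extractkeys(), keep only the cipher key, and
 * precompute the partial HMAC state H(K ^ ipad) / H(K ^ opad) with a
 * software shash so the hardware never needs the raw auth key.
 */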
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher key */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate
	 * h(ipad) and h(opad), so the authkey is not needed again.
	 * authkeylen is the size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);
		memzero_explicit(&keys, sizeof(keys));

		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
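
/*
 * Setkey for the digest_null authenc variants: only a cipher key is
 * programmed (CHCR_KEYCTX_NO_KEY / AUTH_MODE_NOP), since no ICV is
 * produced or checked.
 */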
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher key */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %u\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
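
/*
 * Common submission path for all AEAD requests: check for a device and
 * for queue space, build the work request via the subtype-specific
 * constructor, and hand the resulting skb to the LLD transmit queue.
 * Returns -EINPROGRESS (or -EBUSY when the queue was full but the
 * request may be backlogged), per the async crypto API convention.
 */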
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
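
/*
 * Entry points from the crypto API: record the direction and, for
 * decryption, reserve room for the full digest when the tag must be
 * verified in software, then dispatch to the matching WR constructor.
 */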
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
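
/*
 * Template table of every algorithm the driver can expose: ablkcipher
 * modes, raw SHA/HMAC hashes, and the AEAD suites. Fields that are
 * identical across entries (module owner, flags, ops) are filled in at
 * registration time by chcr_register_alg().
 */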
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = 2 * AES_MIN_KEY_SIZE,
				.max_keysize = 2 * AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_xts_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_ctr_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_rfc3686_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize = CTR_RFC3686_IV_SIZE,
				.setkey = chcr_aes_rfc3686_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
				.geniv = "seqiv",
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms from the
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms from the kernel.
 * This should be called once when the last device goes down. After this
 * the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}