chcr_algo.c

/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *        Manoj Malviya (manojmalviya@chelsio.com)
 *        Atul Gupta (atul.gupta@chelsio.com)
 *        Jitendra Lulla (jlulla@chelsio.com)
 *        Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *        Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
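
/*
 * Byte cost of a source ULPTX SGL (sgl_ent_len) and a destination
 * PHYS DSGL (dsgl_ent_len) holding the indexed number of entries.
 * chcr_sg_ent_in_wr() and chcr_hash_ent_in_wr() consult these tables
 * to decide how many scatter/gather entries still fit in the space
 * left in a work request.  (This reading of the tables' purpose is
 * inferred from those call sites.)
 */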
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};
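
/*
 * AES key-schedule round constants (Rcon), kept in the top byte of
 * each word to match the big-endian word handling in
 * get_aes_decrypt_key() below.
 */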
static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
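
/*
 * sg_nents_xlen() - count the hardware SG entries needed to cover
 * @reqlen bytes of @sg after skipping the first @skip bytes, with
 * each entry limited to @entlen bytes.
 */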
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
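
/*
 * chcr_verify_tag() - compare the authentication tag returned by the
 * hardware in the CPL_FW6_PLD message against the expected tag and
 * set *err to -EBADMSG on mismatch.  For GCM/RFC4106 both tags come
 * back in the reply; otherwise the expected tag is copied out of the
 * source scatterlist.
 */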
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

        chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
        if (reqctx->b0_dma)
                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
                                 reqctx->b0_len, DMA_BIDIRECTIONAL);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        req->base.complete(&req->base, err);
}
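
/*
 * get_aes_decrypt_key() - run the standard AES key expansion on @key
 * and write the last Nk round-key words out in reverse order; the
 * result is stored as ablkctx->rrkey and loaded into the key context
 * for the hardware decrypt path (see generate_copy_rrkey()).
 */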
static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8 nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}
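
/*
 * chcr_compute_partial_hash() - hash a single block (the HMAC
 * ipad/opad, per the @iopad parameter) with the software shash and
 * export the raw internal state into @result_hash; used to
 * precompute the partial HMAC digests loaded into the key context.
 */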
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);

        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}
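
/*
 * Destination-SGL walker for CPL_RX_PHYS_DSGL: dsgl_walk_init()
 * points past the CPL header, the add helpers append address/length
 * pairs in groups of eight, and dsgl_walk_end() fills in the header
 * with the final entry count and the response queue id.
 */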
static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size,
                                      dma_addr_t *addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(*addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                            offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}
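
/*
 * Source-SGL walker for ULPTX_SGL: the first entry occupies
 * len0/addr0 in the header; subsequent entries are packed two per
 * pair, with pair_idx toggling between the two slots.
 */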
static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size,
                                       dma_addr_t *addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(*addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}
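
/*
 * chcr_sg_ent_in_wr() - walk @src and @dst in step and return how
 * many payload bytes a work request with @space bytes left can carry
 * while both the source SGL and the destination DSGL still fit, per
 * the sgl_ent_len[]/dsgl_ent_len[] cost tables above.
 */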
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                        crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}

static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: bundles the cipher request, the ingress qid where the
 *           response to this WR should be received, and the number of
 *           bytes covered by this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents + 1);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
                             (sgl_len(nents + MIN_CIPHER_SG) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                        ablkctx->ciph_mode,
                                                        0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                        0, 0, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
                + (reqctx->imm ? (IV + wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                                   sg_nents(wrparam->req->src),
                                   wrparam->req->info, 16,
                                   reqctx->processed + wrparam->bytes -
                                   AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
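
/*
 * ctr_add_iv() - add @add to the 128-bit big-endian counter in
 * @srciv and store the result in @dstiv, propagating the carry one
 * 32-bit word at a time from the least-significant end.
 */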
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}
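
/*
 * adjust_ctr_overflow() - clamp @bytes so the low 32-bit word of the
 * CTR counter cannot wrap within a single request; any remainder is
 * handled as a follow-up work request with a recomputed IV in
 * chcr_handle_cipher_resp().
 */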
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}
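
/*
 * chcr_update_tweak() - advance the XTS tweak past the blocks
 * already processed by repeated multiplication by x in GF(2^128)
 * (eight blocks at a time via gf128mul_x8_ble()), then, for
 * non-final chunks, decrypt it back into the plain IV expected as
 * input for the next partial request.
 */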
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out;
        /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                crypto_cipher_decrypt_one(cipher, iv, iv);
out:
        return ret;
}

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->info, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes,
 * which stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for Decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}
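
/*
 * chcr_handle_cipher_resp() - completion handler for cipher WRs.  A
 * large request is split across several WRs: each response updates
 * the IV and either sends the next chunk, falls back to the software
 * cipher when no further progress can be made, or completes the
 * request once all of req->nbytes has been processed.
 */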
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        goto unmap;
                }
        }
        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;
        }
        dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        req->base.complete(&req->base, err);
        return err;
}
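
/*
 * process_cipher() - validate the request, map it for DMA, decide
 * whether the payload can travel inline in the WR (reqctx->imm) or
 * via SGLs, set up the per-mode IV (plain, CTR, or the RFC3686
 * nonce||IV||counter layout), and build the first work request.
 */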
static int process_cipher(struct ablkcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;
        if (!req->info)
                goto error;
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }
        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             AES_MIN_KEY_SIZE +
                                             sizeof(struct cpl_rx_phys_dsgl) +
                                             /* Min dsgl size */
                                             32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                dnents += 1; /* IV */
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;
        } else {
                reqctx->imm = 0;
        }
        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst,
                                          MIN_CIPHER_SG,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(req->info, bytes);
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);
                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           reqctx->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}
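
/*
 * chcr_aes_encrypt/chcr_aes_decrypt - queue a cipher work request on
 * the transmit queue bound to this tfm. -EBUSY is returned when the
 * queue is full and the request may not be backlogged; otherwise the
 * request completes asynchronously and -EINPROGRESS is returned.
 */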
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct sk_buff *skb = NULL;
        int err;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        int err;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}
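
/*
 * chcr_device_init - bind a context to a crypto device and pick its
 * RX/TX queue indices. Queues are spread across channels per CPU, and
 * tx_channel_id is toggled so consecutive contexts alternate channels.
 */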
static int chcr_device_init(struct chcr_context *ctx)
{
        struct uld_ctx *u_ctx = NULL;
        struct adapter *adap;
        unsigned int id;
        int txq_perchan, txq_idx, ntxq;
        int err = 0, rxq_perchan, rxq_idx;

        id = smp_processor_id();
        if (!ctx->dev) {
                u_ctx = assign_chcr_device();
                if (!u_ctx) {
                        pr_err("chcr device assignment fails\n");
                        err = -ENXIO;
                        goto out;
                }
                ctx->dev = u_ctx->dev;
                adap = padap(ctx->dev);
                ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
                rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
                rxq_idx += id % rxq_perchan;
                txq_idx = ctx->dev->tx_channel_id * txq_perchan;
                txq_idx += id % txq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
                ctx->rx_qidx = rxq_idx;
                ctx->tx_qidx = txq_idx;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
        }
out:
        return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }

        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
                /* To update tweak */
                ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
                if (IS_ERR(ablkctx->aes_generic)) {
                        pr_err("failed to allocate aes cipher for tweak\n");
                        crypto_free_skcipher(ablkctx->sw_cipher);
                        return PTR_ERR(ablkctx->aes_generic);
                }
        } else {
                ablkctx->aes_generic = NULL;
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        /* RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
         * cannot be used as fallback in chcr_handle_cipher_response
         */
        ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }
        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        crypto_free_skcipher(ablkctx->sw_cipher);
        if (ablkctx->aes_generic)
                crypto_free_cipher(ablkctx->aes_generic);
}

static int get_alg_config(struct algo_param *params,
                          unsigned int auth_size)
{
        switch (auth_size) {
        case SHA1_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
                params->result_size = SHA1_DIGEST_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
                /* SHA-224 is truncated SHA-256; the running state is
                 * SHA-256 sized.
                 */
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
                /* Likewise, the SHA-384 state is SHA-512 sized. */
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        default:
                pr_err("chcr : ERROR, unsupported digest size\n");
                return -EINVAL;
        }
        return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
        crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: work request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
                                      struct hash_wr_param *param)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
        struct chcr_wr *chcr_req;
        struct ulptx_sgl *ulptx;
        unsigned int nents = 0, transhdr_len;
        unsigned int temp = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(h_ctx(tfm)->dev);
        int error = 0;

        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
                                param->sg_len) <= SGE_MAX_WR_LEN;
        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
                              CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
        nents += param->bfr_len ? 1 : 0;
        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
                                param->sg_len, 16) : (sgl_len(nents) * 8);
        transhdr_len = roundup(transhdr_len, 16);

        skb = alloc_skb(transhdr_len, flags);
        if (!skb)
                return ERR_PTR(-ENOMEM);
        chcr_req = __skb_put_zero(skb, transhdr_len);

        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
        chcr_req->sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
                                         param->opad_needed, 0);

        chcr_req->sec_cpl.ivgen_hdrlen =
                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
               param->alg_prm.result_size);

        if (param->opad_needed)
                memcpy(chcr_req->key_ctx.key +
                       ((param->alg_prm.result_size <= 32) ? 32 :
                        CHCR_HASH_MAX_DIGEST_SIZE),
                       hmacctx->opad, param->alg_prm.result_size);

        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
                                        param->alg_prm.mk_size, 0,
                                        param->opad_needed,
                                        ((param->kctx_len +
                                          sizeof(chcr_req->key_ctx)) >> 4));
        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
                                     DUMMY_BYTES);
        if (param->bfr_len != 0) {
                req_ctx->hctx_wr.dma_addr =
                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
                                       param->bfr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
                                      req_ctx->hctx_wr.dma_addr)) {
                        error = -ENOMEM;
                        goto err;
                }
                req_ctx->hctx_wr.dma_len = param->bfr_len;
        } else {
                req_ctx->hctx_wr.dma_addr = 0;
        }
        chcr_add_hash_src_ent(req, ulptx, param);
        /* Request up to max wr size */
        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
                                (param->sg_len + param->bfr_len) : 0);
        atomic_inc(&adap->chcr_stats.digest_rqst);
        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
                    param->hash_size, transhdr_len,
                    temp, 0);
        req_ctx->hctx_wr.skb = skb;
        return skb;
err:
        kfree_skb(skb);
        return ERR_PTR(error);
}
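
/*
 * chcr_ahash_update - buffer partial blocks and send every complete
 * block to the hardware. Data smaller than one block is copied into
 * reqbfr and left for a later update/final; the trailing remainder of
 * a larger update is preserved the same way after the WR is posted.
 */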
static int chcr_ahash_update(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        u8 remainder = 0, bs;
        unsigned int nbytes = req->nbytes;
        struct hash_wr_param params;
        int error;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        if (nbytes + req_ctx->reqlen >= bs) {
                remainder = (nbytes + req_ctx->reqlen) % bs;
                nbytes = nbytes + req_ctx->reqlen - remainder;
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
                                   + req_ctx->reqlen, nbytes, 0);
                req_ctx->reqlen += nbytes;
                return 0;
        }
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error)
                return -ENOMEM;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len > req->nbytes)
                params.sg_len = req->nbytes;
        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
                        req_ctx->reqlen;
        params.opad_needed = 0;
        params.more = 1;
        params.last = 0;
        params.bfr_len = req_ctx->reqlen;
        params.scmd1 = 0;
        req_ctx->hctx_wr.srcsg = req->src;

        params.hash_size = params.alg_prm.result_size;
        req_ctx->data_len += params.sg_len + params.bfr_len;
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }

        req_ctx->hctx_wr.processed += params.sg_len;
        if (remainder) {
                /* Swap buffers */
                swap(req_ctx->reqbfr, req_ctx->skbfr);
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   req_ctx->reqbfr, remainder, req->nbytes -
                                   remainder);
        }
        req_ctx->reqlen = remainder;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);

        return -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        return error;
}
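
/*
 * create_last_hash_block - build the final padding block by hand:
 * a 0x80 byte followed by zeroes, with the total message length in
 * bits (scmd1 << 3) stored big-endian in the last 8 bytes of the
 * 64- or 128-byte block, as the SHA-1/SHA-2 padding rules require.
 */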
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
        memset(bfr_ptr, 0, bs);
        *bfr_ptr = 0x80;
        if (bs == 64)
                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
        else
                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct hash_wr_param params;
        struct sk_buff *skb;
        struct uld_ctx *u_ctx = NULL;
        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_init_hctx_per_wr(req_ctx);
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (is_hmac(crypto_ahash_tfm(rtfm)))
                params.opad_needed = 1;
        else
                params.opad_needed = 0;
        params.sg_len = 0;
        req_ctx->hctx_wr.isfinal = 1;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.opad_needed = 1;
                params.kctx_len *= 2;
        } else {
                params.opad_needed = 0;
        }

        req_ctx->hctx_wr.result = 1;
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.srcsg = req->src;
        if (req_ctx->reqlen == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.bfr_len = bs;
        } else {
                params.scmd1 = req_ctx->data_len;
                params.last = 1;
                params.more = 0;
        }
        params.hash_size = crypto_ahash_digestsize(rtfm);
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        req_ctx->reqlen = 0;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error)
                return -ENOMEM;

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }

        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
                                - req_ctx->reqlen;
                params.hash_size = params.alg_prm.result_size;
                params.scmd1 = 0;
        } else {
                params.last = 1;
                params.more = 0;
                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
                               params.sg_len;
        }
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        if ((req_ctx->reqlen + req->nbytes) == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.bfr_len = bs;
        }
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }
        req_ctx->reqlen = 0;
        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);

        return -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        return error;
}
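
/*
 * chcr_ahash_digest - one-shot init+update+final. The request goes out
 * as a single WR when it fits in HASH_SPACE_LEFT; otherwise only a
 * block-aligned prefix is sent now and chcr_ahash_continue() sends the
 * rest from the completion path.
 */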
static int chcr_ahash_digest(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error;

        rtfm->init(req);
        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error)
                return -ENOMEM;

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);

        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;
        } else {
                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.last = 1;
                params.more = 0;
                params.scmd1 = req->nbytes + req_ctx->data_len;
        }
        params.bfr_len = 0;
        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        req_ctx->data_len += params.bfr_len + params.sg_len;

        if (req->nbytes == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, 0);
                params.more = 1;
                params.bfr_len = bs;
        }

        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }
        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        return error;
}
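
/*
 * chcr_ahash_continue - issue the next WR of a multi-WR hash request.
 * Called from the completion handler with the running partial hash
 * already copied back into the request context.
 */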
static int chcr_ahash_continue(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }
        params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
                                            HASH_SPACE_LEFT(params.kctx_len),
                                            hctx_wr->src_ofst);
        if ((params.sg_len + hctx_wr->processed) > req->nbytes)
                params.sg_len = req->nbytes - hctx_wr->processed;
        if (!hctx_wr->result ||
            ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;
                params.scmd1 = 0;
        } else {
                params.last = 1;
                params.more = 0;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = reqctx->data_len + params.sg_len;
        }
        params.bfr_len = 0;
        reqctx->data_len += params.sg_len;
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto err;
        }
        hctx_wr->processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);
        return 0;
err:
        return error;
}
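
/*
 * chcr_handle_ahash_resp - completion handler for hash WRs. Copies the
 * final digest (or the intermediate partial hash) out of the CPL
 * payload and either finishes the request or kicks off the next WR.
 */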
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
                                          unsigned char *input,
                                          int err)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

        if (input == NULL)
                goto out;
        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;

        if (hctx_wr->dma_addr) {
                dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
                                 hctx_wr->dma_len, DMA_TO_DEVICE);
                hctx_wr->dma_addr = 0;
        }
        if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
                                 req->nbytes)) {
                if (hctx_wr->result == 1) {
                        hctx_wr->result = 0;
                        memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
                               digestsize);
                } else {
                        memcpy(reqctx->partial_hash,
                               input + sizeof(struct cpl_fw6_pld),
                               updated_digestsize);
                }
                goto unmap;
        }
        memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
               updated_digestsize);

        err = chcr_ahash_continue(req);
        if (err)
                goto unmap;
        return;
unmap:
        if (hctx_wr->is_sg_map)
                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
out:
        req->base.complete(&req->base, err);
}

/*
 * chcr_handle_resp - Completion handler: route the response to the
 * algorithm-specific handler, which unmaps the DMA buffers associated
 * with the request and completes it.
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err)
{
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                chcr_handle_aead_resp(aead_request_cast(req), input, err);
                break;
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
                                              input, err);
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
        }
        atomic_inc(&adap->chcr_stats.complete);
        return err;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = out;

        state->reqlen = req_ctx->reqlen;
        state->data_len = req_ctx->data_len;
        memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
        memcpy(state->partial_hash, req_ctx->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(state);
        return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

        req_ctx->reqlen = state->reqlen;
        req_ctx->data_len = state->data_len;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
        memcpy(req_ctx->partial_hash, state->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(req_ctx);
        return 0;
}
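
/*
 * chcr_ahash_setkey - precompute the HMAC ipad/opad partial hashes so
 * the hardware can resume from them instead of rehashing the key on
 * every request. Keys longer than the block size are first reduced by
 * hashing, per HMAC (RFC 2104).
 */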
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        unsigned int digestsize = crypto_ahash_digestsize(tfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int i, err = 0, updated_digestsize;

        SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

        /* Use the key to calculate the ipad and opad. ipad will be sent
         * with the first request's data, and opad will be sent with the
         * final hash result. ipad lives in hmacctx->ipad and opad in
         * hmacctx->opad.
         */
        shash->tfm = hmacctx->base_hash;
        shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
        if (keylen > bs) {
                err = crypto_shash_digest(shash, key, keylen,
                                          hmacctx->ipad);
                if (err)
                        goto out;
                keylen = digestsize;
        } else {
                memcpy(hmacctx->ipad, key, keylen);
        }
        memset(hmacctx->ipad + keylen, 0, bs - keylen);
        memcpy(hmacctx->opad, hmacctx->ipad, bs);

        for (i = 0; i < bs / sizeof(int); i++) {
                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
                *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
        }

        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
        err = chcr_compute_partial_hash(shash, hmacctx->ipad,
                                        hmacctx->ipad, digestsize);
        if (err)
                goto out;
        chcr_change_order(hmacctx->ipad, updated_digestsize);

        err = chcr_compute_partial_hash(shash, hmacctx->opad,
                                        hmacctx->opad, digestsize);
        if (err)
                goto out;
        chcr_change_order(hmacctx->opad, updated_digestsize);
out:
        return err;
}

static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                               unsigned int key_len)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned short context_size = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, key_len);
        if (err)
                goto badkey_err;

        memcpy(ablkctx->key, key, key_len);
        ablkctx->enckey_len = key_len;
        /* The first half of the combined XTS key is the data key;
         * its size in bits is (key_len / 2) * 8 == key_len << 2.
         */
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
        /* key_len counts both halves, so 32 bytes means 2 x AES-128. */
        ablkctx->key_ctx_hdr =
                FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
                                 CHCR_KEYCTX_NO_KEY, 1,
                                 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
        return err;
}

static int chcr_sha_init(struct ahash_request *areq)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        int digestsize = crypto_ahash_digestsize(tfm);

        req_ctx->data_len = 0;
        req_ctx->reqlen = 0;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        copy_hash_init_values(req_ctx->partial_hash, digestsize);

        return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
        unsigned int digestsize = crypto_ahash_digestsize(rtfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_sha_init(areq);
        req_ctx->data_len = bs;
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                if (digestsize == SHA224_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA256_DIGEST_SIZE);
                else if (digestsize == SHA384_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA512_DIGEST_SIZE);
                else
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               digestsize);
        }
        return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
        unsigned int digestsize =
                crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        hmacctx->base_hash = chcr_alloc_shash(digestsize);
        if (IS_ERR(hmacctx->base_hash))
                return PTR_ERR(hmacctx->base_hash);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

        if (hmacctx->base_hash) {
                chcr_free_shash(hmacctx->base_hash);
                hmacctx->base_hash = NULL;
        }
}
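
/*
 * chcr_aead_common_init - shared AEAD request setup: reject requests
 * with no key or with a decrypt payload shorter than the auth tag,
 * DMA-map the buffers, and count the source SG entries for the AAD
 * and payload regions.
 */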
static int chcr_aead_common_init(struct aead_request *req,
                                 unsigned short op_type)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        int error = -EINVAL;
        unsigned int authsize = crypto_aead_authsize(tfm);

        /* validate key size */
        if (aeadctx->enckey_len == 0)
                goto err;
        if (op_type && req->cryptlen < authsize)
                goto err;
        error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
                                  op_type);
        if (error) {
                error = -ENOMEM;
                goto err;
        }
        reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
                                          CHCR_SRC_SG_SIZE, 0);
        reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
                                          CHCR_SRC_SG_SIZE, req->assoclen);
        return 0;
err:
        return error;
}

static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
                                   int aadmax, int wrlen,
                                   unsigned short op_type)
{
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

        if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
            dst_nents > MAX_DSGL_ENT ||
            (req->assoclen > aadmax) ||
            (wrlen > SGE_MAX_WR_LEN))
                return 1;
        return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        struct aead_request *subreq = aead_request_ctx(req);

        aead_request_set_tfm(subreq, aeadctx->sw_cipher);
        aead_request_set_callback(subreq, req->base.flags,
                                  req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                               req->iv);
        aead_request_set_ad(subreq, req->assoclen);
        return op_type ? crypto_aead_decrypt(subreq) :
                crypto_aead_encrypt(subreq);
}
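
/*
 * create_authenc_wr - build the work request for the authenc
 * (cipher + hash) and null-cipher AEAD modes. Requests the hardware
 * cannot take (too much AAD, WR too large, empty payload) are bounced
 * to the software fallback.
 */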
static struct sk_buff *create_authenc_wr(struct aead_request *req,
                                         unsigned short qid,
                                         int size,
                                         unsigned short op_type)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        unsigned int transhdr_len;
        unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
        unsigned int kctx_len = 0, dnents;
        unsigned int assoclen = req->assoclen;
        unsigned int authsize = crypto_aead_authsize(tfm);
        int error = -EINVAL;
        int null = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(a_ctx(tfm)->dev);

        if (req->cryptlen == 0)
                return NULL;

        reqctx->b0_dma = 0;
        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
                null = 1;
                assoclen = 0;
        }
        error = chcr_aead_common_init(req, op_type);
        if (error)
                return ERR_PTR(error);
        dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
        dnents += sg_nents_xlen(req->dst, req->cryptlen +
                                (op_type ? -authsize : authsize),
                                CHCR_DST_SG_SIZE, req->assoclen);
        dnents += MIN_AUTH_SG; /* For IV */

        dst_size = get_space_for_phys_dsgl(dnents);
        kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
                - sizeof(chcr_req->key_ctx);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
                SGE_MAX_WR_LEN;
        temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
                : (sgl_len(reqctx->src_nents + reqctx->aad_nents
                           + MIN_GCM_SG) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);

        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
                                    transhdr_len, op_type)) {
                atomic_inc(&adap->chcr_stats.fallback);
                chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
                                    op_type);
                return ERR_PTR(chcr_aead_fallback(req, op_type));
        }
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }

        chcr_req = __skb_put_zero(skb, transhdr_len);

        temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;

        /*
         * Input order is AAD, IV and payload, where the IV is counted
         * as part of the authdata. All other fields are filled
         * according to the hardware spec.
         */
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
                                       assoclen + 1);
        chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                        assoclen ? 1 : 0, assoclen,
                                        assoclen + IV + 1,
                                        (temp & 0x1F0) >> 4);
        chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
                                        temp & 0xF,
                                        null ? 0 : assoclen + IV + 1,
                                        temp, temp);
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
                temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
        else
                temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
                                        (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
                                        temp,
                                        actx->auth_mode, aeadctx->hmac_ctrl,
                                        IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                   0, 0, dst_size);

        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
        if (op_type == CHCR_ENCRYPT_OP ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
                memcpy(chcr_req->key_ctx.key, aeadctx->key,
                       aeadctx->enckey_len);
        else
                memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
                       aeadctx->enckey_len);

        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
               actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
                memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
                       CTR_RFC3686_IV_SIZE);
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->iv, IV);
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
        chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
                kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
                    transhdr_len, temp, 0);
        reqctx->skb = skb;
        reqctx->op = op_type;

        return skb;
err:
        chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
                            op_type);

        return ERR_PTR(error);
}
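
/*
 * chcr_aead_dma_map/chcr_aead_dma_unmap - map the IV scratch buffer
 * and the src/dst scatterlists for DMA. A shared src/dst list is
 * mapped once, bidirectionally.
 */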
int chcr_aead_dma_map(struct device *dev,
                      struct aead_request *req,
                      unsigned short op_type)
{
        int error;
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(tfm);
        int dst_size;

        dst_size = req->assoclen + req->cryptlen + (op_type ?
                                                    -authsize : authsize);
        if (!req->cryptlen || !dst_size)
                return 0;
        reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
                                        DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, reqctx->iv_dma))
                return -ENOMEM;

        if (req->src == req->dst) {
                error = dma_map_sg(dev, req->src, sg_nents(req->src),
                                   DMA_BIDIRECTIONAL);
                if (!error)
                        goto err;
        } else {
                error = dma_map_sg(dev, req->src, sg_nents(req->src),
                                   DMA_TO_DEVICE);
                if (!error)
                        goto err;
                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
                                   DMA_FROM_DEVICE);
                if (!error) {
                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
                                     DMA_TO_DEVICE);
                        goto err;
                }
        }

        return 0;
err:
        dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        return -ENOMEM;
}

void chcr_aead_dma_unmap(struct device *dev,
                         struct aead_request *req,
                         unsigned short op_type)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(tfm);
        int dst_size;

        dst_size = req->assoclen + req->cryptlen + (op_type ?
                                                    -authsize : authsize);
        if (!req->cryptlen || !dst_size)
                return;

        dma_unmap_single(dev, reqctx->iv_dma, IV,
                         DMA_BIDIRECTIONAL);
        if (req->src == req->dst) {
                dma_unmap_sg(dev, req->src, sg_nents(req->src),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(dev, req->src, sg_nents(req->src),
                             DMA_TO_DEVICE);
                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
                             DMA_FROM_DEVICE);
        }
}

void chcr_add_aead_src_ent(struct aead_request *req,
                           struct ulptx_sgl *ulptx,
                           unsigned int assoclen,
                           unsigned short op_type)
{
        struct ulptx_walk ulp_walk;
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

        if (reqctx->imm) {
                u8 *buf = (u8 *)ulptx;

                if (reqctx->b0_dma) {
                        memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
                        buf += reqctx->b0_len;
                }
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   buf, assoclen, 0);
                buf += assoclen;
                memcpy(buf, reqctx->iv, IV);
                buf += IV;
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   buf, req->cryptlen, req->assoclen);
        } else {
                ulptx_walk_init(&ulp_walk, ulptx);
                if (reqctx->b0_dma)
                        ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
                                            &reqctx->b0_dma);
                ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
                ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
                ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
                                  req->assoclen);
                ulptx_walk_end(&ulp_walk);
        }
}

void chcr_add_aead_dst_ent(struct aead_request *req,
                           struct cpl_rx_phys_dsgl *phys_cpl,
                           unsigned int assoclen,
                           unsigned short op_type,
                           unsigned short qid)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
        u32 temp;

        dsgl_walk_init(&dsgl_walk, phys_cpl);
        if (reqctx->b0_dma)
                dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        temp = req->cryptlen + (op_type ? -authsize : authsize);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
        dsgl_walk_end(&dsgl_walk, qid);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
                             struct ulptx_sgl *ulptx,
                             struct cipher_wr_param *wrparam)
{
        struct ulptx_walk ulp_walk;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);

        if (reqctx->imm) {
                u8 *buf = (u8 *)ulptx;

                memcpy(buf, reqctx->iv, IV);
                buf += IV;
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   buf, wrparam->bytes, reqctx->processed);
        } else {
                ulptx_walk_init(&ulp_walk, ulptx);
                ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
                ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
                                  reqctx->src_ofst);
                reqctx->srcsg = ulp_walk.last_sg;
                reqctx->src_ofst = ulp_walk.last_sg_len;
                ulptx_walk_end(&ulp_walk);
        }
}

void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
                             struct cpl_rx_phys_dsgl *phys_cpl,
                             struct cipher_wr_param *wrparam,
                             unsigned short qid)
{
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct dsgl_walk dsgl_walk;

        dsgl_walk_init(&dsgl_walk, phys_cpl);
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
                         reqctx->dst_ofst);
        reqctx->dstsg = dsgl_walk.last_sg;
        reqctx->dst_ofst = dsgl_walk.last_sg_len;
        dsgl_walk_end(&dsgl_walk, qid);
}

void chcr_add_hash_src_ent(struct ahash_request *req,
                           struct ulptx_sgl *ulptx,
                           struct hash_wr_param *param)
{
        struct ulptx_walk ulp_walk;
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

        if (reqctx->hctx_wr.imm) {
                u8 *buf = (u8 *)ulptx;

                if (param->bfr_len) {
                        memcpy(buf, reqctx->reqbfr, param->bfr_len);
                        buf += param->bfr_len;
                }
                sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
                                   sg_nents(reqctx->hctx_wr.srcsg), buf,
                                   param->sg_len, 0);
        } else {
                ulptx_walk_init(&ulp_walk, ulptx);
                if (param->bfr_len)
                        ulptx_walk_add_page(&ulp_walk, param->bfr_len,
                                            &reqctx->hctx_wr.dma_addr);
                ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
                                  param->sg_len, reqctx->hctx_wr.src_ofst);
                reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
                reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
                ulptx_walk_end(&ulp_walk);
        }
}

int chcr_hash_dma_map(struct device *dev,
                      struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        int error = 0;

        if (!req->nbytes)
                return 0;
        error = dma_map_sg(dev, req->src, sg_nents(req->src),
                           DMA_TO_DEVICE);
        if (!error)
                return -ENOMEM;
        req_ctx->hctx_wr.is_sg_map = 1;
        return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
                         struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return;

        dma_unmap_sg(dev, req->src, sg_nents(req->src),
                     DMA_TO_DEVICE);
        req_ctx->hctx_wr.is_sg_map = 0;
}

int chcr_cipher_dma_map(struct device *dev,
                        struct ablkcipher_request *req)
{
        int error;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);

        reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
                                        DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, reqctx->iv_dma))
                return -ENOMEM;

        if (req->src == req->dst) {
                error = dma_map_sg(dev, req->src, sg_nents(req->src),
                                   DMA_BIDIRECTIONAL);
                if (!error)
                        goto err;
        } else {
                error = dma_map_sg(dev, req->src, sg_nents(req->src),
                                   DMA_TO_DEVICE);
                if (!error)
                        goto err;
                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
                                   DMA_FROM_DEVICE);
                if (!error) {
                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
                                     DMA_TO_DEVICE);
                        goto err;
                }
        }

        return 0;
err:
        dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
                           struct ablkcipher_request *req)
{
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);

        dma_unmap_single(dev, reqctx->iv_dma, IV,
                         DMA_BIDIRECTIONAL);
        if (req->src == req->dst) {
                dma_unmap_sg(dev, req->src, sg_nents(req->src),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(dev, req->src, sg_nents(req->src),
                             DMA_TO_DEVICE);
                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
                             DMA_FROM_DEVICE);
        }
}
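
/*
 * set_msg_len - encode the CCM message length into the trailing csize
 * bytes of B0, like the generic crypto/ccm.c helper of the same name.
 * Fails with -EOVERFLOW if the length does not fit in the requested
 * field width.
 */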
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (unsigned int)(1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}

static int generate_b0(struct aead_request *req,
                       struct chcr_aead_ctx *aeadctx,
                       unsigned short op_type)
{
        unsigned int l, lp, m;
        int rc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        u8 *b0 = reqctx->scratch_pad;

        m = crypto_aead_authsize(aead);

        memcpy(b0, reqctx->iv, 16);

        lp = b0[0];
        l = lp + 1;

        /* set m, bits 3-5 */
        *b0 |= (8 * ((m - 2) / 2));

        /* set adata, bit 6, if associated data is used */
        if (req->assoclen)
                *b0 |= 64;
        rc = set_msg_len(b0 + 16 - l,
                         (op_type == CHCR_DECRYPT_OP) ?
                         req->cryptlen - m : req->cryptlen, l);

        return rc;
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
        /* 2 <= L <= 8, so 1 <= L' <= 7. */
        if (iv[0] < 1 || iv[0] > 7)
                return -EINVAL;

        return 0;
}

static int ccm_format_packet(struct aead_request *req,
                             struct chcr_aead_ctx *aeadctx,
                             unsigned int sub_type,
                             unsigned short op_type)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        int rc = 0;

        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
                reqctx->iv[0] = 3;
                memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
                memcpy(reqctx->iv + 4, req->iv, 8);
                memset(reqctx->iv + 12, 0, 4);
                *((unsigned short *)(reqctx->scratch_pad + 16)) =
                        htons(req->assoclen - 8);
        } else {
                memcpy(reqctx->iv, req->iv, 16);
                *((unsigned short *)(reqctx->scratch_pad + 16)) =
                        htons(req->assoclen);
        }
        rc = generate_b0(req, aeadctx, op_type);
        /* zero the ctr value */
        memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
        return rc;
}

static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
                                  unsigned int dst_size,
                                  struct aead_request *req,
                                  unsigned short op_type)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
        unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
        unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
        unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
        unsigned int ccm_xtra;
        unsigned char tag_offset = 0, auth_offset = 0;
        unsigned int assoclen;

        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
                assoclen = req->assoclen - 8;
        else
                assoclen = req->assoclen;
        ccm_xtra = CCM_B0_SIZE +
                ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

        auth_offset = req->cryptlen ?
                (assoclen + IV + 1 + ccm_xtra) : 0;
        if (op_type == CHCR_DECRYPT_OP) {
                if (crypto_aead_authsize(tfm) != req->cryptlen)
                        tag_offset = crypto_aead_authsize(tfm);
                else
                        auth_offset = 0;
        }

        sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
                                        2, assoclen + 1 + ccm_xtra);
        sec_cpl->pldlen =
                htonl(assoclen + IV + req->cryptlen + ccm_xtra);
        /* For CCM there will always be a B0 block, so AAD start is always 1 */
        sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                        1, assoclen + ccm_xtra, assoclen
                                        + IV + 1 + ccm_xtra, 0);

        sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
                                        auth_offset, tag_offset,
                                        (op_type == CHCR_ENCRYPT_OP) ? 0 :
                                        crypto_aead_authsize(tfm));
        sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
                                        (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
                                        cipher_mode, mac_mode,
                                        aeadctx->hmac_ctrl, IV >> 1);

        sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
                                                          0, dst_size);
}

static int aead_ccm_validate_input(unsigned short op_type,
                                   struct aead_request *req,
                                   struct chcr_aead_ctx *aeadctx,
                                   unsigned int sub_type)
{
        if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
                if (crypto_ccm_check_iv(req->iv)) {
                        pr_err("CCM: IV check fails\n");
                        return -EINVAL;
                }
        } else {
                if (req->assoclen != 16 && req->assoclen != 20) {
                        pr_err("RFC4309: Invalid AAD length %d\n",
                               req->assoclen);
                        return -EINVAL;
                }
        }
        return 0;
}
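
/*
 * create_aead_ccm_wr - build the work request for CCM and RFC 4309
 * (CCM with implicit nonce) modes. B0 and the AAD length field are
 * formatted into scratch_pad and DMA-mapped separately so they can be
 * fed to the engine ahead of the request data.
 */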
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size,
					  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	reqctx->b0_dma = 0;
	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	error = chcr_aead_common_init(req, op_type);
	if (error)
		return ERR_PTR(error);

	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen
				+ (op_type ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_CCM_SG; /* for IV and B0 */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
			 MIN_CCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				    op_type);
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = (struct chcr_wr *)__skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
	if (error)
		goto dstmap_fail;

	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
					&reqctx->scratch_pad, reqctx->b0_len,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
			      reqctx->b0_dma)) {
		error = -ENOMEM;
		goto dstmap_fail;
	}

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;
	reqctx->op = op_type;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
	return ERR_PTR(error);
}
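
/*
 * create_gcm_wr - build the work request for GCM and RFC4106 requests.
 * Mirrors create_aead_ccm_wr, but the key context carries the GHASH
 * subkey H instead of a second key copy, and the 16-byte IV is formed
 * as SALT | IV | 0x00000001 with the counter pre-loaded to 1.
 */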
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size,
				     unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_dma = 0;
	error = chcr_aead_common_init(req, op_type);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
				(op_type ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_GCM_SG; /* for IV */
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
		(sgl_len(reqctx->src_nents +
			 reqctx->aad_nents + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				    op_type);
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	/* offset of the tag from the end */
	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->dev->rx_channel_id, 2,
					(assoclen + 1));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16-byte IV: S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	reqctx->op = op_type;
	return skb;

err:
	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
	return ERR_PTR(error);
}
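
/*
 * chcr_aead_cra_init - allocate the software fallback tfm for this
 * algorithm and size the per-request context to the larger of our own
 * request context and a complete fallback aead_request.
 */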
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				sizeof(struct aead_request) +
				crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The SHA1 authsize used in IPsec is 12, not 10, i.e. the
	 * maxauthsize / 2 rule does not hold for SHA1, so the
	 * authsize == 12 check must come before the
	 * authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
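
/*
 * The setauthsize handlers below map the requested ICV length onto the
 * hardware's tag-truncation controls. Lengths the hardware cannot emit
 * directly (e.g. 13- or 15-byte GCM tags) keep the full-length tag and
 * are verified in software (VERIFY_SW) on decrypt.
 */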
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
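
/*
 * chcr_ccm_common_setkey - store the AES key and build the key-context
 * header for the CCM variants; the context is sized for two copies of
 * the key, matching the double memcpy in create_aead_ccm_wr().
 */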
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
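
/*
 * chcr_gcm_setkey - program the GCM key: keep the nonce salt for
 * RFC4106, record the key in the context, and derive the GHASH subkey
 * H = E_K(0^128) with a software AES cipher so it can be loaded into
 * the hardware key context.
 */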
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %u\n", keylen);
		ret = -EINVAL;
		goto out;
	}
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
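
/*
 * chcr_authenc_setkey - split an authenc() key into its cipher and
 * auth halves, stash the cipher key (plus the reverse-round key needed
 * for CBC decrypt), and precompute the partial hashes of K^ipad and
 * K^opad so the hardware can complete the HMAC.
 */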
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use authkey to generate h(ipad)
	 * and h(opad), so authkey is not needed again. authkeylen holds the
	 * size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digests to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);
		memzero_explicit(&keys, sizeof(keys));

		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
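
/*
 * chcr_aead_digest_null_setkey - authenc with digest_null: only the
 * cipher key goes into the key context and the auth mode is set to NOP.
 */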
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %u\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}

	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
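
/*
 * chcr_aead_op - common submission path for all AEAD requests: check
 * that a device is bound, honour queue backpressure (-EBUSY unless the
 * request may backlog), build the work request through the supplied
 * constructor and post it. Completion is asynchronous, hence the
 * -EINPROGRESS return on success.
 */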
static int chcr_aead_op(struct aead_request *req,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_gcm_wr);
	}
}
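
/*
 * Template table of every cipher, hash and AEAD algorithm the driver
 * exposes. Fields common to each type (module owner, flags, ops) are
 * filled in at registration time by chcr_register_alg().
 */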
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = 2 * AES_MIN_KEY_SIZE,
				.max_keysize = 2 * AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_xts_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_ctr_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_rfc3686_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize = CTR_RFC3686_IV_SIZE,
				.setkey = chcr_aes_rfc3686_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
				.geniv = "seqiv",
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * the kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this
 * the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once, when the last device goes down. After this
 * the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
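
/*
 * Illustrative sketch only, not part of the driver: once start_crypto()
 * has registered the algorithms, kernel users reach them through the
 * generic crypto API; "gcm-aes-chcr" wins by cra_priority when the
 * device is present. Roughly (names like done_cb, src_sgl, dst_sgl are
 * placeholders):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, done_cb, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sgl, dst_sgl, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(may return -EINPROGRESS)
 */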