/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
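
/*
 * Byte-size lookup tables used when sizing work requests: sgl_ent_len[n] is
 * the space consumed by a ULPTX source SGL with n entries, dsgl_ent_len[n]
 * the space consumed by a destination PHYS_DSGL with n entries.
 * chcr_sg_ent_in_wr() and chcr_hash_ent_in_wr() index these tables to decide
 * how much payload still fits in the remaining WR space.
 */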
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};
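
/*
 * AES key-schedule round constants (Rcon), pre-shifted into the top byte so
 * they can be XORed directly with the big-endian key words in
 * get_aes_decrypt_key().
 */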
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
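
/*
 * Count how many hardware SGL entries are needed to cover @reqlen bytes of a
 * DMA-mapped scatterlist, skipping the first @skip bytes and splitting each
 * element into chunks of at most @entlen bytes.
 */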
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}
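
/*
 * Expand the AES key schedule (FIPS-197) and write the final Nk round-key
 * words out in reverse order.  The result is cached as the "reverse round
 * key" (ablkctx->rrkey) that generate_copy_rrkey() later programs into the
 * hardware key context for decryption.
 */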
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
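
/*
 * Run one already-padded HMAC ipad/opad block through the base hash and
 * export the intermediate state; the exported words become the partial
 * digest that is later programmed into the hardware key context.
 */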
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
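
/*
 * dsgl_walk_*() build the CPL_RX_PHYS_DSGL destination gather list that
 * tells the hardware where to write its output.  Entries are packed eight
 * addr/len pairs per phys_sge_pairs block, and scatterlist elements larger
 * than CHCR_DST_SG_SIZE are split across multiple entries.
 */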
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
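
/*
 * ulptx_walk_*() build the ULPTX source SGL the hardware reads its input
 * from.  The first entry lives in the SGL header (len0/addr0); subsequent
 * entries are stored as addr/len pairs, again split at CHCR_SRC_SG_SIZE.
 */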
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
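
/*
 * chcr_hash_ent_in_wr() and chcr_sg_ent_in_wr() walk the DMA-mapped source
 * (and, for ciphers, destination) scatterlists and return how many payload
 * bytes can be carried in the space left in one work request, using the
 * sgl_ent_len[] and dsgl_ent_len[] tables above to account for per-entry
 * SGL overhead.
 */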
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
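
/*
 * Hand the request to the software skcipher fallback.  This path is taken
 * when nothing useful can be offloaded in a WR, for example when no bytes
 * fit in the remaining WR space or the CTR counter would wrap immediately.
 */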
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
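
/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR and ULPTX header fields that are common to
 * cipher, hash and AEAD work requests: lengths in 16-byte units, the
 * completion cookie, the ingress queue for the response and the
 * immediate-data flag.
 */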
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: cipher work request parameters: the cipher request, the ingress
 *	     qid where the response of this WR should be received, and the
 *	     number of bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
								  ablkctx->ciph_mode,
								  0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
				   sg_nents(wrparam->req->src),
				   wrparam->req->info, 16,
				   reqctx->processed + wrparam->bytes -
				   AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
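
/*
 * Add @add to the big-endian 128-bit counter in @srciv, propagating the
 * carry across 32-bit words, and store the result in @dstiv.  Used to
 * advance the CTR IV by the number of blocks already processed.
 */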
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
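
/*
 * Clamp @bytes so that the low 32 bits of the CTR counter do not wrap within
 * a single work request; any remainder is handled by a follow-up WR or by
 * the software fallback.
 */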
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	/* number of blocks that can be processed without overflow */
	c = (u64)temp + 1;
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
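
/*
 * Recompute the XTS tweak for the next chunk: encrypt the original IV with
 * the second half of the key and multiply by x^(blocks processed) in
 * GF(2^128).  For non-final chunks the result is decrypted back so that the
 * hardware can regenerate the same tweak from the programmed IV.
 */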
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer, which is only 8 bytes,
 * stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
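
/*
 * Completion handler for cipher work requests.  Depending on how much of the
 * request has been processed it either completes the request, bounces the
 * remainder to the software fallback, or computes the next IV and issues a
 * follow-up WR for the remaining bytes.
 */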
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}
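
/*
 * Build the first work request for a cipher request: validate the
 * parameters, DMA-map the scatterlists, decide between immediate data and
 * SGL mode, set up the per-mode IV, work out how many bytes fit in this WR
 * and form it (falling back to software when nothing fits).
 */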
  1037. static int process_cipher(struct ablkcipher_request *req,
  1038. unsigned short qid,
  1039. struct sk_buff **skb,
  1040. unsigned short op_type)
  1041. {
  1042. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1043. unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
  1044. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  1045. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  1046. struct cipher_wr_param wrparam;
  1047. int bytes, err = -EINVAL;
  1048. reqctx->processed = 0;
  1049. if (!req->info)
  1050. goto error;
  1051. if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
  1052. (req->nbytes == 0) ||
  1053. (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
  1054. pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
  1055. ablkctx->enckey_len, req->nbytes, ivsize);
  1056. goto error;
  1057. }
  1058. chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1059. if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
  1060. AES_MIN_KEY_SIZE +
  1061. sizeof(struct cpl_rx_phys_dsgl) +
  1062. /*Min dsgl size*/
  1063. 32))) {
  1064. /* Can be sent as Imm*/
  1065. unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
  1066. dnents = sg_nents_xlen(req->dst, req->nbytes,
  1067. CHCR_DST_SG_SIZE, 0);
  1068. phys_dsgl = get_space_for_phys_dsgl(dnents);
  1069. kctx_len = roundup(ablkctx->enckey_len, 16);
  1070. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
  1071. reqctx->imm = (transhdr_len + IV + req->nbytes) <=
  1072. SGE_MAX_WR_LEN;
  1073. bytes = IV + req->nbytes;
  1074. } else {
  1075. reqctx->imm = 0;
  1076. }
  1077. if (!reqctx->imm) {
  1078. bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
  1079. CIP_SPACE_LEFT(ablkctx->enckey_len),
  1080. 0, 0);
  1081. if ((bytes + reqctx->processed) >= req->nbytes)
  1082. bytes = req->nbytes - reqctx->processed;
  1083. else
  1084. bytes = rounddown(bytes, 16);
  1085. } else {
  1086. bytes = req->nbytes;
  1087. }
  1088. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1089. CRYPTO_ALG_SUB_TYPE_CTR) {
  1090. bytes = adjust_ctr_overflow(req->info, bytes);
  1091. }
  1092. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1093. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
  1094. memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
  1095. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
  1096. CTR_RFC3686_IV_SIZE);
  1097. /* initialize counter portion of counter block */
  1098. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  1099. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  1100. } else {
  1101. memcpy(reqctx->iv, req->info, IV);
  1102. }
  1103. if (unlikely(bytes == 0)) {
  1104. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  1105. req);
  1106. err = chcr_cipher_fallback(ablkctx->sw_cipher,
  1107. req->base.flags,
  1108. req->src,
  1109. req->dst,
  1110. req->nbytes,
  1111. reqctx->iv,
  1112. op_type);
  1113. goto error;
  1114. }
  1115. reqctx->op = op_type;
  1116. reqctx->srcsg = req->src;
  1117. reqctx->dstsg = req->dst;
  1118. reqctx->src_ofst = 0;
  1119. reqctx->dst_ofst = 0;
  1120. wrparam.qid = qid;
  1121. wrparam.req = req;
  1122. wrparam.bytes = bytes;
  1123. *skb = create_cipher_wr(&wrparam);
  1124. if (IS_ERR(*skb)) {
  1125. err = PTR_ERR(*skb);
  1126. goto unmap;
  1127. }
  1128. reqctx->processed = bytes;
  1129. reqctx->last_req_len = bytes;
  1130. return 0;
  1131. unmap:
  1132. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1133. error:
  1134. return err;
  1135. }
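/*
 * Editorial note (illustrative, not from the driver sources): process_cipher()
 * chooses between two transfer modes.  If the whole request fits into a
 * single work request, i.e.
 *
 *      transhdr_len + IV + req->nbytes <= SGE_MAX_WR_LEN
 *
 * (SGE_MAX_WR_LEN is assumed here to be 512 bytes, as in cxgb4), the payload
 * is copied inline as immediate data (reqctx->imm = 1).  Otherwise only as
 * many bytes as fit behind the WR headers are described per work request,
 * rounded down to a multiple of 16, and chcr_handle_cipher_resp() issues
 * follow-up WRs for the remainder tracked in reqctx->processed.
 */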
  1136. static int chcr_aes_encrypt(struct ablkcipher_request *req)
  1137. {
  1138. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1139. struct sk_buff *skb = NULL;
  1140. int err, isfull = 0;
  1141. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1142. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1143. c_ctx(tfm)->tx_qidx))) {
  1144. isfull = 1;
  1145. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1146. return -ENOSPC;
  1147. }
  1148. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1149. &skb, CHCR_ENCRYPT_OP);
  1150. if (err || !skb)
  1151. return err;
  1152. skb->dev = u_ctx->lldi.ports[0];
  1153. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1154. chcr_send_wr(skb);
  1155. return isfull ? -EBUSY : -EINPROGRESS;
  1156. }
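/*
 * Illustrative caller-side sketch (editorial, not part of this driver),
 * assuming the generic crypto_wait_req() helpers: both -EINPROGRESS and the
 * backlog -EBUSY returned above are handled by waiting for the completion
 * callback, while -ENOSPC is a hard "queue full" error for callers that did
 * not set CRYPTO_TFM_REQ_MAY_BACKLOG.
 *
 *      DECLARE_CRYPTO_WAIT(wait);
 *
 *      ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                      crypto_req_done, &wait);
 *      err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 */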
  1157. static int chcr_aes_decrypt(struct ablkcipher_request *req)
  1158. {
  1159. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1160. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1161. struct sk_buff *skb = NULL;
  1162. int err, isfull = 0;
  1163. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1164. c_ctx(tfm)->tx_qidx))) {
  1165. isfull = 1;
  1166. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1167. return -ENOSPC;
  1168. }
  1169. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1170. &skb, CHCR_DECRYPT_OP);
  1171. if (err || !skb)
  1172. return err;
  1173. skb->dev = u_ctx->lldi.ports[0];
  1174. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1175. chcr_send_wr(skb);
  1176. return isfull ? -EBUSY : -EINPROGRESS;
  1177. }
  1178. static int chcr_device_init(struct chcr_context *ctx)
  1179. {
  1180. struct uld_ctx *u_ctx = NULL;
  1181. struct adapter *adap;
  1182. unsigned int id;
  1183. int txq_perchan, txq_idx, ntxq;
  1184. int err = 0, rxq_perchan, rxq_idx;
  1185. id = smp_processor_id();
        if (!ctx->dev) {
                u_ctx = assign_chcr_device();
                if (!u_ctx) {
                        err = -ENXIO;
                        pr_err("chcr device assignment fails\n");
                        goto out;
                }
  1192. ctx->dev = u_ctx->dev;
  1193. adap = padap(ctx->dev);
  1194. ntxq = u_ctx->lldi.ntxq;
  1195. rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
  1196. txq_perchan = ntxq / u_ctx->lldi.nchan;
  1197. spin_lock(&ctx->dev->lock_chcr_dev);
  1198. ctx->tx_chan_id = ctx->dev->tx_channel_id;
  1199. ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
  1200. ctx->dev->rx_channel_id = 0;
  1201. spin_unlock(&ctx->dev->lock_chcr_dev);
  1202. rxq_idx = ctx->tx_chan_id * rxq_perchan;
  1203. rxq_idx += id % rxq_perchan;
  1204. txq_idx = ctx->tx_chan_id * txq_perchan;
  1205. txq_idx += id % txq_perchan;
  1206. ctx->rx_qidx = rxq_idx;
  1207. ctx->tx_qidx = txq_idx;
                /* Channel ID used by SGE to forward the packet to the host.
                 * The same value should be used in the cpl_fw6_pld RSS_CH
                 * field by FW. The driver programs the PCI channel ID to be
                 * used in FW at the time of queue allocation with the value
                 * "pi->tx_chan".
                 */
  1213. ctx->pci_chan_id = txq_idx / txq_perchan;
  1214. }
  1215. out:
  1216. return err;
  1217. }
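/*
 * Worked example of the queue selection above (editorial; the numbers are
 * illustrative only): with nchan = 2 and nrxq = ntxq = 8, rxq_perchan =
 * txq_perchan = 4.  tx_chan_id alternates 0/1 per context, so a context set
 * up on CPU 6 with tx_chan_id = 1 gets
 *
 *      rxq_idx = 1 * 4 + (6 % 4) = 6
 *      txq_idx = 1 * 4 + (6 % 4) = 6
 *      pci_chan_id = txq_idx / txq_perchan = 1
 */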
  1218. static int chcr_cra_init(struct crypto_tfm *tfm)
  1219. {
  1220. struct crypto_alg *alg = tfm->__crt_alg;
  1221. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1222. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1223. ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
  1224. CRYPTO_ALG_NEED_FALLBACK);
  1225. if (IS_ERR(ablkctx->sw_cipher)) {
  1226. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1227. return PTR_ERR(ablkctx->sw_cipher);
  1228. }
  1229. if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
                /* To update tweak */
  1231. ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
  1232. if (IS_ERR(ablkctx->aes_generic)) {
  1233. pr_err("failed to allocate aes cipher for tweak\n");
  1234. return PTR_ERR(ablkctx->aes_generic);
  1235. }
  1236. } else
  1237. ablkctx->aes_generic = NULL;
  1238. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1239. return chcr_device_init(crypto_tfm_ctx(tfm));
  1240. }
  1241. static int chcr_rfc3686_init(struct crypto_tfm *tfm)
  1242. {
  1243. struct crypto_alg *alg = tfm->__crt_alg;
  1244. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1245. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
         * cannot be used as the fallback in chcr_handle_cipher_response.
         */
  1249. ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
  1250. CRYPTO_ALG_NEED_FALLBACK);
  1251. if (IS_ERR(ablkctx->sw_cipher)) {
  1252. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1253. return PTR_ERR(ablkctx->sw_cipher);
  1254. }
  1255. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1256. return chcr_device_init(crypto_tfm_ctx(tfm));
  1257. }
  1258. static void chcr_cra_exit(struct crypto_tfm *tfm)
  1259. {
  1260. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1261. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1262. crypto_free_sync_skcipher(ablkctx->sw_cipher);
  1263. if (ablkctx->aes_generic)
  1264. crypto_free_cipher(ablkctx->aes_generic);
  1265. }
  1266. static int get_alg_config(struct algo_param *params,
  1267. unsigned int auth_size)
  1268. {
  1269. switch (auth_size) {
  1270. case SHA1_DIGEST_SIZE:
  1271. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
  1272. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
  1273. params->result_size = SHA1_DIGEST_SIZE;
  1274. break;
  1275. case SHA224_DIGEST_SIZE:
  1276. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1277. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
  1278. params->result_size = SHA256_DIGEST_SIZE;
  1279. break;
  1280. case SHA256_DIGEST_SIZE:
  1281. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1282. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
  1283. params->result_size = SHA256_DIGEST_SIZE;
  1284. break;
  1285. case SHA384_DIGEST_SIZE:
  1286. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1287. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
  1288. params->result_size = SHA512_DIGEST_SIZE;
  1289. break;
  1290. case SHA512_DIGEST_SIZE:
  1291. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1292. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
  1293. params->result_size = SHA512_DIGEST_SIZE;
  1294. break;
  1295. default:
  1296. pr_err("chcr : ERROR, unsupported digest size\n");
  1297. return -EINVAL;
  1298. }
  1299. return 0;
  1300. }
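/*
 * Editorial note: result_size above is the size of the running hash state the
 * hardware hands back, not the final digest.  SHA-224 and SHA-384 are
 * truncations of SHA-256 and SHA-512, so their intermediate state is a full
 * SHA-256/SHA-512 state; e.g. for sha224 the partial hash copied back in
 * chcr_handle_ahash_resp() is SHA256_DIGEST_SIZE (32) bytes even though only
 * 28 bytes end up in req->result.
 */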
  1301. static inline void chcr_free_shash(struct crypto_shash *base_hash)
  1302. {
  1303. crypto_free_shash(base_hash);
  1304. }
/**
 * create_hash_wr - Create hash work request
 * @req: ahash request
 * @param: hash work request parameters
 */
  1309. static struct sk_buff *create_hash_wr(struct ahash_request *req,
  1310. struct hash_wr_param *param)
  1311. {
  1312. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1313. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1314. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1315. struct sk_buff *skb = NULL;
  1316. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1317. struct chcr_wr *chcr_req;
  1318. struct ulptx_sgl *ulptx;
  1319. unsigned int nents = 0, transhdr_len;
  1320. unsigned int temp = 0;
  1321. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  1322. GFP_ATOMIC;
  1323. struct adapter *adap = padap(h_ctx(tfm)->dev);
  1324. int error = 0;
  1325. transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
  1326. req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
  1327. param->sg_len) <= SGE_MAX_WR_LEN;
  1328. nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
  1329. CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
  1330. nents += param->bfr_len ? 1 : 0;
  1331. transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
  1332. param->sg_len, 16) : (sgl_len(nents) * 8);
  1333. transhdr_len = roundup(transhdr_len, 16);
  1334. skb = alloc_skb(transhdr_len, flags);
  1335. if (!skb)
  1336. return ERR_PTR(-ENOMEM);
  1337. chcr_req = __skb_put_zero(skb, transhdr_len);
  1338. chcr_req->sec_cpl.op_ivinsrtofst =
  1339. FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
  1340. chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
  1341. chcr_req->sec_cpl.aadstart_cipherstop_hi =
  1342. FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
  1343. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  1344. FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
  1345. chcr_req->sec_cpl.seqno_numivs =
  1346. FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
  1347. param->opad_needed, 0);
  1348. chcr_req->sec_cpl.ivgen_hdrlen =
  1349. FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
  1350. memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
  1351. param->alg_prm.result_size);
  1352. if (param->opad_needed)
  1353. memcpy(chcr_req->key_ctx.key +
  1354. ((param->alg_prm.result_size <= 32) ? 32 :
  1355. CHCR_HASH_MAX_DIGEST_SIZE),
  1356. hmacctx->opad, param->alg_prm.result_size);
  1357. chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
  1358. param->alg_prm.mk_size, 0,
  1359. param->opad_needed,
  1360. ((param->kctx_len +
  1361. sizeof(chcr_req->key_ctx)) >> 4));
  1362. chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
  1363. ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
  1364. DUMMY_BYTES);
  1365. if (param->bfr_len != 0) {
  1366. req_ctx->hctx_wr.dma_addr =
  1367. dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
  1368. param->bfr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
                                      req_ctx->hctx_wr.dma_addr)) {
  1371. error = -ENOMEM;
  1372. goto err;
  1373. }
  1374. req_ctx->hctx_wr.dma_len = param->bfr_len;
  1375. } else {
  1376. req_ctx->hctx_wr.dma_addr = 0;
  1377. }
  1378. chcr_add_hash_src_ent(req, ulptx, param);
        /* Request up to max WR size */
  1380. temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
  1381. (param->sg_len + param->bfr_len) : 0);
  1382. atomic_inc(&adap->chcr_stats.digest_rqst);
  1383. create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
  1384. param->hash_size, transhdr_len,
  1385. temp, 0);
  1386. req_ctx->hctx_wr.skb = skb;
  1387. return skb;
  1388. err:
  1389. kfree_skb(skb);
  1390. return ERR_PTR(error);
  1391. }
  1392. static int chcr_ahash_update(struct ahash_request *req)
  1393. {
  1394. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1395. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1396. struct uld_ctx *u_ctx = NULL;
  1397. struct sk_buff *skb;
  1398. u8 remainder = 0, bs;
  1399. unsigned int nbytes = req->nbytes;
  1400. struct hash_wr_param params;
  1401. int error, isfull = 0;
  1402. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1403. u_ctx = ULD_CTX(h_ctx(rtfm));
  1404. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1405. h_ctx(rtfm)->tx_qidx))) {
  1406. isfull = 1;
  1407. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1408. return -ENOSPC;
  1409. }
  1410. if (nbytes + req_ctx->reqlen >= bs) {
  1411. remainder = (nbytes + req_ctx->reqlen) % bs;
  1412. nbytes = nbytes + req_ctx->reqlen - remainder;
  1413. } else {
  1414. sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
  1415. + req_ctx->reqlen, nbytes, 0);
  1416. req_ctx->reqlen += nbytes;
  1417. return 0;
  1418. }
  1419. chcr_init_hctx_per_wr(req_ctx);
  1420. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1421. if (error)
  1422. return -ENOMEM;
  1423. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1424. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1425. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1426. HASH_SPACE_LEFT(params.kctx_len), 0);
  1427. if (params.sg_len > req->nbytes)
  1428. params.sg_len = req->nbytes;
  1429. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
  1430. req_ctx->reqlen;
  1431. params.opad_needed = 0;
  1432. params.more = 1;
  1433. params.last = 0;
  1434. params.bfr_len = req_ctx->reqlen;
  1435. params.scmd1 = 0;
  1436. req_ctx->hctx_wr.srcsg = req->src;
  1437. params.hash_size = params.alg_prm.result_size;
  1438. req_ctx->data_len += params.sg_len + params.bfr_len;
  1439. skb = create_hash_wr(req, &params);
  1440. if (IS_ERR(skb)) {
  1441. error = PTR_ERR(skb);
  1442. goto unmap;
  1443. }
  1444. req_ctx->hctx_wr.processed += params.sg_len;
  1445. if (remainder) {
  1446. /* Swap buffers */
  1447. swap(req_ctx->reqbfr, req_ctx->skbfr);
  1448. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  1449. req_ctx->reqbfr, remainder, req->nbytes -
  1450. remainder);
  1451. }
  1452. req_ctx->reqlen = remainder;
  1453. skb->dev = u_ctx->lldi.ports[0];
  1454. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1455. chcr_send_wr(skb);
  1456. return isfull ? -EBUSY : -EINPROGRESS;
  1457. unmap:
  1458. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1459. return error;
  1460. }
  1461. static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
  1462. {
  1463. memset(bfr_ptr, 0, bs);
  1464. *bfr_ptr = 0x80;
  1465. if (bs == 64)
  1466. *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
  1467. else
  1468. *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
  1469. }
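/*
 * Worked example (editorial): for a 64-byte block size and scmd1 == 192 bytes
 * already hashed, create_last_hash_block() builds the standard MD/SHA final
 * block
 *
 *      byte  0      : 0x80
 *      bytes 1..55  : 0x00
 *      bytes 56..63 : cpu_to_be64(192 << 3), i.e. 1536 bits
 *
 * which is exactly the FIPS 180-4 padding for a block-aligned message.
 */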
  1470. static int chcr_ahash_final(struct ahash_request *req)
  1471. {
  1472. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1473. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1474. struct hash_wr_param params;
  1475. struct sk_buff *skb;
  1476. struct uld_ctx *u_ctx = NULL;
  1477. u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1478. chcr_init_hctx_per_wr(req_ctx);
  1479. u_ctx = ULD_CTX(h_ctx(rtfm));
  1480. if (is_hmac(crypto_ahash_tfm(rtfm)))
  1481. params.opad_needed = 1;
  1482. else
  1483. params.opad_needed = 0;
  1484. params.sg_len = 0;
  1485. req_ctx->hctx_wr.isfinal = 1;
  1486. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1487. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1488. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1489. params.opad_needed = 1;
  1490. params.kctx_len *= 2;
  1491. } else {
  1492. params.opad_needed = 0;
  1493. }
  1494. req_ctx->hctx_wr.result = 1;
  1495. params.bfr_len = req_ctx->reqlen;
  1496. req_ctx->data_len += params.bfr_len + params.sg_len;
  1497. req_ctx->hctx_wr.srcsg = req->src;
  1498. if (req_ctx->reqlen == 0) {
  1499. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1500. params.last = 0;
  1501. params.more = 1;
  1502. params.scmd1 = 0;
  1503. params.bfr_len = bs;
  1504. } else {
  1505. params.scmd1 = req_ctx->data_len;
  1506. params.last = 1;
  1507. params.more = 0;
  1508. }
  1509. params.hash_size = crypto_ahash_digestsize(rtfm);
  1510. skb = create_hash_wr(req, &params);
  1511. if (IS_ERR(skb))
  1512. return PTR_ERR(skb);
  1513. req_ctx->reqlen = 0;
  1514. skb->dev = u_ctx->lldi.ports[0];
  1515. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1516. chcr_send_wr(skb);
  1517. return -EINPROGRESS;
  1518. }
  1519. static int chcr_ahash_finup(struct ahash_request *req)
  1520. {
  1521. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1522. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1523. struct uld_ctx *u_ctx = NULL;
  1524. struct sk_buff *skb;
  1525. struct hash_wr_param params;
  1526. u8 bs;
  1527. int error, isfull = 0;
  1528. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1529. u_ctx = ULD_CTX(h_ctx(rtfm));
  1530. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1531. h_ctx(rtfm)->tx_qidx))) {
  1532. isfull = 1;
  1533. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1534. return -ENOSPC;
  1535. }
  1536. chcr_init_hctx_per_wr(req_ctx);
  1537. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1538. if (error)
  1539. return -ENOMEM;
  1540. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1541. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1542. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1543. params.kctx_len *= 2;
  1544. params.opad_needed = 1;
  1545. } else {
  1546. params.opad_needed = 0;
  1547. }
  1548. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1549. HASH_SPACE_LEFT(params.kctx_len), 0);
  1550. if (params.sg_len < req->nbytes) {
  1551. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1552. params.kctx_len /= 2;
  1553. params.opad_needed = 0;
  1554. }
  1555. params.last = 0;
  1556. params.more = 1;
  1557. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
  1558. - req_ctx->reqlen;
  1559. params.hash_size = params.alg_prm.result_size;
  1560. params.scmd1 = 0;
  1561. } else {
  1562. params.last = 1;
  1563. params.more = 0;
  1564. params.sg_len = req->nbytes;
  1565. params.hash_size = crypto_ahash_digestsize(rtfm);
  1566. params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
  1567. params.sg_len;
  1568. }
  1569. params.bfr_len = req_ctx->reqlen;
  1570. req_ctx->data_len += params.bfr_len + params.sg_len;
  1571. req_ctx->hctx_wr.result = 1;
  1572. req_ctx->hctx_wr.srcsg = req->src;
  1573. if ((req_ctx->reqlen + req->nbytes) == 0) {
  1574. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1575. params.last = 0;
  1576. params.more = 1;
  1577. params.scmd1 = 0;
  1578. params.bfr_len = bs;
  1579. }
  1580. skb = create_hash_wr(req, &params);
  1581. if (IS_ERR(skb)) {
  1582. error = PTR_ERR(skb);
  1583. goto unmap;
  1584. }
  1585. req_ctx->reqlen = 0;
  1586. req_ctx->hctx_wr.processed += params.sg_len;
  1587. skb->dev = u_ctx->lldi.ports[0];
  1588. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1589. chcr_send_wr(skb);
  1590. return isfull ? -EBUSY : -EINPROGRESS;
  1591. unmap:
  1592. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1593. return error;
  1594. }
  1595. static int chcr_ahash_digest(struct ahash_request *req)
  1596. {
  1597. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1598. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1599. struct uld_ctx *u_ctx = NULL;
  1600. struct sk_buff *skb;
  1601. struct hash_wr_param params;
  1602. u8 bs;
  1603. int error, isfull = 0;
  1604. rtfm->init(req);
  1605. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1606. u_ctx = ULD_CTX(h_ctx(rtfm));
  1607. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1608. h_ctx(rtfm)->tx_qidx))) {
  1609. isfull = 1;
  1610. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1611. return -ENOSPC;
  1612. }
  1613. chcr_init_hctx_per_wr(req_ctx);
  1614. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1615. if (error)
  1616. return -ENOMEM;
  1617. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1618. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1619. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1620. params.kctx_len *= 2;
  1621. params.opad_needed = 1;
  1622. } else {
  1623. params.opad_needed = 0;
  1624. }
  1625. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1626. HASH_SPACE_LEFT(params.kctx_len), 0);
  1627. if (params.sg_len < req->nbytes) {
  1628. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1629. params.kctx_len /= 2;
  1630. params.opad_needed = 0;
  1631. }
  1632. params.last = 0;
  1633. params.more = 1;
  1634. params.scmd1 = 0;
  1635. params.sg_len = rounddown(params.sg_len, bs);
  1636. params.hash_size = params.alg_prm.result_size;
  1637. } else {
  1638. params.sg_len = req->nbytes;
  1639. params.hash_size = crypto_ahash_digestsize(rtfm);
  1640. params.last = 1;
  1641. params.more = 0;
  1642. params.scmd1 = req->nbytes + req_ctx->data_len;
  1643. }
  1644. params.bfr_len = 0;
  1645. req_ctx->hctx_wr.result = 1;
  1646. req_ctx->hctx_wr.srcsg = req->src;
  1647. req_ctx->data_len += params.bfr_len + params.sg_len;
  1648. if (req->nbytes == 0) {
  1649. create_last_hash_block(req_ctx->reqbfr, bs, 0);
  1650. params.more = 1;
  1651. params.bfr_len = bs;
  1652. }
  1653. skb = create_hash_wr(req, &params);
  1654. if (IS_ERR(skb)) {
  1655. error = PTR_ERR(skb);
  1656. goto unmap;
  1657. }
  1658. req_ctx->hctx_wr.processed += params.sg_len;
  1659. skb->dev = u_ctx->lldi.ports[0];
  1660. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1661. chcr_send_wr(skb);
  1662. return isfull ? -EBUSY : -EINPROGRESS;
  1663. unmap:
  1664. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1665. return error;
  1666. }
  1667. static int chcr_ahash_continue(struct ahash_request *req)
  1668. {
  1669. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1670. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1671. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1672. struct uld_ctx *u_ctx = NULL;
  1673. struct sk_buff *skb;
  1674. struct hash_wr_param params;
  1675. u8 bs;
  1676. int error;
  1677. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1678. u_ctx = ULD_CTX(h_ctx(rtfm));
  1679. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1680. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1681. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1682. params.kctx_len *= 2;
  1683. params.opad_needed = 1;
  1684. } else {
  1685. params.opad_needed = 0;
  1686. }
  1687. params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
  1688. HASH_SPACE_LEFT(params.kctx_len),
  1689. hctx_wr->src_ofst);
  1690. if ((params.sg_len + hctx_wr->processed) > req->nbytes)
  1691. params.sg_len = req->nbytes - hctx_wr->processed;
  1692. if (!hctx_wr->result ||
  1693. ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
  1694. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1695. params.kctx_len /= 2;
  1696. params.opad_needed = 0;
  1697. }
  1698. params.last = 0;
  1699. params.more = 1;
  1700. params.sg_len = rounddown(params.sg_len, bs);
  1701. params.hash_size = params.alg_prm.result_size;
  1702. params.scmd1 = 0;
  1703. } else {
  1704. params.last = 1;
  1705. params.more = 0;
  1706. params.hash_size = crypto_ahash_digestsize(rtfm);
  1707. params.scmd1 = reqctx->data_len + params.sg_len;
  1708. }
  1709. params.bfr_len = 0;
  1710. reqctx->data_len += params.sg_len;
  1711. skb = create_hash_wr(req, &params);
  1712. if (IS_ERR(skb)) {
  1713. error = PTR_ERR(skb);
  1714. goto err;
  1715. }
  1716. hctx_wr->processed += params.sg_len;
  1717. skb->dev = u_ctx->lldi.ports[0];
  1718. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1719. chcr_send_wr(skb);
  1720. return 0;
  1721. err:
  1722. return error;
  1723. }
  1724. static inline void chcr_handle_ahash_resp(struct ahash_request *req,
  1725. unsigned char *input,
  1726. int err)
  1727. {
  1728. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1729. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1730. int digestsize, updated_digestsize;
  1731. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1732. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1733. if (input == NULL)
  1734. goto out;
  1735. digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
  1736. updated_digestsize = digestsize;
  1737. if (digestsize == SHA224_DIGEST_SIZE)
  1738. updated_digestsize = SHA256_DIGEST_SIZE;
  1739. else if (digestsize == SHA384_DIGEST_SIZE)
  1740. updated_digestsize = SHA512_DIGEST_SIZE;
  1741. if (hctx_wr->dma_addr) {
  1742. dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
  1743. hctx_wr->dma_len, DMA_TO_DEVICE);
  1744. hctx_wr->dma_addr = 0;
  1745. }
  1746. if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
  1747. req->nbytes)) {
  1748. if (hctx_wr->result == 1) {
  1749. hctx_wr->result = 0;
  1750. memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
  1751. digestsize);
  1752. } else {
  1753. memcpy(reqctx->partial_hash,
  1754. input + sizeof(struct cpl_fw6_pld),
  1755. updated_digestsize);
  1756. }
  1757. goto unmap;
  1758. }
  1759. memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
  1760. updated_digestsize);
  1761. err = chcr_ahash_continue(req);
  1762. if (err)
  1763. goto unmap;
  1764. return;
  1765. unmap:
  1766. if (hctx_wr->is_sg_map)
  1767. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1768. out:
  1769. req->base.complete(&req->base, err);
  1770. }
  1771. /*
  1772. * chcr_handle_resp - Unmap the DMA buffers associated with the request
  1773. * @req: crypto request
  1774. */
  1775. int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
  1776. int err)
  1777. {
  1778. struct crypto_tfm *tfm = req->tfm;
  1779. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1780. struct adapter *adap = padap(ctx->dev);
  1781. switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
  1782. case CRYPTO_ALG_TYPE_AEAD:
  1783. chcr_handle_aead_resp(aead_request_cast(req), input, err);
  1784. break;
  1785. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  1786. err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
  1787. input, err);
  1788. break;
  1789. case CRYPTO_ALG_TYPE_AHASH:
  1790. chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
  1791. }
  1792. atomic_inc(&adap->chcr_stats.complete);
  1793. return err;
  1794. }
  1795. static int chcr_ahash_export(struct ahash_request *areq, void *out)
  1796. {
  1797. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1798. struct chcr_ahash_req_ctx *state = out;
  1799. state->reqlen = req_ctx->reqlen;
  1800. state->data_len = req_ctx->data_len;
  1801. memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
  1802. memcpy(state->partial_hash, req_ctx->partial_hash,
  1803. CHCR_HASH_MAX_DIGEST_SIZE);
  1804. chcr_init_hctx_per_wr(state);
  1805. return 0;
  1806. }
  1807. static int chcr_ahash_import(struct ahash_request *areq, const void *in)
  1808. {
  1809. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1810. struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
  1811. req_ctx->reqlen = state->reqlen;
  1812. req_ctx->data_len = state->data_len;
  1813. req_ctx->reqbfr = req_ctx->bfr1;
  1814. req_ctx->skbfr = req_ctx->bfr2;
  1815. memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
  1816. memcpy(req_ctx->partial_hash, state->partial_hash,
  1817. CHCR_HASH_MAX_DIGEST_SIZE);
  1818. chcr_init_hctx_per_wr(req_ctx);
  1819. return 0;
  1820. }
  1821. static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
  1822. unsigned int keylen)
  1823. {
  1824. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1825. unsigned int digestsize = crypto_ahash_digestsize(tfm);
  1826. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
  1827. unsigned int i, err = 0, updated_digestsize;
  1828. SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
        /* Use the key to calculate the ipad and opad. The ipad will be sent
         * with the first request's data and the opad with the final hash
         * result. The ipad is kept in hmacctx->ipad and the opad in
         * hmacctx->opad.
         */
  1833. shash->tfm = hmacctx->base_hash;
  1834. shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
  1835. if (keylen > bs) {
  1836. err = crypto_shash_digest(shash, key, keylen,
  1837. hmacctx->ipad);
  1838. if (err)
  1839. goto out;
  1840. keylen = digestsize;
  1841. } else {
  1842. memcpy(hmacctx->ipad, key, keylen);
  1843. }
  1844. memset(hmacctx->ipad + keylen, 0, bs - keylen);
  1845. memcpy(hmacctx->opad, hmacctx->ipad, bs);
  1846. for (i = 0; i < bs / sizeof(int); i++) {
  1847. *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
  1848. *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
  1849. }
  1850. updated_digestsize = digestsize;
  1851. if (digestsize == SHA224_DIGEST_SIZE)
  1852. updated_digestsize = SHA256_DIGEST_SIZE;
  1853. else if (digestsize == SHA384_DIGEST_SIZE)
  1854. updated_digestsize = SHA512_DIGEST_SIZE;
  1855. err = chcr_compute_partial_hash(shash, hmacctx->ipad,
  1856. hmacctx->ipad, digestsize);
  1857. if (err)
  1858. goto out;
  1859. chcr_change_order(hmacctx->ipad, updated_digestsize);
  1860. err = chcr_compute_partial_hash(shash, hmacctx->opad,
  1861. hmacctx->opad, digestsize);
  1862. if (err)
  1863. goto out;
  1864. chcr_change_order(hmacctx->opad, updated_digestsize);
  1865. out:
  1866. return err;
  1867. }
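/*
 * Editorial sketch of the precomputation above, assuming IPAD_DATA/OPAD_DATA
 * are the usual repeating 0x36/0x5c bytes: the (possibly pre-hashed) key is
 * zero-padded to one block, XORed with 0x36 into hmacctx->ipad and with 0x5c
 * into hmacctx->opad, and one software compression of each block is run via
 * chcr_compute_partial_hash().  Only these partial states, byte-swapped by
 * chcr_change_order() into the hardware's word order, are programmed into the
 * key context, so per-request HMAC processing never sees the raw key.
 */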
  1868. static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  1869. unsigned int key_len)
  1870. {
  1871. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  1872. unsigned short context_size = 0;
  1873. int err;
  1874. err = chcr_cipher_fallback_setkey(cipher, key, key_len);
  1875. if (err)
  1876. goto badkey_err;
  1877. memcpy(ablkctx->key, key, key_len);
  1878. ablkctx->enckey_len = key_len;
  1879. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
  1880. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
  1881. ablkctx->key_ctx_hdr =
  1882. FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
  1883. CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
  1884. CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
  1885. CHCR_KEYCTX_NO_KEY, 1,
  1886. 0, context_size);
  1887. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
  1888. return 0;
  1889. badkey_err:
  1890. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1891. ablkctx->enckey_len = 0;
  1892. return err;
  1893. }
  1894. static int chcr_sha_init(struct ahash_request *areq)
  1895. {
  1896. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1897. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  1898. int digestsize = crypto_ahash_digestsize(tfm);
  1899. req_ctx->data_len = 0;
  1900. req_ctx->reqlen = 0;
  1901. req_ctx->reqbfr = req_ctx->bfr1;
  1902. req_ctx->skbfr = req_ctx->bfr2;
  1903. copy_hash_init_values(req_ctx->partial_hash, digestsize);
  1904. return 0;
  1905. }
  1906. static int chcr_sha_cra_init(struct crypto_tfm *tfm)
  1907. {
  1908. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1909. sizeof(struct chcr_ahash_req_ctx));
  1910. return chcr_device_init(crypto_tfm_ctx(tfm));
  1911. }
  1912. static int chcr_hmac_init(struct ahash_request *areq)
  1913. {
  1914. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1915. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
  1916. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
  1917. unsigned int digestsize = crypto_ahash_digestsize(rtfm);
  1918. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1919. chcr_sha_init(areq);
  1920. req_ctx->data_len = bs;
  1921. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1922. if (digestsize == SHA224_DIGEST_SIZE)
  1923. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1924. SHA256_DIGEST_SIZE);
  1925. else if (digestsize == SHA384_DIGEST_SIZE)
  1926. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1927. SHA512_DIGEST_SIZE);
  1928. else
  1929. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1930. digestsize);
  1931. }
  1932. return 0;
  1933. }
  1934. static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
  1935. {
  1936. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1937. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1938. unsigned int digestsize =
  1939. crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
  1940. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1941. sizeof(struct chcr_ahash_req_ctx));
  1942. hmacctx->base_hash = chcr_alloc_shash(digestsize);
  1943. if (IS_ERR(hmacctx->base_hash))
  1944. return PTR_ERR(hmacctx->base_hash);
  1945. return chcr_device_init(crypto_tfm_ctx(tfm));
  1946. }
  1947. static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
  1948. {
  1949. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1950. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1951. if (hmacctx->base_hash) {
  1952. chcr_free_shash(hmacctx->base_hash);
  1953. hmacctx->base_hash = NULL;
  1954. }
  1955. }
  1956. inline void chcr_aead_common_exit(struct aead_request *req)
  1957. {
  1958. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1959. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1960. struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
  1961. chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
  1962. }
  1963. static int chcr_aead_common_init(struct aead_request *req)
  1964. {
  1965. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1966. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  1967. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1968. unsigned int authsize = crypto_aead_authsize(tfm);
  1969. int error = -EINVAL;
  1970. /* validate key size */
  1971. if (aeadctx->enckey_len == 0)
  1972. goto err;
  1973. if (reqctx->op && req->cryptlen < authsize)
  1974. goto err;
  1975. if (reqctx->b0_len)
  1976. reqctx->scratch_pad = reqctx->iv + IV;
  1977. else
  1978. reqctx->scratch_pad = NULL;
  1979. error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
  1980. reqctx->op);
  1981. if (error) {
  1982. error = -ENOMEM;
  1983. goto err;
  1984. }
  1985. reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
  1986. CHCR_SRC_SG_SIZE, 0);
  1987. reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
  1988. CHCR_SRC_SG_SIZE, req->assoclen);
  1989. return 0;
  1990. err:
  1991. return error;
  1992. }
  1993. static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
  1994. int aadmax, int wrlen,
  1995. unsigned short op_type)
  1996. {
  1997. unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
  1998. if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
  1999. dst_nents > MAX_DSGL_ENT ||
  2000. (req->assoclen > aadmax) ||
  2001. (wrlen > SGE_MAX_WR_LEN))
  2002. return 1;
  2003. return 0;
  2004. }
  2005. static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
  2006. {
  2007. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2008. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2009. struct aead_request *subreq = aead_request_ctx(req);
  2010. aead_request_set_tfm(subreq, aeadctx->sw_cipher);
  2011. aead_request_set_callback(subreq, req->base.flags,
  2012. req->base.complete, req->base.data);
  2013. aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
  2014. req->iv);
  2015. aead_request_set_ad(subreq, req->assoclen);
  2016. return op_type ? crypto_aead_decrypt(subreq) :
  2017. crypto_aead_encrypt(subreq);
  2018. }
  2019. static struct sk_buff *create_authenc_wr(struct aead_request *req,
  2020. unsigned short qid,
  2021. int size)
  2022. {
  2023. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2024. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2025. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  2026. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2027. struct sk_buff *skb = NULL;
  2028. struct chcr_wr *chcr_req;
  2029. struct cpl_rx_phys_dsgl *phys_cpl;
  2030. struct ulptx_sgl *ulptx;
  2031. unsigned int transhdr_len;
  2032. unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
  2033. unsigned int kctx_len = 0, dnents;
  2034. unsigned int assoclen = req->assoclen;
  2035. unsigned int authsize = crypto_aead_authsize(tfm);
  2036. int error = -EINVAL;
  2037. int null = 0;
  2038. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2039. GFP_ATOMIC;
  2040. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2041. if (req->cryptlen == 0)
  2042. return NULL;
  2043. reqctx->b0_len = 0;
  2044. error = chcr_aead_common_init(req);
  2045. if (error)
  2046. return ERR_PTR(error);
  2047. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
  2048. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2049. null = 1;
  2050. assoclen = 0;
  2051. reqctx->aad_nents = 0;
  2052. }
  2053. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2054. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2055. (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
  2056. req->assoclen);
  2057. dnents += MIN_AUTH_SG; // For IV
  2058. dst_size = get_space_for_phys_dsgl(dnents);
  2059. kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
  2060. - sizeof(chcr_req->key_ctx);
  2061. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2062. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
  2063. SGE_MAX_WR_LEN;
  2064. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
  2065. : (sgl_len(reqctx->src_nents + reqctx->aad_nents
  2066. + MIN_GCM_SG) * 8);
  2067. transhdr_len += temp;
  2068. transhdr_len = roundup(transhdr_len, 16);
  2069. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2070. transhdr_len, reqctx->op)) {
  2071. atomic_inc(&adap->chcr_stats.fallback);
  2072. chcr_aead_common_exit(req);
  2073. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2074. }
  2075. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2076. if (!skb) {
  2077. error = -ENOMEM;
  2078. goto err;
  2079. }
  2080. chcr_req = __skb_put_zero(skb, transhdr_len);
  2081. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
        /*
         * Input order is AAD, IV and Payload, where the IV should be included
         * as part of the auth data. All other fields should be filled
         * according to the hardware spec.
         */
  2087. chcr_req->sec_cpl.op_ivinsrtofst =
  2088. FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
  2089. assoclen + 1);
  2090. chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
  2091. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2092. assoclen ? 1 : 0, assoclen,
  2093. assoclen + IV + 1,
  2094. (temp & 0x1F0) >> 4);
  2095. chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
  2096. temp & 0xF,
  2097. null ? 0 : assoclen + IV + 1,
  2098. temp, temp);
  2099. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
  2100. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
  2101. temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  2102. else
  2103. temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  2104. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
  2105. (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
  2106. temp,
  2107. actx->auth_mode, aeadctx->hmac_ctrl,
  2108. IV >> 1);
  2109. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2110. 0, 0, dst_size);
  2111. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2112. if (reqctx->op == CHCR_ENCRYPT_OP ||
  2113. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2114. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
  2115. memcpy(chcr_req->key_ctx.key, aeadctx->key,
  2116. aeadctx->enckey_len);
  2117. else
  2118. memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
  2119. aeadctx->enckey_len);
  2120. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2121. actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
  2122. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2123. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2124. memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
  2125. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
  2126. CTR_RFC3686_IV_SIZE);
  2127. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  2128. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  2129. } else {
  2130. memcpy(reqctx->iv, req->iv, IV);
  2131. }
  2132. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2133. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2134. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2135. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2136. atomic_inc(&adap->chcr_stats.cipher_rqst);
  2137. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2138. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2139. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2140. transhdr_len, temp, 0);
  2141. reqctx->skb = skb;
  2142. return skb;
  2143. err:
  2144. chcr_aead_common_exit(req);
  2145. return ERR_PTR(error);
  2146. }
  2147. int chcr_aead_dma_map(struct device *dev,
  2148. struct aead_request *req,
  2149. unsigned short op_type)
  2150. {
  2151. int error;
  2152. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2153. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2154. unsigned int authsize = crypto_aead_authsize(tfm);
  2155. int dst_size;
  2156. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2157. -authsize : authsize);
  2158. if (!req->cryptlen || !dst_size)
  2159. return 0;
  2160. reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
  2161. DMA_BIDIRECTIONAL);
  2162. if (dma_mapping_error(dev, reqctx->iv_dma))
  2163. return -ENOMEM;
  2164. if (reqctx->b0_len)
  2165. reqctx->b0_dma = reqctx->iv_dma + IV;
  2166. else
  2167. reqctx->b0_dma = 0;
  2168. if (req->src == req->dst) {
  2169. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2170. DMA_BIDIRECTIONAL);
  2171. if (!error)
  2172. goto err;
  2173. } else {
  2174. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2175. DMA_TO_DEVICE);
  2176. if (!error)
  2177. goto err;
  2178. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2179. DMA_FROM_DEVICE);
  2180. if (!error) {
  2181. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2182. DMA_TO_DEVICE);
  2183. goto err;
  2184. }
  2185. }
  2186. return 0;
  2187. err:
        dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len,
                         DMA_BIDIRECTIONAL);
  2189. return -ENOMEM;
  2190. }
  2191. void chcr_aead_dma_unmap(struct device *dev,
  2192. struct aead_request *req,
  2193. unsigned short op_type)
  2194. {
  2195. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2196. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2197. unsigned int authsize = crypto_aead_authsize(tfm);
  2198. int dst_size;
  2199. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2200. -authsize : authsize);
  2201. if (!req->cryptlen || !dst_size)
  2202. return;
  2203. dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
  2204. DMA_BIDIRECTIONAL);
  2205. if (req->src == req->dst) {
  2206. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2207. DMA_BIDIRECTIONAL);
  2208. } else {
  2209. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2210. DMA_TO_DEVICE);
  2211. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2212. DMA_FROM_DEVICE);
  2213. }
  2214. }
  2215. void chcr_add_aead_src_ent(struct aead_request *req,
  2216. struct ulptx_sgl *ulptx,
  2217. unsigned int assoclen)
  2218. {
  2219. struct ulptx_walk ulp_walk;
  2220. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2221. if (reqctx->imm) {
  2222. u8 *buf = (u8 *)ulptx;
  2223. if (reqctx->b0_len) {
  2224. memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
  2225. buf += reqctx->b0_len;
  2226. }
  2227. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2228. buf, assoclen, 0);
  2229. buf += assoclen;
  2230. memcpy(buf, reqctx->iv, IV);
  2231. buf += IV;
  2232. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2233. buf, req->cryptlen, req->assoclen);
  2234. } else {
  2235. ulptx_walk_init(&ulp_walk, ulptx);
  2236. if (reqctx->b0_len)
  2237. ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
  2238. &reqctx->b0_dma);
  2239. ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
  2240. ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
  2241. ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
  2242. req->assoclen);
  2243. ulptx_walk_end(&ulp_walk);
  2244. }
  2245. }
  2246. void chcr_add_aead_dst_ent(struct aead_request *req,
  2247. struct cpl_rx_phys_dsgl *phys_cpl,
  2248. unsigned int assoclen,
  2249. unsigned short qid)
  2250. {
  2251. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2252. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2253. struct dsgl_walk dsgl_walk;
  2254. unsigned int authsize = crypto_aead_authsize(tfm);
  2255. struct chcr_context *ctx = a_ctx(tfm);
  2256. u32 temp;
  2257. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2258. if (reqctx->b0_len)
  2259. dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
  2260. dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
  2261. dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
  2262. temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
  2263. dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
  2264. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2265. }
  2266. void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
  2267. void *ulptx,
  2268. struct cipher_wr_param *wrparam)
  2269. {
  2270. struct ulptx_walk ulp_walk;
  2271. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2272. u8 *buf = ulptx;
  2273. memcpy(buf, reqctx->iv, IV);
  2274. buf += IV;
  2275. if (reqctx->imm) {
  2276. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2277. buf, wrparam->bytes, reqctx->processed);
  2278. } else {
  2279. ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
  2280. ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
  2281. reqctx->src_ofst);
  2282. reqctx->srcsg = ulp_walk.last_sg;
  2283. reqctx->src_ofst = ulp_walk.last_sg_len;
  2284. ulptx_walk_end(&ulp_walk);
  2285. }
  2286. }
  2287. void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
  2288. struct cpl_rx_phys_dsgl *phys_cpl,
  2289. struct cipher_wr_param *wrparam,
  2290. unsigned short qid)
  2291. {
  2292. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2293. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
  2294. struct chcr_context *ctx = c_ctx(tfm);
  2295. struct dsgl_walk dsgl_walk;
  2296. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2297. dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
  2298. reqctx->dst_ofst);
  2299. reqctx->dstsg = dsgl_walk.last_sg;
  2300. reqctx->dst_ofst = dsgl_walk.last_sg_len;
  2301. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2302. }
  2303. void chcr_add_hash_src_ent(struct ahash_request *req,
  2304. struct ulptx_sgl *ulptx,
  2305. struct hash_wr_param *param)
  2306. {
  2307. struct ulptx_walk ulp_walk;
  2308. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  2309. if (reqctx->hctx_wr.imm) {
  2310. u8 *buf = (u8 *)ulptx;
  2311. if (param->bfr_len) {
  2312. memcpy(buf, reqctx->reqbfr, param->bfr_len);
  2313. buf += param->bfr_len;
  2314. }
  2315. sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
  2316. sg_nents(reqctx->hctx_wr.srcsg), buf,
  2317. param->sg_len, 0);
  2318. } else {
  2319. ulptx_walk_init(&ulp_walk, ulptx);
  2320. if (param->bfr_len)
  2321. ulptx_walk_add_page(&ulp_walk, param->bfr_len,
  2322. &reqctx->hctx_wr.dma_addr);
  2323. ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
  2324. param->sg_len, reqctx->hctx_wr.src_ofst);
  2325. reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
  2326. reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
  2327. ulptx_walk_end(&ulp_walk);
  2328. }
  2329. }
  2330. int chcr_hash_dma_map(struct device *dev,
  2331. struct ahash_request *req)
  2332. {
  2333. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2334. int error = 0;
  2335. if (!req->nbytes)
  2336. return 0;
  2337. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2338. DMA_TO_DEVICE);
  2339. if (!error)
  2340. return -ENOMEM;
  2341. req_ctx->hctx_wr.is_sg_map = 1;
  2342. return 0;
  2343. }
  2344. void chcr_hash_dma_unmap(struct device *dev,
  2345. struct ahash_request *req)
  2346. {
  2347. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2348. if (!req->nbytes)
  2349. return;
  2350. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2351. DMA_TO_DEVICE);
  2352. req_ctx->hctx_wr.is_sg_map = 0;
  2353. }
  2354. int chcr_cipher_dma_map(struct device *dev,
  2355. struct ablkcipher_request *req)
  2356. {
  2357. int error;
  2358. if (req->src == req->dst) {
  2359. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2360. DMA_BIDIRECTIONAL);
  2361. if (!error)
  2362. goto err;
  2363. } else {
  2364. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2365. DMA_TO_DEVICE);
  2366. if (!error)
  2367. goto err;
  2368. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2369. DMA_FROM_DEVICE);
  2370. if (!error) {
  2371. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2372. DMA_TO_DEVICE);
  2373. goto err;
  2374. }
  2375. }
  2376. return 0;
  2377. err:
  2378. return -ENOMEM;
  2379. }
  2380. void chcr_cipher_dma_unmap(struct device *dev,
  2381. struct ablkcipher_request *req)
  2382. {
  2383. if (req->src == req->dst) {
  2384. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2385. DMA_BIDIRECTIONAL);
  2386. } else {
  2387. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2388. DMA_TO_DEVICE);
  2389. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2390. DMA_FROM_DEVICE);
  2391. }
  2392. }
  2393. static int set_msg_len(u8 *block, unsigned int msglen, int csize)
  2394. {
  2395. __be32 data;
  2396. memset(block, 0, csize);
  2397. block += csize;
  2398. if (csize >= 4)
  2399. csize = 4;
  2400. else if (msglen > (unsigned int)(1 << (8 * csize)))
  2401. return -EOVERFLOW;
  2402. data = cpu_to_be32(msglen);
  2403. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  2404. return 0;
  2405. }
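/*
 * Worked example (editorial): set_msg_len(block, 300, 2) zeroes two bytes and
 * copies the low two bytes of cpu_to_be32(300), giving block[0..1] =
 * { 0x01, 0x2c }.  With csize >= 4 only the last four bytes carry the length
 * (any leading bytes stay zero), and for csize < 4 a message longer than
 * 2^(8 * csize) bytes is rejected with -EOVERFLOW.
 */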
static int generate_b0(struct aead_request *req,
                       struct chcr_aead_ctx *aeadctx,
                       unsigned short op_type)
  2409. {
  2410. unsigned int l, lp, m;
  2411. int rc;
  2412. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2413. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2414. u8 *b0 = reqctx->scratch_pad;
  2415. m = crypto_aead_authsize(aead);
  2416. memcpy(b0, reqctx->iv, 16);
  2417. lp = b0[0];
  2418. l = lp + 1;
  2419. /* set m, bits 3-5 */
  2420. *b0 |= (8 * ((m - 2) / 2));
  2421. /* set adata, bit 6, if associated data is used */
  2422. if (req->assoclen)
  2423. *b0 |= 64;
        rc = set_msg_len(b0 + 16 - l,
                         (op_type == CHCR_DECRYPT_OP) ?
                         req->cryptlen - m : req->cryptlen, l);

        return rc;
}
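/*
 * Worked example of the B0 flags byte (editorial, per RFC 3610): with
 * reqctx->iv[0] == 3 (so L = 4 length octets), a 16-byte tag and a non-zero
 * assoclen, the flags byte becomes
 *
 *      0x03 | (8 * ((16 - 2) / 2)) | 0x40  ==  0x03 | 0x38 | 0x40  ==  0x7b
 *
 * and set_msg_len() stores the message length in the last L = 4 octets of B0.
 */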
  2428. static inline int crypto_ccm_check_iv(const u8 *iv)
  2429. {
  2430. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  2431. if (iv[0] < 1 || iv[0] > 7)
  2432. return -EINVAL;
  2433. return 0;
  2434. }
  2435. static int ccm_format_packet(struct aead_request *req,
  2436. struct chcr_aead_ctx *aeadctx,
  2437. unsigned int sub_type,
  2438. unsigned short op_type,
  2439. unsigned int assoclen)
  2440. {
  2441. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2442. int rc = 0;
  2443. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2444. reqctx->iv[0] = 3;
  2445. memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
  2446. memcpy(reqctx->iv + 4, req->iv, 8);
  2447. memset(reqctx->iv + 12, 0, 4);
  2448. } else {
  2449. memcpy(reqctx->iv, req->iv, 16);
  2450. }
  2451. if (assoclen)
  2452. *((unsigned short *)(reqctx->scratch_pad + 16)) =
  2453. htons(assoclen);
        rc = generate_b0(req, aeadctx, op_type);
  2455. /* zero the ctr value */
  2456. memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
  2457. return rc;
  2458. }
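/*
 * Editorial layout sketch for the RFC 4309 branch above: the 16-byte counter
 * block is assembled as
 *
 *      iv[0]      = 3              (flags: L' = 3, i.e. 4 counter octets)
 *      iv[1..3]   = aeadctx->salt  (3-byte salt stored at setkey time)
 *      iv[4..11]  = req->iv        (8-byte per-request IV)
 *      iv[12..15] = 0              (running counter, cleared by the memset)
 *
 * while plain CCM takes the caller's full 16-byte IV and only zeroes the
 * trailing iv[0] + 1 counter octets.
 */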
  2459. static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
  2460. unsigned int dst_size,
  2461. struct aead_request *req,
  2462. unsigned short op_type)
  2463. {
  2464. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2465. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2466. unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
  2467. unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
  2468. unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
  2469. unsigned int ccm_xtra;
  2470. unsigned char tag_offset = 0, auth_offset = 0;
  2471. unsigned int assoclen;
  2472. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2473. assoclen = req->assoclen - 8;
  2474. else
  2475. assoclen = req->assoclen;
  2476. ccm_xtra = CCM_B0_SIZE +
  2477. ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
  2478. auth_offset = req->cryptlen ?
  2479. (assoclen + IV + 1 + ccm_xtra) : 0;
  2480. if (op_type == CHCR_DECRYPT_OP) {
  2481. if (crypto_aead_authsize(tfm) != req->cryptlen)
  2482. tag_offset = crypto_aead_authsize(tfm);
  2483. else
  2484. auth_offset = 0;
  2485. }
  2486. sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
  2487. 2, assoclen + 1 + ccm_xtra);
  2488. sec_cpl->pldlen =
  2489. htonl(assoclen + IV + req->cryptlen + ccm_xtra);
        /* For CCM there will always be a B0 block, so AAD start is always 1 */
  2491. sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2492. 1, assoclen + ccm_xtra, assoclen
  2493. + IV + 1 + ccm_xtra, 0);
  2494. sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
  2495. auth_offset, tag_offset,
  2496. (op_type == CHCR_ENCRYPT_OP) ? 0 :
  2497. crypto_aead_authsize(tfm));
  2498. sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
  2499. (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
  2500. cipher_mode, mac_mode,
  2501. aeadctx->hmac_ctrl, IV >> 1);
  2502. sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
  2503. 0, dst_size);
  2504. }
  2505. static int aead_ccm_validate_input(unsigned short op_type,
  2506. struct aead_request *req,
  2507. struct chcr_aead_ctx *aeadctx,
  2508. unsigned int sub_type)
  2509. {
  2510. if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2511. if (crypto_ccm_check_iv(req->iv)) {
  2512. pr_err("CCM: IV check fails\n");
  2513. return -EINVAL;
  2514. }
  2515. } else {
  2516. if (req->assoclen != 16 && req->assoclen != 20) {
  2517. pr_err("RFC4309: Invalid AAD length %d\n",
  2518. req->assoclen);
  2519. return -EINVAL;
  2520. }
  2521. }
  2522. return 0;
  2523. }
  2524. static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
  2525. unsigned short qid,
  2526. int size)
  2527. {
  2528. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2529. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2530. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2531. struct sk_buff *skb = NULL;
  2532. struct chcr_wr *chcr_req;
  2533. struct cpl_rx_phys_dsgl *phys_cpl;
  2534. struct ulptx_sgl *ulptx;
  2535. unsigned int transhdr_len;
  2536. unsigned int dst_size = 0, kctx_len, dnents, temp;
  2537. unsigned int sub_type, assoclen = req->assoclen;
  2538. unsigned int authsize = crypto_aead_authsize(tfm);
  2539. int error = -EINVAL;
  2540. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2541. GFP_ATOMIC;
  2542. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2543. sub_type = get_aead_subtype(tfm);
  2544. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2545. assoclen -= 8;
  2546. reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
  2547. error = chcr_aead_common_init(req);
  2548. if (error)
  2549. return ERR_PTR(error);
  2550. error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
  2551. if (error)
  2552. goto err;
  2553. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2554. dnents += sg_nents_xlen(req->dst, req->cryptlen
  2555. + (reqctx->op ? -authsize : authsize),
  2556. CHCR_DST_SG_SIZE, req->assoclen);
  2557. dnents += MIN_CCM_SG; // For IV and B0
  2558. dst_size = get_space_for_phys_dsgl(dnents);
  2559. kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
  2560. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2561. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
  2562. reqctx->b0_len) <= SGE_MAX_WR_LEN;
  2563. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
  2564. reqctx->b0_len, 16) :
  2565. (sgl_len(reqctx->src_nents + reqctx->aad_nents +
  2566. MIN_CCM_SG) * 8);
  2567. transhdr_len += temp;
  2568. transhdr_len = roundup(transhdr_len, 16);
  2569. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
  2570. reqctx->b0_len, transhdr_len, reqctx->op)) {
  2571. atomic_inc(&adap->chcr_stats.fallback);
  2572. chcr_aead_common_exit(req);
  2573. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2574. }
  2575. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2576. if (!skb) {
  2577. error = -ENOMEM;
  2578. goto err;
  2579. }
  2580. chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
  2581. fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
  2582. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2583. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2584. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2585. aeadctx->key, aeadctx->enckey_len);
  2586. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2587. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2588. error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
  2589. if (error)
  2590. goto dstmap_fail;
  2591. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2592. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2593. atomic_inc(&adap->chcr_stats.aead_rqst);
  2594. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2595. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
  2596. reqctx->b0_len) : 0);
  2597. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
  2598. transhdr_len, temp, 0);
  2599. reqctx->skb = skb;
  2600. return skb;
  2601. dstmap_fail:
  2602. kfree_skb(skb);
  2603. err:
  2604. chcr_aead_common_exit(req);
  2605. return ERR_PTR(error);
  2606. }
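/*
 * create_gcm_wr - build the firmware work request for gcm(aes) and
 * rfc4106(gcm(aes)). The key context carries the AES key followed by the
 * GHASH subkey H precomputed in chcr_gcm_setkey(). The 16-byte IV passed to
 * the hardware is SALT | IV | 0x00000001 for rfc4106, or the 12-byte GCM IV
 * with the 32-bit counter 0x00000001 appended otherwise.
 */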
  2607. static struct sk_buff *create_gcm_wr(struct aead_request *req,
  2608. unsigned short qid,
  2609. int size)
  2610. {
  2611. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2612. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2613. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2614. struct sk_buff *skb = NULL;
  2615. struct chcr_wr *chcr_req;
  2616. struct cpl_rx_phys_dsgl *phys_cpl;
  2617. struct ulptx_sgl *ulptx;
  2618. unsigned int transhdr_len, dnents = 0;
  2619. unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
  2620. unsigned int authsize = crypto_aead_authsize(tfm);
  2621. int error = -EINVAL;
  2622. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2623. GFP_ATOMIC;
  2624. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2625. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
  2626. assoclen = req->assoclen - 8;
  2627. reqctx->b0_len = 0;
  2628. error = chcr_aead_common_init(req);
  2629. if (error)
  2630. return ERR_PTR(error);
  2631. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2632. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2633. (reqctx->op ? -authsize : authsize),
  2634. CHCR_DST_SG_SIZE, req->assoclen);
  2635. dnents += MIN_GCM_SG; // For IV
  2636. dst_size = get_space_for_phys_dsgl(dnents);
  2637. kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
  2638. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2639. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
  2640. SGE_MAX_WR_LEN;
  2641. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
  2642. (sgl_len(reqctx->src_nents +
  2643. reqctx->aad_nents + MIN_GCM_SG) * 8);
  2644. transhdr_len += temp;
  2645. transhdr_len = roundup(transhdr_len, 16);
  2646. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2647. transhdr_len, reqctx->op)) {
  2648. atomic_inc(&adap->chcr_stats.fallback);
  2649. chcr_aead_common_exit(req);
  2650. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2651. }
  2652. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2653. if (!skb) {
  2654. error = -ENOMEM;
  2655. goto err;
  2656. }
  2657. chcr_req = __skb_put_zero(skb, transhdr_len);
2658. // Offset of tag from end
  2659. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
  2660. chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
  2661. a_ctx(tfm)->dev->rx_channel_id, 2,
  2662. (assoclen + 1));
  2663. chcr_req->sec_cpl.pldlen =
  2664. htonl(assoclen + IV + req->cryptlen);
  2665. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2666. assoclen ? 1 : 0, assoclen,
  2667. assoclen + IV + 1, 0);
  2668. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  2669. FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
  2670. temp, temp);
  2671. chcr_req->sec_cpl.seqno_numivs =
  2672. FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
  2673. CHCR_ENCRYPT_OP) ? 1 : 0,
  2674. CHCR_SCMD_CIPHER_MODE_AES_GCM,
  2675. CHCR_SCMD_AUTH_MODE_GHASH,
  2676. aeadctx->hmac_ctrl, IV >> 1);
  2677. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2678. 0, 0, dst_size);
  2679. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2680. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2681. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2682. GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
  2683. /* prepare a 16 byte iv */
  2684. /* S A L T | IV | 0x00000001 */
  2685. if (get_aead_subtype(tfm) ==
  2686. CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
  2687. memcpy(reqctx->iv, aeadctx->salt, 4);
  2688. memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
  2689. } else {
  2690. memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
  2691. }
  2692. *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
  2693. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2694. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2695. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2696. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2697. atomic_inc(&adap->chcr_stats.aead_rqst);
  2698. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2699. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2700. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2701. transhdr_len, temp, reqctx->verify);
  2702. reqctx->skb = skb;
  2703. return skb;
  2704. err:
  2705. chcr_aead_common_exit(req);
  2706. return ERR_PTR(error);
  2707. }
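/*
 * chcr_aead_cra_init - allocate an asynchronous software AEAD of the same
 * name as a fallback for requests the hardware path cannot handle, and size
 * the request context so that either path fits in it.
 */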
  2708. static int chcr_aead_cra_init(struct crypto_aead *tfm)
  2709. {
  2710. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2711. struct aead_alg *alg = crypto_aead_alg(tfm);
  2712. aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
  2713. CRYPTO_ALG_NEED_FALLBACK |
  2714. CRYPTO_ALG_ASYNC);
  2715. if (IS_ERR(aeadctx->sw_cipher))
  2716. return PTR_ERR(aeadctx->sw_cipher);
  2717. crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
  2718. sizeof(struct aead_request) +
  2719. crypto_aead_reqsize(aeadctx->sw_cipher)));
  2720. return chcr_device_init(a_ctx(tfm));
  2721. }
  2722. static void chcr_aead_cra_exit(struct crypto_aead *tfm)
  2723. {
  2724. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2725. crypto_free_aead(aeadctx->sw_cipher);
  2726. }
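/*
 * The *_setauthsize() handlers below map the requested ICV length onto the
 * hardware HMAC_CTRL truncation modes and record whether the tag can be
 * checked by the hardware (VERIFY_HW) or must be verified through the
 * software fallback (VERIFY_SW). The chosen authsize is also propagated to
 * the fallback cipher.
 */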
  2727. static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
  2728. unsigned int authsize)
  2729. {
  2730. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2731. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
  2732. aeadctx->mayverify = VERIFY_HW;
  2733. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2734. }
  2735. static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
  2736. unsigned int authsize)
  2737. {
  2738. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2739. u32 maxauth = crypto_aead_maxauthsize(tfm);
2740. /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
2741. * not hold for SHA1, so the authsize == 12 check must come before the
2742. * authsize == (maxauth >> 1) check.
  2743. */
  2744. if (authsize == ICV_4) {
  2745. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2746. aeadctx->mayverify = VERIFY_HW;
  2747. } else if (authsize == ICV_6) {
  2748. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2749. aeadctx->mayverify = VERIFY_HW;
  2750. } else if (authsize == ICV_10) {
  2751. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2752. aeadctx->mayverify = VERIFY_HW;
  2753. } else if (authsize == ICV_12) {
  2754. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2755. aeadctx->mayverify = VERIFY_HW;
  2756. } else if (authsize == ICV_14) {
  2757. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2758. aeadctx->mayverify = VERIFY_HW;
  2759. } else if (authsize == (maxauth >> 1)) {
  2760. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2761. aeadctx->mayverify = VERIFY_HW;
  2762. } else if (authsize == maxauth) {
  2763. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2764. aeadctx->mayverify = VERIFY_HW;
  2765. } else {
  2766. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2767. aeadctx->mayverify = VERIFY_SW;
  2768. }
  2769. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2770. }
  2771. static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
  2772. {
  2773. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2774. switch (authsize) {
  2775. case ICV_4:
  2776. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2777. aeadctx->mayverify = VERIFY_HW;
  2778. break;
  2779. case ICV_8:
  2780. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2781. aeadctx->mayverify = VERIFY_HW;
  2782. break;
  2783. case ICV_12:
  2784. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2785. aeadctx->mayverify = VERIFY_HW;
  2786. break;
  2787. case ICV_14:
  2788. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2789. aeadctx->mayverify = VERIFY_HW;
  2790. break;
  2791. case ICV_16:
  2792. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2793. aeadctx->mayverify = VERIFY_HW;
  2794. break;
  2795. case ICV_13:
  2796. case ICV_15:
  2797. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2798. aeadctx->mayverify = VERIFY_SW;
  2799. break;
  2800. default:
  2801. crypto_tfm_set_flags((struct crypto_tfm *) tfm,
  2802. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2803. return -EINVAL;
  2804. }
  2805. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2806. }
  2807. static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
  2808. unsigned int authsize)
  2809. {
  2810. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2811. switch (authsize) {
  2812. case ICV_8:
  2813. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2814. aeadctx->mayverify = VERIFY_HW;
  2815. break;
  2816. case ICV_12:
  2817. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2818. aeadctx->mayverify = VERIFY_HW;
  2819. break;
  2820. case ICV_16:
  2821. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2822. aeadctx->mayverify = VERIFY_HW;
  2823. break;
  2824. default:
  2825. crypto_tfm_set_flags((struct crypto_tfm *)tfm,
  2826. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2827. return -EINVAL;
  2828. }
  2829. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2830. }
  2831. static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
  2832. unsigned int authsize)
  2833. {
  2834. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2835. switch (authsize) {
  2836. case ICV_4:
  2837. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2838. aeadctx->mayverify = VERIFY_HW;
  2839. break;
  2840. case ICV_6:
  2841. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2842. aeadctx->mayverify = VERIFY_HW;
  2843. break;
  2844. case ICV_8:
  2845. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2846. aeadctx->mayverify = VERIFY_HW;
  2847. break;
  2848. case ICV_10:
  2849. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2850. aeadctx->mayverify = VERIFY_HW;
  2851. break;
  2852. case ICV_12:
  2853. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2854. aeadctx->mayverify = VERIFY_HW;
  2855. break;
  2856. case ICV_14:
  2857. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2858. aeadctx->mayverify = VERIFY_HW;
  2859. break;
  2860. case ICV_16:
  2861. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2862. aeadctx->mayverify = VERIFY_HW;
  2863. break;
  2864. default:
  2865. crypto_tfm_set_flags((struct crypto_tfm *)tfm,
  2866. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2867. return -EINVAL;
  2868. }
  2869. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2870. }
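/*
 * chcr_ccm_common_setkey - program the CCM key context header. The key is
 * stored once here and copied twice into the work-request key context by
 * create_aead_ccm_wr(), hence
 * key_ctx_size = sizeof(struct _key_ctx) + 2 * roundup(keylen, 16),
 * e.g. the header plus 32 bytes for AES-128, encoded in 16-byte units in the
 * context header.
 */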
  2871. static int chcr_ccm_common_setkey(struct crypto_aead *aead,
  2872. const u8 *key,
  2873. unsigned int keylen)
  2874. {
  2875. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2876. unsigned char ck_size, mk_size;
  2877. int key_ctx_size = 0;
  2878. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
  2879. if (keylen == AES_KEYSIZE_128) {
  2880. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2881. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
  2882. } else if (keylen == AES_KEYSIZE_192) {
  2883. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2884. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
  2885. } else if (keylen == AES_KEYSIZE_256) {
  2886. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2887. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  2888. } else {
  2889. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2890. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2891. aeadctx->enckey_len = 0;
  2892. return -EINVAL;
  2893. }
  2894. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
  2895. key_ctx_size >> 4);
  2896. memcpy(aeadctx->key, key, keylen);
  2897. aeadctx->enckey_len = keylen;
  2898. return 0;
  2899. }
  2900. static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
  2901. const u8 *key,
  2902. unsigned int keylen)
  2903. {
  2904. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2905. int error;
  2906. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2907. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2908. CRYPTO_TFM_REQ_MASK);
  2909. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2910. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2911. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2912. CRYPTO_TFM_RES_MASK);
  2913. if (error)
  2914. return error;
  2915. return chcr_ccm_common_setkey(aead, key, keylen);
  2916. }
  2917. static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
  2918. unsigned int keylen)
  2919. {
  2920. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2921. int error;
  2922. if (keylen < 3) {
  2923. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2924. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2925. aeadctx->enckey_len = 0;
  2926. return -EINVAL;
  2927. }
  2928. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2929. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2930. CRYPTO_TFM_REQ_MASK);
  2931. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2932. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2933. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2934. CRYPTO_TFM_RES_MASK);
  2935. if (error)
  2936. return error;
  2937. keylen -= 3;
  2938. memcpy(aeadctx->salt, key + keylen, 3);
  2939. return chcr_ccm_common_setkey(aead, key, keylen);
  2940. }
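/*
 * chcr_gcm_setkey - for rfc4106 the last 4 bytes of the key are the nonce
 * salt and are stripped before the AES key is programmed. The GHASH subkey
 * H = AES_K(0^128) is precomputed with a temporary "aes-generic" cipher and
 * stored in the GCM context for create_gcm_wr().
 */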
  2941. static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
  2942. unsigned int keylen)
  2943. {
  2944. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2945. struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
  2946. struct crypto_cipher *cipher;
  2947. unsigned int ck_size;
  2948. int ret = 0, key_ctx_size = 0;
  2949. aeadctx->enckey_len = 0;
  2950. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2951. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
  2952. & CRYPTO_TFM_REQ_MASK);
  2953. ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2954. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2955. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2956. CRYPTO_TFM_RES_MASK);
  2957. if (ret)
  2958. goto out;
  2959. if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
  2960. keylen > 3) {
  2961. keylen -= 4; /* nonce/salt is present in the last 4 bytes */
  2962. memcpy(aeadctx->salt, key + keylen, 4);
  2963. }
  2964. if (keylen == AES_KEYSIZE_128) {
  2965. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2966. } else if (keylen == AES_KEYSIZE_192) {
  2967. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2968. } else if (keylen == AES_KEYSIZE_256) {
  2969. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2970. } else {
  2971. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2972. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2973. pr_err("GCM: Invalid key length %d\n", keylen);
  2974. ret = -EINVAL;
  2975. goto out;
  2976. }
  2977. memcpy(aeadctx->key, key, keylen);
  2978. aeadctx->enckey_len = keylen;
  2979. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
  2980. AEAD_H_SIZE;
  2981. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
  2982. CHCR_KEYCTX_MAC_KEY_SIZE_128,
  2983. 0, 0,
  2984. key_ctx_size >> 4);
2985. /* Calculate the hash subkey H = CIPH(K, 0 repeated 16 times).
2986. * It is copied into the key context.
  2987. */
  2988. cipher = crypto_alloc_cipher("aes-generic", 0, 0);
  2989. if (IS_ERR(cipher)) {
  2990. aeadctx->enckey_len = 0;
  2991. ret = -ENOMEM;
  2992. goto out;
  2993. }
  2994. ret = crypto_cipher_setkey(cipher, key, keylen);
  2995. if (ret) {
  2996. aeadctx->enckey_len = 0;
  2997. goto out1;
  2998. }
  2999. memset(gctx->ghash_h, 0, AEAD_H_SIZE);
  3000. crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
  3001. out1:
  3002. crypto_free_cipher(cipher);
  3003. out:
  3004. return ret;
  3005. }
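/*
 * chcr_authenc_setkey - split the authenc() key with
 * crypto_authenc_extractkeys(), strip the RFC3686 nonce for the CTR
 * sub-types and keep only the encryption key (plus a decryption round key
 * derived for the CBC sub-types). The authentication key is consumed here to
 * precompute the partial h(ipad)/h(opad) digests stored in actx->h_iopad.
 */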
  3006. static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
  3007. unsigned int keylen)
  3008. {
  3009. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  3010. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3011. /* holds both the authentication and the encryption key */
  3012. struct crypto_authenc_keys keys;
  3013. unsigned int bs, subtype;
  3014. unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
  3015. int err = 0, i, key_ctx_len = 0;
  3016. unsigned char ck_size = 0;
  3017. unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
  3018. struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
  3019. struct algo_param param;
  3020. int align;
  3021. u8 *o_ptr = NULL;
  3022. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3023. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3024. & CRYPTO_TFM_REQ_MASK);
  3025. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3026. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3027. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3028. & CRYPTO_TFM_RES_MASK);
  3029. if (err)
  3030. goto out;
  3031. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3032. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3033. goto out;
  3034. }
  3035. if (get_alg_config(&param, max_authsize)) {
  3036. pr_err("chcr : Unsupported digest size\n");
  3037. goto out;
  3038. }
  3039. subtype = get_aead_subtype(authenc);
  3040. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3041. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3042. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3043. goto out;
  3044. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3045. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3046. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3047. }
  3048. if (keys.enckeylen == AES_KEYSIZE_128) {
  3049. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3050. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3051. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3052. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3053. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3054. } else {
  3055. pr_err("chcr : Unsupported cipher key\n");
  3056. goto out;
  3057. }
3058. /* Copy only the encryption key. The authentication key is consumed here
3059. * to generate h(ipad) and h(opad), so it is not needed again; authkeylen
3060. * is the size of the hash digest.
  3061. */
  3062. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3063. aeadctx->enckey_len = keys.enckeylen;
  3064. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3065. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3066. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3067. aeadctx->enckey_len << 3);
  3068. }
  3069. base_hash = chcr_alloc_shash(max_authsize);
  3070. if (IS_ERR(base_hash)) {
  3071. pr_err("chcr : Base driver cannot be loaded\n");
  3072. aeadctx->enckey_len = 0;
  3073. memzero_explicit(&keys, sizeof(keys));
  3074. return -EINVAL;
  3075. }
  3076. {
  3077. SHASH_DESC_ON_STACK(shash, base_hash);
  3078. shash->tfm = base_hash;
  3079. shash->flags = crypto_shash_get_flags(base_hash);
  3080. bs = crypto_shash_blocksize(base_hash);
  3081. align = KEYCTX_ALIGN_PAD(max_authsize);
  3082. o_ptr = actx->h_iopad + param.result_size + align;
  3083. if (keys.authkeylen > bs) {
  3084. err = crypto_shash_digest(shash, keys.authkey,
  3085. keys.authkeylen,
  3086. o_ptr);
  3087. if (err) {
  3088. pr_err("chcr : Base driver cannot be loaded\n");
  3089. goto out;
  3090. }
  3091. keys.authkeylen = max_authsize;
  3092. } else
  3093. memcpy(o_ptr, keys.authkey, keys.authkeylen);
3094. /* Compute the ipad-digest */
  3095. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3096. memcpy(pad, o_ptr, keys.authkeylen);
  3097. for (i = 0; i < bs >> 2; i++)
  3098. *((unsigned int *)pad + i) ^= IPAD_DATA;
  3099. if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
  3100. max_authsize))
  3101. goto out;
  3102. /* Compute the opad-digest */
  3103. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3104. memcpy(pad, o_ptr, keys.authkeylen);
  3105. for (i = 0; i < bs >> 2; i++)
  3106. *((unsigned int *)pad + i) ^= OPAD_DATA;
  3107. if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
  3108. goto out;
  3109. /* convert the ipad and opad digest to network order */
  3110. chcr_change_order(actx->h_iopad, param.result_size);
  3111. chcr_change_order(o_ptr, param.result_size);
  3112. key_ctx_len = sizeof(struct _key_ctx) +
  3113. roundup(keys.enckeylen, 16) +
  3114. (param.result_size + align) * 2;
  3115. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
  3116. 0, 1, key_ctx_len >> 4);
  3117. actx->auth_mode = param.auth_mode;
  3118. chcr_free_shash(base_hash);
  3119. memzero_explicit(&keys, sizeof(keys));
  3120. return 0;
  3121. }
  3122. out:
  3123. aeadctx->enckey_len = 0;
  3124. memzero_explicit(&keys, sizeof(keys));
  3125. if (!IS_ERR(base_hash))
  3126. chcr_free_shash(base_hash);
  3127. return -EINVAL;
  3128. }
  3129. static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
  3130. const u8 *key, unsigned int keylen)
  3131. {
  3132. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  3133. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  3134. struct crypto_authenc_keys keys;
  3135. int err;
3136. /* holds both the authentication and the encryption key */
  3137. unsigned int subtype;
  3138. int key_ctx_len = 0;
  3139. unsigned char ck_size = 0;
  3140. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3141. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3142. & CRYPTO_TFM_REQ_MASK);
  3143. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3144. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3145. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3146. & CRYPTO_TFM_RES_MASK);
  3147. if (err)
  3148. goto out;
  3149. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3150. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3151. goto out;
  3152. }
  3153. subtype = get_aead_subtype(authenc);
  3154. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3155. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3156. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3157. goto out;
  3158. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3159. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3160. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3161. }
  3162. if (keys.enckeylen == AES_KEYSIZE_128) {
  3163. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3164. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3165. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3166. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3167. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3168. } else {
  3169. pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
  3170. goto out;
  3171. }
  3172. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3173. aeadctx->enckey_len = keys.enckeylen;
  3174. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3175. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3176. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3177. aeadctx->enckey_len << 3);
  3178. }
  3179. key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
  3180. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
  3181. 0, key_ctx_len >> 4);
  3182. actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
  3183. memzero_explicit(&keys, sizeof(keys));
  3184. return 0;
  3185. out:
  3186. aeadctx->enckey_len = 0;
  3187. memzero_explicit(&keys, sizeof(keys));
  3188. return -EINVAL;
  3189. }
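/*
 * chcr_aead_op - common submission path for all AEAD requests: fail with
 * -ENXIO when no crypto device is bound, return -ENOSPC when the TX queue is
 * full and the request may not be backlogged, otherwise build the work
 * request through create_wr_fn and hand it to the LLD. Returns -EINPROGRESS,
 * or -EBUSY when the request was accepted on a full queue.
 */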
  3190. static int chcr_aead_op(struct aead_request *req,
  3191. int size,
  3192. create_wr_t create_wr_fn)
  3193. {
  3194. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3195. struct uld_ctx *u_ctx;
  3196. struct sk_buff *skb;
  3197. int isfull = 0;
  3198. if (!a_ctx(tfm)->dev) {
  3199. pr_err("chcr : %s : No crypto device.\n", __func__);
  3200. return -ENXIO;
  3201. }
  3202. u_ctx = ULD_CTX(a_ctx(tfm));
  3203. if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  3204. a_ctx(tfm)->tx_qidx)) {
  3205. isfull = 1;
  3206. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  3207. return -ENOSPC;
  3208. }
  3209. /* Form a WR from req */
  3210. skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
  3211. if (IS_ERR(skb) || !skb)
  3212. return PTR_ERR(skb);
  3213. skb->dev = u_ctx->lldi.ports[0];
  3214. set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
  3215. chcr_send_wr(skb);
  3216. return isfull ? -EBUSY : -EINPROGRESS;
  3217. }
  3218. static int chcr_aead_encrypt(struct aead_request *req)
  3219. {
  3220. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3221. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3222. reqctx->verify = VERIFY_HW;
  3223. reqctx->op = CHCR_ENCRYPT_OP;
  3224. switch (get_aead_subtype(tfm)) {
  3225. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3226. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3227. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3228. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3229. return chcr_aead_op(req, 0, create_authenc_wr);
  3230. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3231. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3232. return chcr_aead_op(req, 0, create_aead_ccm_wr);
  3233. default:
  3234. return chcr_aead_op(req, 0, create_gcm_wr);
  3235. }
  3236. }
  3237. static int chcr_aead_decrypt(struct aead_request *req)
  3238. {
  3239. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3240. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  3241. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3242. int size;
  3243. if (aeadctx->mayverify == VERIFY_SW) {
  3244. size = crypto_aead_maxauthsize(tfm);
  3245. reqctx->verify = VERIFY_SW;
  3246. } else {
  3247. size = 0;
  3248. reqctx->verify = VERIFY_HW;
  3249. }
  3250. reqctx->op = CHCR_DECRYPT_OP;
  3251. switch (get_aead_subtype(tfm)) {
  3252. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3253. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3254. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3255. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3256. return chcr_aead_op(req, size, create_authenc_wr);
  3257. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3258. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3259. return chcr_aead_op(req, size, create_aead_ccm_wr);
  3260. default:
  3261. return chcr_aead_op(req, size, create_gcm_wr);
  3262. }
  3263. }
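/*
 * Template table of every algorithm this driver exposes. The .type field
 * combines the crypto API type with the chcr sub-type that the request paths
 * switch on; fields common to each class (cra_flags, cra_module, the AEAD
 * encrypt/decrypt/init/exit hooks, the ahash callbacks, ...) are filled in
 * at registration time by chcr_register_alg().
 */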
  3264. static struct chcr_alg_template driver_algs[] = {
  3265. /* AES-CBC */
  3266. {
  3267. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
  3268. .is_registered = 0,
  3269. .alg.crypto = {
  3270. .cra_name = "cbc(aes)",
  3271. .cra_driver_name = "cbc-aes-chcr",
  3272. .cra_blocksize = AES_BLOCK_SIZE,
  3273. .cra_init = chcr_cra_init,
  3274. .cra_exit = chcr_cra_exit,
  3275. .cra_u.ablkcipher = {
  3276. .min_keysize = AES_MIN_KEY_SIZE,
  3277. .max_keysize = AES_MAX_KEY_SIZE,
  3278. .ivsize = AES_BLOCK_SIZE,
  3279. .setkey = chcr_aes_cbc_setkey,
  3280. .encrypt = chcr_aes_encrypt,
  3281. .decrypt = chcr_aes_decrypt,
  3282. }
  3283. }
  3284. },
  3285. {
  3286. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
  3287. .is_registered = 0,
  3288. .alg.crypto = {
  3289. .cra_name = "xts(aes)",
  3290. .cra_driver_name = "xts-aes-chcr",
  3291. .cra_blocksize = AES_BLOCK_SIZE,
  3292. .cra_init = chcr_cra_init,
  3293. .cra_exit = NULL,
  3294. .cra_u .ablkcipher = {
  3295. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  3296. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  3297. .ivsize = AES_BLOCK_SIZE,
  3298. .setkey = chcr_aes_xts_setkey,
  3299. .encrypt = chcr_aes_encrypt,
  3300. .decrypt = chcr_aes_decrypt,
  3301. }
  3302. }
  3303. },
  3304. {
  3305. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
  3306. .is_registered = 0,
  3307. .alg.crypto = {
  3308. .cra_name = "ctr(aes)",
  3309. .cra_driver_name = "ctr-aes-chcr",
  3310. .cra_blocksize = 1,
  3311. .cra_init = chcr_cra_init,
  3312. .cra_exit = chcr_cra_exit,
  3313. .cra_u.ablkcipher = {
  3314. .min_keysize = AES_MIN_KEY_SIZE,
  3315. .max_keysize = AES_MAX_KEY_SIZE,
  3316. .ivsize = AES_BLOCK_SIZE,
  3317. .setkey = chcr_aes_ctr_setkey,
  3318. .encrypt = chcr_aes_encrypt,
  3319. .decrypt = chcr_aes_decrypt,
  3320. }
  3321. }
  3322. },
  3323. {
  3324. .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
  3325. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
  3326. .is_registered = 0,
  3327. .alg.crypto = {
  3328. .cra_name = "rfc3686(ctr(aes))",
  3329. .cra_driver_name = "rfc3686-ctr-aes-chcr",
  3330. .cra_blocksize = 1,
  3331. .cra_init = chcr_rfc3686_init,
  3332. .cra_exit = chcr_cra_exit,
  3333. .cra_u.ablkcipher = {
  3334. .min_keysize = AES_MIN_KEY_SIZE +
  3335. CTR_RFC3686_NONCE_SIZE,
  3336. .max_keysize = AES_MAX_KEY_SIZE +
  3337. CTR_RFC3686_NONCE_SIZE,
  3338. .ivsize = CTR_RFC3686_IV_SIZE,
  3339. .setkey = chcr_aes_rfc3686_setkey,
  3340. .encrypt = chcr_aes_encrypt,
  3341. .decrypt = chcr_aes_decrypt,
  3342. .geniv = "seqiv",
  3343. }
  3344. }
  3345. },
  3346. /* SHA */
  3347. {
  3348. .type = CRYPTO_ALG_TYPE_AHASH,
  3349. .is_registered = 0,
  3350. .alg.hash = {
  3351. .halg.digestsize = SHA1_DIGEST_SIZE,
  3352. .halg.base = {
  3353. .cra_name = "sha1",
  3354. .cra_driver_name = "sha1-chcr",
  3355. .cra_blocksize = SHA1_BLOCK_SIZE,
  3356. }
  3357. }
  3358. },
  3359. {
  3360. .type = CRYPTO_ALG_TYPE_AHASH,
  3361. .is_registered = 0,
  3362. .alg.hash = {
  3363. .halg.digestsize = SHA256_DIGEST_SIZE,
  3364. .halg.base = {
  3365. .cra_name = "sha256",
  3366. .cra_driver_name = "sha256-chcr",
  3367. .cra_blocksize = SHA256_BLOCK_SIZE,
  3368. }
  3369. }
  3370. },
  3371. {
  3372. .type = CRYPTO_ALG_TYPE_AHASH,
  3373. .is_registered = 0,
  3374. .alg.hash = {
  3375. .halg.digestsize = SHA224_DIGEST_SIZE,
  3376. .halg.base = {
  3377. .cra_name = "sha224",
  3378. .cra_driver_name = "sha224-chcr",
  3379. .cra_blocksize = SHA224_BLOCK_SIZE,
  3380. }
  3381. }
  3382. },
  3383. {
  3384. .type = CRYPTO_ALG_TYPE_AHASH,
  3385. .is_registered = 0,
  3386. .alg.hash = {
  3387. .halg.digestsize = SHA384_DIGEST_SIZE,
  3388. .halg.base = {
  3389. .cra_name = "sha384",
  3390. .cra_driver_name = "sha384-chcr",
  3391. .cra_blocksize = SHA384_BLOCK_SIZE,
  3392. }
  3393. }
  3394. },
  3395. {
  3396. .type = CRYPTO_ALG_TYPE_AHASH,
  3397. .is_registered = 0,
  3398. .alg.hash = {
  3399. .halg.digestsize = SHA512_DIGEST_SIZE,
  3400. .halg.base = {
  3401. .cra_name = "sha512",
  3402. .cra_driver_name = "sha512-chcr",
  3403. .cra_blocksize = SHA512_BLOCK_SIZE,
  3404. }
  3405. }
  3406. },
  3407. /* HMAC */
  3408. {
  3409. .type = CRYPTO_ALG_TYPE_HMAC,
  3410. .is_registered = 0,
  3411. .alg.hash = {
  3412. .halg.digestsize = SHA1_DIGEST_SIZE,
  3413. .halg.base = {
  3414. .cra_name = "hmac(sha1)",
  3415. .cra_driver_name = "hmac-sha1-chcr",
  3416. .cra_blocksize = SHA1_BLOCK_SIZE,
  3417. }
  3418. }
  3419. },
  3420. {
  3421. .type = CRYPTO_ALG_TYPE_HMAC,
  3422. .is_registered = 0,
  3423. .alg.hash = {
  3424. .halg.digestsize = SHA224_DIGEST_SIZE,
  3425. .halg.base = {
  3426. .cra_name = "hmac(sha224)",
  3427. .cra_driver_name = "hmac-sha224-chcr",
  3428. .cra_blocksize = SHA224_BLOCK_SIZE,
  3429. }
  3430. }
  3431. },
  3432. {
  3433. .type = CRYPTO_ALG_TYPE_HMAC,
  3434. .is_registered = 0,
  3435. .alg.hash = {
  3436. .halg.digestsize = SHA256_DIGEST_SIZE,
  3437. .halg.base = {
  3438. .cra_name = "hmac(sha256)",
  3439. .cra_driver_name = "hmac-sha256-chcr",
  3440. .cra_blocksize = SHA256_BLOCK_SIZE,
  3441. }
  3442. }
  3443. },
  3444. {
  3445. .type = CRYPTO_ALG_TYPE_HMAC,
  3446. .is_registered = 0,
  3447. .alg.hash = {
  3448. .halg.digestsize = SHA384_DIGEST_SIZE,
  3449. .halg.base = {
  3450. .cra_name = "hmac(sha384)",
  3451. .cra_driver_name = "hmac-sha384-chcr",
  3452. .cra_blocksize = SHA384_BLOCK_SIZE,
  3453. }
  3454. }
  3455. },
  3456. {
  3457. .type = CRYPTO_ALG_TYPE_HMAC,
  3458. .is_registered = 0,
  3459. .alg.hash = {
  3460. .halg.digestsize = SHA512_DIGEST_SIZE,
  3461. .halg.base = {
  3462. .cra_name = "hmac(sha512)",
  3463. .cra_driver_name = "hmac-sha512-chcr",
  3464. .cra_blocksize = SHA512_BLOCK_SIZE,
  3465. }
  3466. }
  3467. },
  3468. /* Add AEAD Algorithms */
  3469. {
  3470. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
  3471. .is_registered = 0,
  3472. .alg.aead = {
  3473. .base = {
  3474. .cra_name = "gcm(aes)",
  3475. .cra_driver_name = "gcm-aes-chcr",
  3476. .cra_blocksize = 1,
  3477. .cra_priority = CHCR_AEAD_PRIORITY,
  3478. .cra_ctxsize = sizeof(struct chcr_context) +
  3479. sizeof(struct chcr_aead_ctx) +
  3480. sizeof(struct chcr_gcm_ctx),
  3481. },
  3482. .ivsize = GCM_AES_IV_SIZE,
  3483. .maxauthsize = GHASH_DIGEST_SIZE,
  3484. .setkey = chcr_gcm_setkey,
  3485. .setauthsize = chcr_gcm_setauthsize,
  3486. }
  3487. },
  3488. {
  3489. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
  3490. .is_registered = 0,
  3491. .alg.aead = {
  3492. .base = {
  3493. .cra_name = "rfc4106(gcm(aes))",
  3494. .cra_driver_name = "rfc4106-gcm-aes-chcr",
  3495. .cra_blocksize = 1,
  3496. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3497. .cra_ctxsize = sizeof(struct chcr_context) +
  3498. sizeof(struct chcr_aead_ctx) +
  3499. sizeof(struct chcr_gcm_ctx),
  3500. },
  3501. .ivsize = GCM_RFC4106_IV_SIZE,
  3502. .maxauthsize = GHASH_DIGEST_SIZE,
  3503. .setkey = chcr_gcm_setkey,
  3504. .setauthsize = chcr_4106_4309_setauthsize,
  3505. }
  3506. },
  3507. {
  3508. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
  3509. .is_registered = 0,
  3510. .alg.aead = {
  3511. .base = {
  3512. .cra_name = "ccm(aes)",
  3513. .cra_driver_name = "ccm-aes-chcr",
  3514. .cra_blocksize = 1,
  3515. .cra_priority = CHCR_AEAD_PRIORITY,
  3516. .cra_ctxsize = sizeof(struct chcr_context) +
  3517. sizeof(struct chcr_aead_ctx),
  3518. },
  3519. .ivsize = AES_BLOCK_SIZE,
  3520. .maxauthsize = GHASH_DIGEST_SIZE,
  3521. .setkey = chcr_aead_ccm_setkey,
  3522. .setauthsize = chcr_ccm_setauthsize,
  3523. }
  3524. },
  3525. {
  3526. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
  3527. .is_registered = 0,
  3528. .alg.aead = {
  3529. .base = {
  3530. .cra_name = "rfc4309(ccm(aes))",
  3531. .cra_driver_name = "rfc4309-ccm-aes-chcr",
  3532. .cra_blocksize = 1,
  3533. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3534. .cra_ctxsize = sizeof(struct chcr_context) +
  3535. sizeof(struct chcr_aead_ctx),
  3536. },
  3537. .ivsize = 8,
  3538. .maxauthsize = GHASH_DIGEST_SIZE,
  3539. .setkey = chcr_aead_rfc4309_setkey,
  3540. .setauthsize = chcr_4106_4309_setauthsize,
  3541. }
  3542. },
  3543. {
  3544. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3545. .is_registered = 0,
  3546. .alg.aead = {
  3547. .base = {
  3548. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  3549. .cra_driver_name =
  3550. "authenc-hmac-sha1-cbc-aes-chcr",
  3551. .cra_blocksize = AES_BLOCK_SIZE,
  3552. .cra_priority = CHCR_AEAD_PRIORITY,
  3553. .cra_ctxsize = sizeof(struct chcr_context) +
  3554. sizeof(struct chcr_aead_ctx) +
  3555. sizeof(struct chcr_authenc_ctx),
  3556. },
  3557. .ivsize = AES_BLOCK_SIZE,
  3558. .maxauthsize = SHA1_DIGEST_SIZE,
  3559. .setkey = chcr_authenc_setkey,
  3560. .setauthsize = chcr_authenc_setauthsize,
  3561. }
  3562. },
  3563. {
  3564. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3565. .is_registered = 0,
  3566. .alg.aead = {
  3567. .base = {
  3568. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  3569. .cra_driver_name =
  3570. "authenc-hmac-sha256-cbc-aes-chcr",
  3571. .cra_blocksize = AES_BLOCK_SIZE,
  3572. .cra_priority = CHCR_AEAD_PRIORITY,
  3573. .cra_ctxsize = sizeof(struct chcr_context) +
  3574. sizeof(struct chcr_aead_ctx) +
  3575. sizeof(struct chcr_authenc_ctx),
  3576. },
  3577. .ivsize = AES_BLOCK_SIZE,
  3578. .maxauthsize = SHA256_DIGEST_SIZE,
  3579. .setkey = chcr_authenc_setkey,
  3580. .setauthsize = chcr_authenc_setauthsize,
  3581. }
  3582. },
  3583. {
  3584. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3585. .is_registered = 0,
  3586. .alg.aead = {
  3587. .base = {
  3588. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  3589. .cra_driver_name =
  3590. "authenc-hmac-sha224-cbc-aes-chcr",
  3591. .cra_blocksize = AES_BLOCK_SIZE,
  3592. .cra_priority = CHCR_AEAD_PRIORITY,
  3593. .cra_ctxsize = sizeof(struct chcr_context) +
  3594. sizeof(struct chcr_aead_ctx) +
  3595. sizeof(struct chcr_authenc_ctx),
  3596. },
  3597. .ivsize = AES_BLOCK_SIZE,
  3598. .maxauthsize = SHA224_DIGEST_SIZE,
  3599. .setkey = chcr_authenc_setkey,
  3600. .setauthsize = chcr_authenc_setauthsize,
  3601. }
  3602. },
  3603. {
  3604. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3605. .is_registered = 0,
  3606. .alg.aead = {
  3607. .base = {
  3608. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  3609. .cra_driver_name =
  3610. "authenc-hmac-sha384-cbc-aes-chcr",
  3611. .cra_blocksize = AES_BLOCK_SIZE,
  3612. .cra_priority = CHCR_AEAD_PRIORITY,
  3613. .cra_ctxsize = sizeof(struct chcr_context) +
  3614. sizeof(struct chcr_aead_ctx) +
  3615. sizeof(struct chcr_authenc_ctx),
  3616. },
  3617. .ivsize = AES_BLOCK_SIZE,
  3618. .maxauthsize = SHA384_DIGEST_SIZE,
  3619. .setkey = chcr_authenc_setkey,
  3620. .setauthsize = chcr_authenc_setauthsize,
  3621. }
  3622. },
  3623. {
  3624. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3625. .is_registered = 0,
  3626. .alg.aead = {
  3627. .base = {
  3628. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  3629. .cra_driver_name =
  3630. "authenc-hmac-sha512-cbc-aes-chcr",
  3631. .cra_blocksize = AES_BLOCK_SIZE,
  3632. .cra_priority = CHCR_AEAD_PRIORITY,
  3633. .cra_ctxsize = sizeof(struct chcr_context) +
  3634. sizeof(struct chcr_aead_ctx) +
  3635. sizeof(struct chcr_authenc_ctx),
  3636. },
  3637. .ivsize = AES_BLOCK_SIZE,
  3638. .maxauthsize = SHA512_DIGEST_SIZE,
  3639. .setkey = chcr_authenc_setkey,
  3640. .setauthsize = chcr_authenc_setauthsize,
  3641. }
  3642. },
  3643. {
  3644. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
  3645. .is_registered = 0,
  3646. .alg.aead = {
  3647. .base = {
  3648. .cra_name = "authenc(digest_null,cbc(aes))",
  3649. .cra_driver_name =
  3650. "authenc-digest_null-cbc-aes-chcr",
  3651. .cra_blocksize = AES_BLOCK_SIZE,
  3652. .cra_priority = CHCR_AEAD_PRIORITY,
  3653. .cra_ctxsize = sizeof(struct chcr_context) +
  3654. sizeof(struct chcr_aead_ctx) +
  3655. sizeof(struct chcr_authenc_ctx),
  3656. },
  3657. .ivsize = AES_BLOCK_SIZE,
  3658. .maxauthsize = 0,
  3659. .setkey = chcr_aead_digest_null_setkey,
  3660. .setauthsize = chcr_authenc_null_setauthsize,
  3661. }
  3662. },
  3663. {
  3664. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3665. .is_registered = 0,
  3666. .alg.aead = {
  3667. .base = {
  3668. .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  3669. .cra_driver_name =
  3670. "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
  3671. .cra_blocksize = 1,
  3672. .cra_priority = CHCR_AEAD_PRIORITY,
  3673. .cra_ctxsize = sizeof(struct chcr_context) +
  3674. sizeof(struct chcr_aead_ctx) +
  3675. sizeof(struct chcr_authenc_ctx),
  3676. },
  3677. .ivsize = CTR_RFC3686_IV_SIZE,
  3678. .maxauthsize = SHA1_DIGEST_SIZE,
  3679. .setkey = chcr_authenc_setkey,
  3680. .setauthsize = chcr_authenc_setauthsize,
  3681. }
  3682. },
  3683. {
  3684. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3685. .is_registered = 0,
  3686. .alg.aead = {
  3687. .base = {
  3688. .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  3689. .cra_driver_name =
  3690. "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
  3691. .cra_blocksize = 1,
  3692. .cra_priority = CHCR_AEAD_PRIORITY,
  3693. .cra_ctxsize = sizeof(struct chcr_context) +
  3694. sizeof(struct chcr_aead_ctx) +
  3695. sizeof(struct chcr_authenc_ctx),
  3696. },
  3697. .ivsize = CTR_RFC3686_IV_SIZE,
  3698. .maxauthsize = SHA256_DIGEST_SIZE,
  3699. .setkey = chcr_authenc_setkey,
  3700. .setauthsize = chcr_authenc_setauthsize,
  3701. }
  3702. },
  3703. {
  3704. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3705. .is_registered = 0,
  3706. .alg.aead = {
  3707. .base = {
  3708. .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
  3709. .cra_driver_name =
  3710. "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
  3711. .cra_blocksize = 1,
  3712. .cra_priority = CHCR_AEAD_PRIORITY,
  3713. .cra_ctxsize = sizeof(struct chcr_context) +
  3714. sizeof(struct chcr_aead_ctx) +
  3715. sizeof(struct chcr_authenc_ctx),
  3716. },
  3717. .ivsize = CTR_RFC3686_IV_SIZE,
  3718. .maxauthsize = SHA224_DIGEST_SIZE,
  3719. .setkey = chcr_authenc_setkey,
  3720. .setauthsize = chcr_authenc_setauthsize,
  3721. }
  3722. },
  3723. {
  3724. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3725. .is_registered = 0,
  3726. .alg.aead = {
  3727. .base = {
  3728. .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
  3729. .cra_driver_name =
  3730. "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
  3731. .cra_blocksize = 1,
  3732. .cra_priority = CHCR_AEAD_PRIORITY,
  3733. .cra_ctxsize = sizeof(struct chcr_context) +
  3734. sizeof(struct chcr_aead_ctx) +
  3735. sizeof(struct chcr_authenc_ctx),
  3736. },
  3737. .ivsize = CTR_RFC3686_IV_SIZE,
  3738. .maxauthsize = SHA384_DIGEST_SIZE,
  3739. .setkey = chcr_authenc_setkey,
  3740. .setauthsize = chcr_authenc_setauthsize,
  3741. }
  3742. },
  3743. {
  3744. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3745. .is_registered = 0,
  3746. .alg.aead = {
  3747. .base = {
  3748. .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
  3749. .cra_driver_name =
  3750. "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
  3751. .cra_blocksize = 1,
  3752. .cra_priority = CHCR_AEAD_PRIORITY,
  3753. .cra_ctxsize = sizeof(struct chcr_context) +
  3754. sizeof(struct chcr_aead_ctx) +
  3755. sizeof(struct chcr_authenc_ctx),
  3756. },
  3757. .ivsize = CTR_RFC3686_IV_SIZE,
  3758. .maxauthsize = SHA512_DIGEST_SIZE,
  3759. .setkey = chcr_authenc_setkey,
  3760. .setauthsize = chcr_authenc_setauthsize,
  3761. }
  3762. },
  3763. {
  3764. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
  3765. .is_registered = 0,
  3766. .alg.aead = {
  3767. .base = {
  3768. .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
  3769. .cra_driver_name =
  3770. "authenc-digest_null-rfc3686-ctr-aes-chcr",
  3771. .cra_blocksize = 1,
  3772. .cra_priority = CHCR_AEAD_PRIORITY,
  3773. .cra_ctxsize = sizeof(struct chcr_context) +
  3774. sizeof(struct chcr_aead_ctx) +
  3775. sizeof(struct chcr_authenc_ctx),
  3776. },
  3777. .ivsize = CTR_RFC3686_IV_SIZE,
  3778. .maxauthsize = 0,
  3779. .setkey = chcr_aead_digest_null_setkey,
  3780. .setauthsize = chcr_authenc_null_setauthsize,
  3781. }
  3782. },
  3783. };
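/*
 * Illustrative only, not part of this driver: once registered, these
 * algorithms are reached by name through the generic kernel crypto API. A
 * minimal GCM encryption (error handling omitted; key, iv, assoclen,
 * cryptlen and the src/dst scatterlists are assumed to be set up by the
 * caller) looks roughly like:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sgl, dst_sgl, cryptlen, iv);
 *	crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *
 * Whether "gcm-aes-chcr" is actually selected depends on cra_priority
 * relative to other registered gcm(aes) implementations.
 */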
  3784. /*
3785. * chcr_unregister_alg - Deregister crypto algorithms from the
3786. * kernel framework.
  3787. */
  3788. static int chcr_unregister_alg(void)
  3789. {
  3790. int i;
  3791. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3792. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3793. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3794. if (driver_algs[i].is_registered)
  3795. crypto_unregister_alg(
  3796. &driver_algs[i].alg.crypto);
  3797. break;
  3798. case CRYPTO_ALG_TYPE_AEAD:
  3799. if (driver_algs[i].is_registered)
  3800. crypto_unregister_aead(
  3801. &driver_algs[i].alg.aead);
  3802. break;
  3803. case CRYPTO_ALG_TYPE_AHASH:
  3804. if (driver_algs[i].is_registered)
  3805. crypto_unregister_ahash(
  3806. &driver_algs[i].alg.hash);
  3807. break;
  3808. }
  3809. driver_algs[i].is_registered = 0;
  3810. }
  3811. return 0;
  3812. }
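/* Context and export-state sizes advertised for the ahash/hmac algorithms
 * registered below.
 */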
  3813. #define SZ_AHASH_CTX sizeof(struct chcr_context)
  3814. #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
  3815. #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
  3816. /*
3817. * chcr_register_alg - Register crypto algorithms with the kernel framework.
  3818. */
  3819. static int chcr_register_alg(void)
  3820. {
  3821. struct crypto_alg ai;
  3822. struct ahash_alg *a_hash;
  3823. int err = 0, i;
  3824. char *name = NULL;
  3825. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3826. if (driver_algs[i].is_registered)
  3827. continue;
  3828. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3829. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3830. driver_algs[i].alg.crypto.cra_priority =
  3831. CHCR_CRA_PRIORITY;
  3832. driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
  3833. driver_algs[i].alg.crypto.cra_flags =
  3834. CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
  3835. CRYPTO_ALG_NEED_FALLBACK;
  3836. driver_algs[i].alg.crypto.cra_ctxsize =
  3837. sizeof(struct chcr_context) +
  3838. sizeof(struct ablk_ctx);
  3839. driver_algs[i].alg.crypto.cra_alignmask = 0;
  3840. driver_algs[i].alg.crypto.cra_type =
  3841. &crypto_ablkcipher_type;
  3842. err = crypto_register_alg(&driver_algs[i].alg.crypto);
  3843. name = driver_algs[i].alg.crypto.cra_driver_name;
  3844. break;
  3845. case CRYPTO_ALG_TYPE_AEAD:
  3846. driver_algs[i].alg.aead.base.cra_flags =
  3847. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
  3848. driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
  3849. driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
  3850. driver_algs[i].alg.aead.init = chcr_aead_cra_init;
  3851. driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
  3852. driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
  3853. err = crypto_register_aead(&driver_algs[i].alg.aead);
  3854. name = driver_algs[i].alg.aead.base.cra_driver_name;
  3855. break;
  3856. case CRYPTO_ALG_TYPE_AHASH:
  3857. a_hash = &driver_algs[i].alg.hash;
  3858. a_hash->update = chcr_ahash_update;
  3859. a_hash->final = chcr_ahash_final;
  3860. a_hash->finup = chcr_ahash_finup;
  3861. a_hash->digest = chcr_ahash_digest;
  3862. a_hash->export = chcr_ahash_export;
  3863. a_hash->import = chcr_ahash_import;
  3864. a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
  3865. a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
  3866. a_hash->halg.base.cra_module = THIS_MODULE;
  3867. a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
  3868. a_hash->halg.base.cra_alignmask = 0;
  3869. a_hash->halg.base.cra_exit = NULL;
  3870. if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
  3871. a_hash->halg.base.cra_init = chcr_hmac_cra_init;
  3872. a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
  3873. a_hash->init = chcr_hmac_init;
  3874. a_hash->setkey = chcr_ahash_setkey;
  3875. a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
  3876. } else {
  3877. a_hash->init = chcr_sha_init;
  3878. a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
  3879. a_hash->halg.base.cra_init = chcr_sha_cra_init;
  3880. }
  3881. err = crypto_register_ahash(&driver_algs[i].alg.hash);
  3882. ai = driver_algs[i].alg.hash.halg.base;
  3883. name = ai.cra_driver_name;
  3884. break;
  3885. }
  3886. if (err) {
  3887. pr_err("chcr : %s : Algorithm registration failed\n",
  3888. name);
  3889. goto register_err;
  3890. } else {
  3891. driver_algs[i].is_registered = 1;
  3892. }
  3893. }
  3894. return 0;
  3895. register_err:
  3896. chcr_unregister_alg();
  3897. return err;
  3898. }
  3899. /*
  3900. * start_crypto - Register the crypto algorithms.
3901. * This should be called once when the first device comes up. After this,
3902. * the kernel will start calling the driver APIs for crypto operations.
  3903. */
  3904. int start_crypto(void)
  3905. {
  3906. return chcr_register_alg();
  3907. }
  3908. /*
3909. * stop_crypto - Deregister all the crypto algorithms from the kernel.
3910. * This should be called once when the last device goes down. After this,
3911. * the kernel will no longer call the driver APIs for crypto operations.
  3912. */
  3913. int stop_crypto(void)
  3914. {
  3915. chcr_unregister_alg();
  3916. return 0;
  3917. }