/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
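
/*
 * Editorial note: sgl_ent_len[n] and dsgl_ent_len[n] encode the work-request
 * space consumed by n source (ULPTX SGL) and destination (PHYS DSGL)
 * scatter-gather entries.  They are only used below, in chcr_hash_ent_in_wr()
 * and chcr_sg_ent_in_wr(), to bound how much payload fits in a single WR.
 */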
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
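
/*
 * sg_nents_xlen() counts how many hardware SG entries are needed to cover
 * "reqlen" bytes of @sg once the first "skip" bytes have been skipped, with
 * each entry limited to "entlen" bytes (larger mapped segments are split
 * across multiple entries).
 */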
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
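
/*
 * Software tag check for AEAD completions: compare the tag bytes that follow
 * the CPL_FW6_PLD against the expected tag (taken from the CPL data words for
 * GCM/RFC4106, or from the tail of the source scatterlist otherwise) and
 * report -EBADMSG on mismatch.
 */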
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}
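
/*
 * get_aes_decrypt_key() runs the AES key schedule in software and emits the
 * last Nk round-key words in reverse order.  The result is stored as
 * ablkctx->rrkey and loaded into the key context when a CBC decrypt WR is
 * built (see generate_copy_rrkey() and chcr_aes_cbc_setkey() below).
 */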
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
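
/*
 * chcr_compute_partial_hash() hashes exactly one block of @iopad with the
 * software shash and exports the raw internal state into @result_hash.
 * This is the ipad/opad partial digest used for HMAC precomputation, so the
 * padded key does not have to be rehashed on every request.
 */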
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
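
/*
 * The dsgl_walk_*() helpers below build the CPL_RX_PHYS_DSGL that tells the
 * hardware where to DMA the result: dsgl_walk_init() points at the
 * descriptor, dsgl_walk_add_page()/dsgl_walk_add_sg() append address/length
 * pairs (eight pairs per phys_sge_pairs block, destination entries capped at
 * CHCR_DST_SG_SIZE), and dsgl_walk_end() fills in the CPL header with the
 * final entry count and the response queue id.
 */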
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;

	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
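
/*
 * The ulptx_walk_*() helpers are the source-side counterpart: they fill a
 * ULPTX SGL describing where the hardware should read the payload from.
 * The first entry lives in the ulptx_sgl header (len0/addr0), subsequent
 * entries are packed two per ulptx_sge_pair, and each source entry is capped
 * at CHCR_SRC_SG_SIZE.
 */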
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
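
/*
 * chcr_sg_ent_in_wr() walks the source and destination scatterlists in
 * lock-step and, using the sgl_ent_len[]/dsgl_ent_len[] tables above, works
 * out how many payload bytes a single work request can carry without
 * exceeding the remaining WR space or MAX_DSGL_ENT.  Callers round the
 * result down to a 16-byte boundary when it does not cover the whole
 * request.
 */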
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
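
/*
 * create_wreq() fills the FW_CRYPTO_LOOKASIDE_WR header and ULPTX command
 * that are common to every cipher/hash/AEAD work request: overall length in
 * 16-byte units, the rx/tx queue ids to use, the completion cookie (the
 * crypto request pointer), and the immediate-data indication derived from
 * @imm.
 */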
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: bundles the cipher request, the ingress qid where the response
 *	     of this WR should be received, and the number of bytes to
 *	     process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
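
/*
 * CTR helpers: ctr_add_iv() adds a block count to the 128-bit big-endian
 * counter in @srciv with carry propagation and writes the result to @dstiv;
 * adjust_ctr_overflow() caps the byte count of a work request so that the
 * low 32-bit counter word cannot wrap while the hardware is processing it.
 */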
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
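
/*
 * chcr_update_tweak() advances the XTS tweak across work requests: the
 * current IV is encrypted with the second half of the XTS key, multiplied by
 * x in GF(2^128) once per block covered by the previous WR (batched eight at
 * a time with gf128mul_x8_ble()), and, when this is not the final IV,
 * decrypted back before being handed to the next WR.
 */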
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because, for RFC3686, the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for Decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
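
/*
 * chcr_handle_cipher_resp() is the completion path for a cipher WR.  When
 * the whole request has been processed it computes the final IV and
 * completes the request; otherwise it refreshes the IV, works out how many
 * of the remaining bytes fit in the next WR (handing the request to the
 * software fallback if nothing more fits) and sends that WR.
 */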
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}
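
/*
 * process_cipher() prepares the first WR of a cipher request: it validates
 * the key/IV/length, DMA-maps the scatterlists, decides whether the payload
 * can be sent inline ("immediate") or must go via SGLs, sets up the per-mode
 * IV (plain copy, or RFC3686 nonce + IV + counter), and either builds the WR
 * or hands the whole request to the software fallback when nothing would
 * fit.
 */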
  1034. static int process_cipher(struct ablkcipher_request *req,
  1035. unsigned short qid,
  1036. struct sk_buff **skb,
  1037. unsigned short op_type)
  1038. {
  1039. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1040. unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
  1041. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  1042. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  1043. struct cipher_wr_param wrparam;
  1044. int bytes, err = -EINVAL;
  1045. reqctx->processed = 0;
  1046. if (!req->info)
  1047. goto error;
  1048. if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
  1049. (req->nbytes == 0) ||
  1050. (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
  1051. pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
  1052. ablkctx->enckey_len, req->nbytes, ivsize);
  1053. goto error;
  1054. }
  1055. chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1056. if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
  1057. AES_MIN_KEY_SIZE +
  1058. sizeof(struct cpl_rx_phys_dsgl) +
  1059. /*Min dsgl size*/
  1060. 32))) {
  1061. /* Can be sent as Imm*/
  1062. unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
  1063. dnents = sg_nents_xlen(req->dst, req->nbytes,
  1064. CHCR_DST_SG_SIZE, 0);
  1065. phys_dsgl = get_space_for_phys_dsgl(dnents);
  1066. kctx_len = roundup(ablkctx->enckey_len, 16);
  1067. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
  1068. reqctx->imm = (transhdr_len + IV + req->nbytes) <=
  1069. SGE_MAX_WR_LEN;
  1070. bytes = IV + req->nbytes;
  1071. } else {
  1072. reqctx->imm = 0;
  1073. }
  1074. if (!reqctx->imm) {
  1075. bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
  1076. CIP_SPACE_LEFT(ablkctx->enckey_len),
  1077. 0, 0);
  1078. if ((bytes + reqctx->processed) >= req->nbytes)
  1079. bytes = req->nbytes - reqctx->processed;
  1080. else
  1081. bytes = rounddown(bytes, 16);
  1082. } else {
  1083. bytes = req->nbytes;
  1084. }
  1085. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1086. CRYPTO_ALG_SUB_TYPE_CTR) {
  1087. bytes = adjust_ctr_overflow(req->info, bytes);
  1088. }
  1089. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1090. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
  1091. memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
  1092. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
  1093. CTR_RFC3686_IV_SIZE);
  1094. /* initialize counter portion of counter block */
  1095. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  1096. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  1097. } else {
  1098. memcpy(reqctx->iv, req->info, IV);
  1099. }
  1100. if (unlikely(bytes == 0)) {
  1101. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  1102. req);
  1103. err = chcr_cipher_fallback(ablkctx->sw_cipher,
  1104. req->base.flags,
  1105. req->src,
  1106. req->dst,
  1107. req->nbytes,
  1108. reqctx->iv,
  1109. op_type);
  1110. goto error;
  1111. }
  1112. reqctx->op = op_type;
  1113. reqctx->srcsg = req->src;
  1114. reqctx->dstsg = req->dst;
  1115. reqctx->src_ofst = 0;
  1116. reqctx->dst_ofst = 0;
  1117. wrparam.qid = qid;
  1118. wrparam.req = req;
  1119. wrparam.bytes = bytes;
  1120. *skb = create_cipher_wr(&wrparam);
  1121. if (IS_ERR(*skb)) {
  1122. err = PTR_ERR(*skb);
  1123. goto unmap;
  1124. }
  1125. reqctx->processed = bytes;
  1126. reqctx->last_req_len = bytes;
  1127. return 0;
  1128. unmap:
  1129. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1130. error:
  1131. return err;
  1132. }
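/* ablkcipher entry points: build a cipher work request via process_cipher()
 * and post it on the hardware TX queue. Returns -EINPROGRESS once queued,
 * -EBUSY if the queue was full but the request may be backlogged, or the
 * setup/fallback error code otherwise.
 */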
  1133. static int chcr_aes_encrypt(struct ablkcipher_request *req)
  1134. {
  1135. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1136. struct sk_buff *skb = NULL;
  1137. int err, isfull = 0;
  1138. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1139. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1140. c_ctx(tfm)->tx_qidx))) {
  1141. isfull = 1;
  1142. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1143. return -ENOSPC;
  1144. }
  1145. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1146. &skb, CHCR_ENCRYPT_OP);
  1147. if (err || !skb)
  1148. return err;
  1149. skb->dev = u_ctx->lldi.ports[0];
  1150. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1151. chcr_send_wr(skb);
  1152. return isfull ? -EBUSY : -EINPROGRESS;
  1153. }
  1154. static int chcr_aes_decrypt(struct ablkcipher_request *req)
  1155. {
  1156. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  1157. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  1158. struct sk_buff *skb = NULL;
  1159. int err, isfull = 0;
  1160. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1161. c_ctx(tfm)->tx_qidx))) {
  1162. isfull = 1;
  1163. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1164. return -ENOSPC;
  1165. }
  1166. err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
  1167. &skb, CHCR_DECRYPT_OP);
  1168. if (err || !skb)
  1169. return err;
  1170. skb->dev = u_ctx->lldi.ports[0];
  1171. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1172. chcr_send_wr(skb);
  1173. return isfull ? -EBUSY : -EINPROGRESS;
  1174. }
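/* Bind a transform context to a crypto device. RX/TX queue indices are
 * derived from the device's channel id and the current CPU so that
 * contexts are spread across the per-channel queues.
 */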
  1175. static int chcr_device_init(struct chcr_context *ctx)
  1176. {
  1177. struct uld_ctx *u_ctx = NULL;
  1178. struct adapter *adap;
  1179. unsigned int id;
  1180. int txq_perchan, txq_idx, ntxq;
  1181. int err = 0, rxq_perchan, rxq_idx;
  1182. id = smp_processor_id();
  1183. if (!ctx->dev) {
  1184. u_ctx = assign_chcr_device();
if (!u_ctx) {
pr_err("chcr device assignment failed\n");
err = -ENXIO;
goto out;
}
  1189. ctx->dev = u_ctx->dev;
  1190. adap = padap(ctx->dev);
  1191. ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
  1192. adap->vres.ncrypto_fc);
  1193. rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
  1194. txq_perchan = ntxq / u_ctx->lldi.nchan;
  1195. rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
  1196. rxq_idx += id % rxq_perchan;
  1197. txq_idx = ctx->dev->tx_channel_id * txq_perchan;
  1198. txq_idx += id % txq_perchan;
  1199. spin_lock(&ctx->dev->lock_chcr_dev);
  1200. ctx->rx_qidx = rxq_idx;
  1201. ctx->tx_qidx = txq_idx;
  1202. ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
  1203. ctx->dev->rx_channel_id = 0;
  1204. spin_unlock(&ctx->dev->lock_chcr_dev);
  1205. }
  1206. out:
  1207. return err;
  1208. }
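/* Allocate the software skcipher fallback (plus an aes-generic cipher for
 * XTS tweak updates), set the request context size and bind the context
 * to a device.
 */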
  1209. static int chcr_cra_init(struct crypto_tfm *tfm)
  1210. {
  1211. struct crypto_alg *alg = tfm->__crt_alg;
  1212. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1213. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1214. ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
  1215. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  1216. if (IS_ERR(ablkctx->sw_cipher)) {
  1217. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1218. return PTR_ERR(ablkctx->sw_cipher);
  1219. }
  1220. if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
/* To update the tweak */
  1222. ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
  1223. if (IS_ERR(ablkctx->aes_generic)) {
  1224. pr_err("failed to allocate aes cipher for tweak\n");
  1225. return PTR_ERR(ablkctx->aes_generic);
  1226. }
  1227. } else
  1228. ablkctx->aes_generic = NULL;
  1229. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1230. return chcr_device_init(crypto_tfm_ctx(tfm));
  1231. }
  1232. static int chcr_rfc3686_init(struct crypto_tfm *tfm)
  1233. {
  1234. struct crypto_alg *alg = tfm->__crt_alg;
  1235. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1236. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
 * cannot be used as the fallback in chcr_handle_cipher_response.
 */
  1240. ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
  1241. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  1242. if (IS_ERR(ablkctx->sw_cipher)) {
  1243. pr_err("failed to allocate fallback for %s\n", alg->cra_name);
  1244. return PTR_ERR(ablkctx->sw_cipher);
  1245. }
  1246. tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
  1247. return chcr_device_init(crypto_tfm_ctx(tfm));
  1248. }
  1249. static void chcr_cra_exit(struct crypto_tfm *tfm)
  1250. {
  1251. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1252. struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
  1253. crypto_free_skcipher(ablkctx->sw_cipher);
  1254. if (ablkctx->aes_generic)
  1255. crypto_free_cipher(ablkctx->aes_generic);
  1256. }
  1257. static int get_alg_config(struct algo_param *params,
  1258. unsigned int auth_size)
  1259. {
  1260. switch (auth_size) {
  1261. case SHA1_DIGEST_SIZE:
  1262. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
  1263. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
  1264. params->result_size = SHA1_DIGEST_SIZE;
  1265. break;
  1266. case SHA224_DIGEST_SIZE:
  1267. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1268. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
  1269. params->result_size = SHA256_DIGEST_SIZE;
  1270. break;
  1271. case SHA256_DIGEST_SIZE:
  1272. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  1273. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
  1274. params->result_size = SHA256_DIGEST_SIZE;
  1275. break;
  1276. case SHA384_DIGEST_SIZE:
  1277. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1278. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
  1279. params->result_size = SHA512_DIGEST_SIZE;
  1280. break;
  1281. case SHA512_DIGEST_SIZE:
  1282. params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
  1283. params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
  1284. params->result_size = SHA512_DIGEST_SIZE;
  1285. break;
  1286. default:
  1287. pr_err("chcr : ERROR, unsupported digest size\n");
  1288. return -EINVAL;
  1289. }
  1290. return 0;
  1291. }
  1292. static inline void chcr_free_shash(struct crypto_shash *base_hash)
  1293. {
  1294. crypto_free_shash(base_hash);
  1295. }
/**
 * create_hash_wr - Create hash work request
 * @req: ahash request
 * @param: work request parameters (key context length, buffer/SG lengths,
 *	   last/more flags)
 */
  1300. static struct sk_buff *create_hash_wr(struct ahash_request *req,
  1301. struct hash_wr_param *param)
  1302. {
  1303. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1304. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1305. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1306. struct sk_buff *skb = NULL;
  1307. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1308. struct chcr_wr *chcr_req;
  1309. struct ulptx_sgl *ulptx;
  1310. unsigned int nents = 0, transhdr_len;
  1311. unsigned int temp = 0;
  1312. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  1313. GFP_ATOMIC;
  1314. struct adapter *adap = padap(h_ctx(tfm)->dev);
  1315. int error = 0;
  1316. transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
  1317. req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
  1318. param->sg_len) <= SGE_MAX_WR_LEN;
  1319. nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
  1320. CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
  1321. nents += param->bfr_len ? 1 : 0;
  1322. transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
  1323. param->sg_len, 16) : (sgl_len(nents) * 8);
  1324. transhdr_len = roundup(transhdr_len, 16);
  1325. skb = alloc_skb(transhdr_len, flags);
  1326. if (!skb)
  1327. return ERR_PTR(-ENOMEM);
  1328. chcr_req = __skb_put_zero(skb, transhdr_len);
  1329. chcr_req->sec_cpl.op_ivinsrtofst =
  1330. FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
  1331. chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
  1332. chcr_req->sec_cpl.aadstart_cipherstop_hi =
  1333. FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
  1334. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  1335. FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
  1336. chcr_req->sec_cpl.seqno_numivs =
  1337. FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
  1338. param->opad_needed, 0);
  1339. chcr_req->sec_cpl.ivgen_hdrlen =
  1340. FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
  1341. memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
  1342. param->alg_prm.result_size);
  1343. if (param->opad_needed)
  1344. memcpy(chcr_req->key_ctx.key +
  1345. ((param->alg_prm.result_size <= 32) ? 32 :
  1346. CHCR_HASH_MAX_DIGEST_SIZE),
  1347. hmacctx->opad, param->alg_prm.result_size);
  1348. chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
  1349. param->alg_prm.mk_size, 0,
  1350. param->opad_needed,
  1351. ((param->kctx_len +
  1352. sizeof(chcr_req->key_ctx)) >> 4));
  1353. chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
  1354. ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
  1355. DUMMY_BYTES);
  1356. if (param->bfr_len != 0) {
  1357. req_ctx->hctx_wr.dma_addr =
  1358. dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
  1359. param->bfr_len, DMA_TO_DEVICE);
  1360. if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
req_ctx->hctx_wr.dma_addr)) {
  1362. error = -ENOMEM;
  1363. goto err;
  1364. }
  1365. req_ctx->hctx_wr.dma_len = param->bfr_len;
  1366. } else {
  1367. req_ctx->hctx_wr.dma_addr = 0;
  1368. }
  1369. chcr_add_hash_src_ent(req, ulptx, param);
/* Request up to the maximum WR size */
  1371. temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
  1372. (param->sg_len + param->bfr_len) : 0);
  1373. atomic_inc(&adap->chcr_stats.digest_rqst);
  1374. create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
  1375. param->hash_size, transhdr_len,
  1376. temp, 0);
  1377. req_ctx->hctx_wr.skb = skb;
  1378. return skb;
  1379. err:
  1380. kfree_skb(skb);
  1381. return ERR_PTR(error);
  1382. }
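/* Hash update: data smaller than a block is accumulated in the request
 * buffer; full blocks are pushed to the hardware as a "more" work request
 * and the running partial hash is kept in the request context.
 */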
  1383. static int chcr_ahash_update(struct ahash_request *req)
  1384. {
  1385. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1386. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1387. struct uld_ctx *u_ctx = NULL;
  1388. struct sk_buff *skb;
  1389. u8 remainder = 0, bs;
  1390. unsigned int nbytes = req->nbytes;
  1391. struct hash_wr_param params;
  1392. int error, isfull = 0;
  1393. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1394. u_ctx = ULD_CTX(h_ctx(rtfm));
  1395. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1396. h_ctx(rtfm)->tx_qidx))) {
  1397. isfull = 1;
  1398. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1399. return -ENOSPC;
  1400. }
  1401. if (nbytes + req_ctx->reqlen >= bs) {
  1402. remainder = (nbytes + req_ctx->reqlen) % bs;
  1403. nbytes = nbytes + req_ctx->reqlen - remainder;
  1404. } else {
  1405. sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
  1406. + req_ctx->reqlen, nbytes, 0);
  1407. req_ctx->reqlen += nbytes;
  1408. return 0;
  1409. }
  1410. chcr_init_hctx_per_wr(req_ctx);
  1411. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1412. if (error)
  1413. return -ENOMEM;
  1414. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1415. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1416. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1417. HASH_SPACE_LEFT(params.kctx_len), 0);
  1418. if (params.sg_len > req->nbytes)
  1419. params.sg_len = req->nbytes;
  1420. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
  1421. req_ctx->reqlen;
  1422. params.opad_needed = 0;
  1423. params.more = 1;
  1424. params.last = 0;
  1425. params.bfr_len = req_ctx->reqlen;
  1426. params.scmd1 = 0;
  1427. req_ctx->hctx_wr.srcsg = req->src;
  1428. params.hash_size = params.alg_prm.result_size;
  1429. req_ctx->data_len += params.sg_len + params.bfr_len;
  1430. skb = create_hash_wr(req, &params);
  1431. if (IS_ERR(skb)) {
  1432. error = PTR_ERR(skb);
  1433. goto unmap;
  1434. }
  1435. req_ctx->hctx_wr.processed += params.sg_len;
  1436. if (remainder) {
  1437. /* Swap buffers */
  1438. swap(req_ctx->reqbfr, req_ctx->skbfr);
  1439. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  1440. req_ctx->reqbfr, remainder, req->nbytes -
  1441. remainder);
  1442. }
  1443. req_ctx->reqlen = remainder;
  1444. skb->dev = u_ctx->lldi.ports[0];
  1445. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1446. chcr_send_wr(skb);
  1447. return isfull ? -EBUSY : -EINPROGRESS;
  1448. unmap:
  1449. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1450. return error;
  1451. }
  1452. static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
  1453. {
  1454. memset(bfr_ptr, 0, bs);
  1455. *bfr_ptr = 0x80;
  1456. if (bs == 64)
  1457. *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
  1458. else
  1459. *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
  1460. }
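/* Hash final: flush whatever remains in the request buffer. When nothing
 * is buffered, a pre-padded final block is built in software and sent so
 * the hardware can still produce the digest.
 */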
  1461. static int chcr_ahash_final(struct ahash_request *req)
  1462. {
  1463. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1464. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1465. struct hash_wr_param params;
  1466. struct sk_buff *skb;
  1467. struct uld_ctx *u_ctx = NULL;
  1468. u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1469. chcr_init_hctx_per_wr(req_ctx);
  1470. u_ctx = ULD_CTX(h_ctx(rtfm));
  1471. if (is_hmac(crypto_ahash_tfm(rtfm)))
  1472. params.opad_needed = 1;
  1473. else
  1474. params.opad_needed = 0;
  1475. params.sg_len = 0;
  1476. req_ctx->hctx_wr.isfinal = 1;
  1477. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1478. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1479. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1480. params.opad_needed = 1;
  1481. params.kctx_len *= 2;
  1482. } else {
  1483. params.opad_needed = 0;
  1484. }
  1485. req_ctx->hctx_wr.result = 1;
  1486. params.bfr_len = req_ctx->reqlen;
  1487. req_ctx->data_len += params.bfr_len + params.sg_len;
  1488. req_ctx->hctx_wr.srcsg = req->src;
  1489. if (req_ctx->reqlen == 0) {
  1490. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1491. params.last = 0;
  1492. params.more = 1;
  1493. params.scmd1 = 0;
  1494. params.bfr_len = bs;
  1495. } else {
  1496. params.scmd1 = req_ctx->data_len;
  1497. params.last = 1;
  1498. params.more = 0;
  1499. }
  1500. params.hash_size = crypto_ahash_digestsize(rtfm);
  1501. skb = create_hash_wr(req, &params);
  1502. if (IS_ERR(skb))
  1503. return PTR_ERR(skb);
  1504. req_ctx->reqlen = 0;
  1505. skb->dev = u_ctx->lldi.ports[0];
  1506. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1507. chcr_send_wr(skb);
  1508. return -EINPROGRESS;
  1509. }
  1510. static int chcr_ahash_finup(struct ahash_request *req)
  1511. {
  1512. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1513. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1514. struct uld_ctx *u_ctx = NULL;
  1515. struct sk_buff *skb;
  1516. struct hash_wr_param params;
  1517. u8 bs;
  1518. int error, isfull = 0;
  1519. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1520. u_ctx = ULD_CTX(h_ctx(rtfm));
  1521. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1522. h_ctx(rtfm)->tx_qidx))) {
  1523. isfull = 1;
  1524. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1525. return -ENOSPC;
  1526. }
  1527. chcr_init_hctx_per_wr(req_ctx);
  1528. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1529. if (error)
  1530. return -ENOMEM;
  1531. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1532. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1533. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1534. params.kctx_len *= 2;
  1535. params.opad_needed = 1;
  1536. } else {
  1537. params.opad_needed = 0;
  1538. }
  1539. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1540. HASH_SPACE_LEFT(params.kctx_len), 0);
  1541. if (params.sg_len < req->nbytes) {
  1542. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1543. params.kctx_len /= 2;
  1544. params.opad_needed = 0;
  1545. }
  1546. params.last = 0;
  1547. params.more = 1;
  1548. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
  1549. - req_ctx->reqlen;
  1550. params.hash_size = params.alg_prm.result_size;
  1551. params.scmd1 = 0;
  1552. } else {
  1553. params.last = 1;
  1554. params.more = 0;
  1555. params.sg_len = req->nbytes;
  1556. params.hash_size = crypto_ahash_digestsize(rtfm);
  1557. params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
  1558. params.sg_len;
  1559. }
  1560. params.bfr_len = req_ctx->reqlen;
  1561. req_ctx->data_len += params.bfr_len + params.sg_len;
  1562. req_ctx->hctx_wr.result = 1;
  1563. req_ctx->hctx_wr.srcsg = req->src;
  1564. if ((req_ctx->reqlen + req->nbytes) == 0) {
  1565. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1566. params.last = 0;
  1567. params.more = 1;
  1568. params.scmd1 = 0;
  1569. params.bfr_len = bs;
  1570. }
  1571. skb = create_hash_wr(req, &params);
  1572. if (IS_ERR(skb)) {
  1573. error = PTR_ERR(skb);
  1574. goto unmap;
  1575. }
  1576. req_ctx->reqlen = 0;
  1577. req_ctx->hctx_wr.processed += params.sg_len;
  1578. skb->dev = u_ctx->lldi.ports[0];
  1579. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1580. chcr_send_wr(skb);
  1581. return isfull ? -EBUSY : -EINPROGRESS;
  1582. unmap:
  1583. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1584. return error;
  1585. }
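/* One-shot digest: reinitialise the request state, then either hash the
 * whole source in a single "last" work request or start a partial-hash
 * sequence that chcr_ahash_continue() finishes.
 */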
  1586. static int chcr_ahash_digest(struct ahash_request *req)
  1587. {
  1588. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1589. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1590. struct uld_ctx *u_ctx = NULL;
  1591. struct sk_buff *skb;
  1592. struct hash_wr_param params;
  1593. u8 bs;
  1594. int error, isfull = 0;
  1595. rtfm->init(req);
  1596. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1597. u_ctx = ULD_CTX(h_ctx(rtfm));
  1598. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1599. h_ctx(rtfm)->tx_qidx))) {
  1600. isfull = 1;
  1601. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1602. return -ENOSPC;
  1603. }
  1604. chcr_init_hctx_per_wr(req_ctx);
  1605. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1606. if (error)
  1607. return -ENOMEM;
  1608. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1609. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1610. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1611. params.kctx_len *= 2;
  1612. params.opad_needed = 1;
  1613. } else {
  1614. params.opad_needed = 0;
  1615. }
  1616. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1617. HASH_SPACE_LEFT(params.kctx_len), 0);
  1618. if (params.sg_len < req->nbytes) {
  1619. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1620. params.kctx_len /= 2;
  1621. params.opad_needed = 0;
  1622. }
  1623. params.last = 0;
  1624. params.more = 1;
  1625. params.scmd1 = 0;
  1626. params.sg_len = rounddown(params.sg_len, bs);
  1627. params.hash_size = params.alg_prm.result_size;
  1628. } else {
  1629. params.sg_len = req->nbytes;
  1630. params.hash_size = crypto_ahash_digestsize(rtfm);
  1631. params.last = 1;
  1632. params.more = 0;
  1633. params.scmd1 = req->nbytes + req_ctx->data_len;
  1634. }
  1635. params.bfr_len = 0;
  1636. req_ctx->hctx_wr.result = 1;
  1637. req_ctx->hctx_wr.srcsg = req->src;
  1638. req_ctx->data_len += params.bfr_len + params.sg_len;
  1639. if (req->nbytes == 0) {
  1640. create_last_hash_block(req_ctx->reqbfr, bs, 0);
  1641. params.more = 1;
  1642. params.bfr_len = bs;
  1643. }
  1644. skb = create_hash_wr(req, &params);
  1645. if (IS_ERR(skb)) {
  1646. error = PTR_ERR(skb);
  1647. goto unmap;
  1648. }
  1649. req_ctx->hctx_wr.processed += params.sg_len;
  1650. skb->dev = u_ctx->lldi.ports[0];
  1651. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1652. chcr_send_wr(skb);
  1653. return isfull ? -EBUSY : -EINPROGRESS;
  1654. unmap:
  1655. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1656. return error;
  1657. }
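/* Issue the next work request for a hash that did not fit in a single WR;
 * called from the response handler until the whole source scatterlist has
 * been processed.
 */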
  1658. static int chcr_ahash_continue(struct ahash_request *req)
  1659. {
  1660. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1661. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1662. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1663. struct uld_ctx *u_ctx = NULL;
  1664. struct sk_buff *skb;
  1665. struct hash_wr_param params;
  1666. u8 bs;
  1667. int error;
  1668. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1669. u_ctx = ULD_CTX(h_ctx(rtfm));
  1670. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1671. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1672. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1673. params.kctx_len *= 2;
  1674. params.opad_needed = 1;
  1675. } else {
  1676. params.opad_needed = 0;
  1677. }
  1678. params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
  1679. HASH_SPACE_LEFT(params.kctx_len),
  1680. hctx_wr->src_ofst);
  1681. if ((params.sg_len + hctx_wr->processed) > req->nbytes)
  1682. params.sg_len = req->nbytes - hctx_wr->processed;
  1683. if (!hctx_wr->result ||
  1684. ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
  1685. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1686. params.kctx_len /= 2;
  1687. params.opad_needed = 0;
  1688. }
  1689. params.last = 0;
  1690. params.more = 1;
  1691. params.sg_len = rounddown(params.sg_len, bs);
  1692. params.hash_size = params.alg_prm.result_size;
  1693. params.scmd1 = 0;
  1694. } else {
  1695. params.last = 1;
  1696. params.more = 0;
  1697. params.hash_size = crypto_ahash_digestsize(rtfm);
  1698. params.scmd1 = reqctx->data_len + params.sg_len;
  1699. }
  1700. params.bfr_len = 0;
  1701. reqctx->data_len += params.sg_len;
  1702. skb = create_hash_wr(req, &params);
  1703. if (IS_ERR(skb)) {
  1704. error = PTR_ERR(skb);
  1705. goto err;
  1706. }
  1707. hctx_wr->processed += params.sg_len;
  1708. skb->dev = u_ctx->lldi.ports[0];
  1709. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1710. chcr_send_wr(skb);
  1711. return 0;
  1712. err:
  1713. return error;
  1714. }
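/* Completion handler for hash work requests: copy out the final digest or
 * the updated partial hash, release the bounce-buffer mapping, and either
 * continue a multi-WR hash or complete the request.
 */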
  1715. static inline void chcr_handle_ahash_resp(struct ahash_request *req,
  1716. unsigned char *input,
  1717. int err)
  1718. {
  1719. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1720. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1721. int digestsize, updated_digestsize;
  1722. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1723. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1724. if (input == NULL)
  1725. goto out;
  1726. digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
  1727. updated_digestsize = digestsize;
  1728. if (digestsize == SHA224_DIGEST_SIZE)
  1729. updated_digestsize = SHA256_DIGEST_SIZE;
  1730. else if (digestsize == SHA384_DIGEST_SIZE)
  1731. updated_digestsize = SHA512_DIGEST_SIZE;
  1732. if (hctx_wr->dma_addr) {
  1733. dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
  1734. hctx_wr->dma_len, DMA_TO_DEVICE);
  1735. hctx_wr->dma_addr = 0;
  1736. }
  1737. if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
  1738. req->nbytes)) {
  1739. if (hctx_wr->result == 1) {
  1740. hctx_wr->result = 0;
  1741. memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
  1742. digestsize);
  1743. } else {
  1744. memcpy(reqctx->partial_hash,
  1745. input + sizeof(struct cpl_fw6_pld),
  1746. updated_digestsize);
  1747. }
  1748. goto unmap;
  1749. }
  1750. memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
  1751. updated_digestsize);
  1752. err = chcr_ahash_continue(req);
  1753. if (err)
  1754. goto unmap;
  1755. return;
  1756. unmap:
  1757. if (hctx_wr->is_sg_map)
  1758. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1759. out:
  1760. req->base.complete(&req->base, err);
  1761. }
/*
 * chcr_handle_resp - Dispatch a completed work request to the handler for
 * its algorithm type (AEAD, ablkcipher or ahash), which unmaps the DMA
 * buffers and either continues or completes the crypto request.
 * @req: crypto request
 */
  1766. int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
  1767. int err)
  1768. {
  1769. struct crypto_tfm *tfm = req->tfm;
  1770. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1771. struct adapter *adap = padap(ctx->dev);
  1772. switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
  1773. case CRYPTO_ALG_TYPE_AEAD:
  1774. chcr_handle_aead_resp(aead_request_cast(req), input, err);
  1775. break;
  1776. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  1777. err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
  1778. input, err);
  1779. break;
  1780. case CRYPTO_ALG_TYPE_AHASH:
  1781. chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
  1782. }
  1783. atomic_inc(&adap->chcr_stats.complete);
  1784. return err;
  1785. }
  1786. static int chcr_ahash_export(struct ahash_request *areq, void *out)
  1787. {
  1788. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1789. struct chcr_ahash_req_ctx *state = out;
  1790. state->reqlen = req_ctx->reqlen;
  1791. state->data_len = req_ctx->data_len;
  1792. memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
  1793. memcpy(state->partial_hash, req_ctx->partial_hash,
  1794. CHCR_HASH_MAX_DIGEST_SIZE);
  1795. chcr_init_hctx_per_wr(state);
  1796. return 0;
  1797. }
  1798. static int chcr_ahash_import(struct ahash_request *areq, const void *in)
  1799. {
  1800. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1801. struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
  1802. req_ctx->reqlen = state->reqlen;
  1803. req_ctx->data_len = state->data_len;
  1804. req_ctx->reqbfr = req_ctx->bfr1;
  1805. req_ctx->skbfr = req_ctx->bfr2;
  1806. memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
  1807. memcpy(req_ctx->partial_hash, state->partial_hash,
  1808. CHCR_HASH_MAX_DIGEST_SIZE);
  1809. chcr_init_hctx_per_wr(req_ctx);
  1810. return 0;
  1811. }
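/* HMAC setkey: hash keys longer than the block size, XOR the padded key
 * with the ipad/opad constants and store the byte-swapped partial hashes
 * of both pads for the hardware key context.
 */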
  1812. static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
  1813. unsigned int keylen)
  1814. {
  1815. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
  1816. unsigned int digestsize = crypto_ahash_digestsize(tfm);
  1817. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
  1818. unsigned int i, err = 0, updated_digestsize;
  1819. SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
/* Use the key to calculate the ipad and opad. The ipad will be sent with
 * the first request's data and the opad with the final hash result;
 * they are stored in hmacctx->ipad and hmacctx->opad respectively.
 */
  1824. shash->tfm = hmacctx->base_hash;
  1825. shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
  1826. if (keylen > bs) {
  1827. err = crypto_shash_digest(shash, key, keylen,
  1828. hmacctx->ipad);
  1829. if (err)
  1830. goto out;
  1831. keylen = digestsize;
  1832. } else {
  1833. memcpy(hmacctx->ipad, key, keylen);
  1834. }
  1835. memset(hmacctx->ipad + keylen, 0, bs - keylen);
  1836. memcpy(hmacctx->opad, hmacctx->ipad, bs);
  1837. for (i = 0; i < bs / sizeof(int); i++) {
  1838. *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
  1839. *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
  1840. }
  1841. updated_digestsize = digestsize;
  1842. if (digestsize == SHA224_DIGEST_SIZE)
  1843. updated_digestsize = SHA256_DIGEST_SIZE;
  1844. else if (digestsize == SHA384_DIGEST_SIZE)
  1845. updated_digestsize = SHA512_DIGEST_SIZE;
  1846. err = chcr_compute_partial_hash(shash, hmacctx->ipad,
  1847. hmacctx->ipad, digestsize);
  1848. if (err)
  1849. goto out;
  1850. chcr_change_order(hmacctx->ipad, updated_digestsize);
  1851. err = chcr_compute_partial_hash(shash, hmacctx->opad,
  1852. hmacctx->opad, digestsize);
  1853. if (err)
  1854. goto out;
  1855. chcr_change_order(hmacctx->opad, updated_digestsize);
  1856. out:
  1857. return err;
  1858. }
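/* XTS setkey: program both the fallback cipher and the hardware key
 * context. key_len here is the concatenated pair of AES keys, which is
 * why the reverse-round (decrypt) key is derived for key_len << 2 bits.
 */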
  1859. static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  1860. unsigned int key_len)
  1861. {
  1862. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  1863. unsigned short context_size = 0;
  1864. int err;
  1865. err = chcr_cipher_fallback_setkey(cipher, key, key_len);
  1866. if (err)
  1867. goto badkey_err;
  1868. memcpy(ablkctx->key, key, key_len);
  1869. ablkctx->enckey_len = key_len;
  1870. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
  1871. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
  1872. ablkctx->key_ctx_hdr =
  1873. FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
  1874. CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
  1875. CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
  1876. CHCR_KEYCTX_NO_KEY, 1,
  1877. 0, context_size);
  1878. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
  1879. return 0;
  1880. badkey_err:
  1881. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1882. ablkctx->enckey_len = 0;
  1883. return err;
  1884. }
  1885. static int chcr_sha_init(struct ahash_request *areq)
  1886. {
  1887. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1888. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  1889. int digestsize = crypto_ahash_digestsize(tfm);
  1890. req_ctx->data_len = 0;
  1891. req_ctx->reqlen = 0;
  1892. req_ctx->reqbfr = req_ctx->bfr1;
  1893. req_ctx->skbfr = req_ctx->bfr2;
  1894. copy_hash_init_values(req_ctx->partial_hash, digestsize);
  1895. return 0;
  1896. }
  1897. static int chcr_sha_cra_init(struct crypto_tfm *tfm)
  1898. {
  1899. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1900. sizeof(struct chcr_ahash_req_ctx));
  1901. return chcr_device_init(crypto_tfm_ctx(tfm));
  1902. }
  1903. static int chcr_hmac_init(struct ahash_request *areq)
  1904. {
  1905. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1906. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
  1907. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
  1908. unsigned int digestsize = crypto_ahash_digestsize(rtfm);
  1909. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1910. chcr_sha_init(areq);
  1911. req_ctx->data_len = bs;
  1912. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1913. if (digestsize == SHA224_DIGEST_SIZE)
  1914. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1915. SHA256_DIGEST_SIZE);
  1916. else if (digestsize == SHA384_DIGEST_SIZE)
  1917. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1918. SHA512_DIGEST_SIZE);
  1919. else
  1920. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1921. digestsize);
  1922. }
  1923. return 0;
  1924. }
  1925. static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
  1926. {
  1927. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1928. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1929. unsigned int digestsize =
  1930. crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
  1931. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1932. sizeof(struct chcr_ahash_req_ctx));
  1933. hmacctx->base_hash = chcr_alloc_shash(digestsize);
  1934. if (IS_ERR(hmacctx->base_hash))
  1935. return PTR_ERR(hmacctx->base_hash);
  1936. return chcr_device_init(crypto_tfm_ctx(tfm));
  1937. }
  1938. static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
  1939. {
  1940. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1941. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1942. if (hmacctx->base_hash) {
  1943. chcr_free_shash(hmacctx->base_hash);
  1944. hmacctx->base_hash = NULL;
  1945. }
  1946. }
  1947. inline void chcr_aead_common_exit(struct aead_request *req)
  1948. {
  1949. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1950. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1951. struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
  1952. chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
  1953. }
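/* Common AEAD setup: validate key and length constraints, map the request
 * for DMA and precompute source SG entry counts for the AAD and payload.
 */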
  1954. static int chcr_aead_common_init(struct aead_request *req)
  1955. {
  1956. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1957. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  1958. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1959. unsigned int authsize = crypto_aead_authsize(tfm);
  1960. int error = -EINVAL;
  1961. /* validate key size */
  1962. if (aeadctx->enckey_len == 0)
  1963. goto err;
  1964. if (reqctx->op && req->cryptlen < authsize)
  1965. goto err;
  1966. if (reqctx->b0_len)
  1967. reqctx->scratch_pad = reqctx->iv + IV;
  1968. else
  1969. reqctx->scratch_pad = NULL;
  1970. error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
  1971. reqctx->op);
  1972. if (error) {
  1973. error = -ENOMEM;
  1974. goto err;
  1975. }
  1976. reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
  1977. CHCR_SRC_SG_SIZE, 0);
  1978. reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
  1979. CHCR_SRC_SG_SIZE, req->assoclen);
  1980. return 0;
  1981. err:
  1982. return error;
  1983. }
  1984. static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
  1985. int aadmax, int wrlen,
  1986. unsigned short op_type)
  1987. {
  1988. unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
  1989. if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
  1990. dst_nents > MAX_DSGL_ENT ||
  1991. (req->assoclen > aadmax) ||
  1992. (wrlen > SGE_MAX_WR_LEN))
  1993. return 1;
  1994. return 0;
  1995. }
  1996. static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
  1997. {
  1998. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1999. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2000. struct aead_request *subreq = aead_request_ctx(req);
  2001. aead_request_set_tfm(subreq, aeadctx->sw_cipher);
  2002. aead_request_set_callback(subreq, req->base.flags,
  2003. req->base.complete, req->base.data);
  2004. aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
  2005. req->iv);
  2006. aead_request_set_ad(subreq, req->assoclen);
  2007. return op_type ? crypto_aead_decrypt(subreq) :
  2008. crypto_aead_encrypt(subreq);
  2009. }
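/* Build a work request for the authenc (cipher + hash) AEAD modes. Falls
 * back to the software implementation when the destination SG list, the
 * AAD length or the WR size exceeds what one hardware WR can describe.
 */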
  2010. static struct sk_buff *create_authenc_wr(struct aead_request *req,
  2011. unsigned short qid,
  2012. int size)
  2013. {
  2014. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2015. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2016. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  2017. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2018. struct sk_buff *skb = NULL;
  2019. struct chcr_wr *chcr_req;
  2020. struct cpl_rx_phys_dsgl *phys_cpl;
  2021. struct ulptx_sgl *ulptx;
  2022. unsigned int transhdr_len;
  2023. unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
  2024. unsigned int kctx_len = 0, dnents;
  2025. unsigned int assoclen = req->assoclen;
  2026. unsigned int authsize = crypto_aead_authsize(tfm);
  2027. int error = -EINVAL;
  2028. int null = 0;
  2029. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2030. GFP_ATOMIC;
  2031. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2032. if (req->cryptlen == 0)
  2033. return NULL;
  2034. reqctx->b0_len = 0;
  2035. error = chcr_aead_common_init(req);
  2036. if (error)
  2037. return ERR_PTR(error);
  2038. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
  2039. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2040. null = 1;
  2041. assoclen = 0;
  2042. reqctx->aad_nents = 0;
  2043. }
  2044. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2045. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2046. (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
  2047. req->assoclen);
  2048. dnents += MIN_AUTH_SG; // For IV
  2049. dst_size = get_space_for_phys_dsgl(dnents);
  2050. kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
  2051. - sizeof(chcr_req->key_ctx);
  2052. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2053. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
  2054. SGE_MAX_WR_LEN;
  2055. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
  2056. : (sgl_len(reqctx->src_nents + reqctx->aad_nents
  2057. + MIN_GCM_SG) * 8);
  2058. transhdr_len += temp;
  2059. transhdr_len = roundup(transhdr_len, 16);
  2060. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2061. transhdr_len, reqctx->op)) {
  2062. atomic_inc(&adap->chcr_stats.fallback);
  2063. chcr_aead_common_exit(req);
  2064. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2065. }
  2066. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2067. if (!skb) {
  2068. error = -ENOMEM;
  2069. goto err;
  2070. }
  2071. chcr_req = __skb_put_zero(skb, transhdr_len);
  2072. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
/*
 * Input order is AAD, IV and payload, where the IV is included as part
 * of the authdata. All other fields are filled according to the
 * hardware spec.
 */
  2078. chcr_req->sec_cpl.op_ivinsrtofst =
  2079. FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
  2080. assoclen + 1);
  2081. chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
  2082. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2083. assoclen ? 1 : 0, assoclen,
  2084. assoclen + IV + 1,
  2085. (temp & 0x1F0) >> 4);
  2086. chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
  2087. temp & 0xF,
  2088. null ? 0 : assoclen + IV + 1,
  2089. temp, temp);
  2090. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
  2091. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
  2092. temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  2093. else
  2094. temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  2095. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
  2096. (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
  2097. temp,
  2098. actx->auth_mode, aeadctx->hmac_ctrl,
  2099. IV >> 1);
  2100. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2101. 0, 0, dst_size);
  2102. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2103. if (reqctx->op == CHCR_ENCRYPT_OP ||
  2104. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2105. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
  2106. memcpy(chcr_req->key_ctx.key, aeadctx->key,
  2107. aeadctx->enckey_len);
  2108. else
  2109. memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
  2110. aeadctx->enckey_len);
  2111. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2112. actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
  2113. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2114. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2115. memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
  2116. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
  2117. CTR_RFC3686_IV_SIZE);
  2118. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  2119. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  2120. } else {
  2121. memcpy(reqctx->iv, req->iv, IV);
  2122. }
  2123. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2124. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2125. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2126. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2127. atomic_inc(&adap->chcr_stats.cipher_rqst);
  2128. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2129. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2130. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2131. transhdr_len, temp, 0);
  2132. reqctx->skb = skb;
  2133. return skb;
  2134. err:
  2135. chcr_aead_common_exit(req);
  2136. return ERR_PTR(error);
  2137. }
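/* Map the IV/B0 scratch area and the source/destination scatterlists of
 * an AEAD request for DMA; src == dst is mapped once, bidirectionally.
 */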
  2138. int chcr_aead_dma_map(struct device *dev,
  2139. struct aead_request *req,
  2140. unsigned short op_type)
  2141. {
  2142. int error;
  2143. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2144. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2145. unsigned int authsize = crypto_aead_authsize(tfm);
  2146. int dst_size;
  2147. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2148. -authsize : authsize);
  2149. if (!req->cryptlen || !dst_size)
  2150. return 0;
  2151. reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
  2152. DMA_BIDIRECTIONAL);
  2153. if (dma_mapping_error(dev, reqctx->iv_dma))
  2154. return -ENOMEM;
  2155. if (reqctx->b0_len)
  2156. reqctx->b0_dma = reqctx->iv_dma + IV;
  2157. else
  2158. reqctx->b0_dma = 0;
  2159. if (req->src == req->dst) {
  2160. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2161. DMA_BIDIRECTIONAL);
  2162. if (!error)
  2163. goto err;
  2164. } else {
  2165. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2166. DMA_TO_DEVICE);
  2167. if (!error)
  2168. goto err;
  2169. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2170. DMA_FROM_DEVICE);
  2171. if (!error) {
  2172. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2173. DMA_TO_DEVICE);
  2174. goto err;
  2175. }
  2176. }
  2177. return 0;
  2178. err:
dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
DMA_BIDIRECTIONAL);
  2180. return -ENOMEM;
  2181. }
  2182. void chcr_aead_dma_unmap(struct device *dev,
  2183. struct aead_request *req,
  2184. unsigned short op_type)
  2185. {
  2186. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2187. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2188. unsigned int authsize = crypto_aead_authsize(tfm);
  2189. int dst_size;
  2190. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2191. -authsize : authsize);
  2192. if (!req->cryptlen || !dst_size)
  2193. return;
  2194. dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
  2195. DMA_BIDIRECTIONAL);
  2196. if (req->src == req->dst) {
  2197. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2198. DMA_BIDIRECTIONAL);
  2199. } else {
  2200. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2201. DMA_TO_DEVICE);
  2202. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2203. DMA_FROM_DEVICE);
  2204. }
  2205. }
  2206. void chcr_add_aead_src_ent(struct aead_request *req,
  2207. struct ulptx_sgl *ulptx,
  2208. unsigned int assoclen)
  2209. {
  2210. struct ulptx_walk ulp_walk;
  2211. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2212. if (reqctx->imm) {
  2213. u8 *buf = (u8 *)ulptx;
  2214. if (reqctx->b0_len) {
  2215. memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
  2216. buf += reqctx->b0_len;
  2217. }
  2218. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2219. buf, assoclen, 0);
  2220. buf += assoclen;
  2221. memcpy(buf, reqctx->iv, IV);
  2222. buf += IV;
  2223. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2224. buf, req->cryptlen, req->assoclen);
  2225. } else {
  2226. ulptx_walk_init(&ulp_walk, ulptx);
  2227. if (reqctx->b0_len)
  2228. ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
  2229. &reqctx->b0_dma);
  2230. ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
  2231. ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
  2232. ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
  2233. req->assoclen);
  2234. ulptx_walk_end(&ulp_walk);
  2235. }
  2236. }
  2237. void chcr_add_aead_dst_ent(struct aead_request *req,
  2238. struct cpl_rx_phys_dsgl *phys_cpl,
  2239. unsigned int assoclen,
  2240. unsigned short qid)
  2241. {
  2242. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2243. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2244. struct dsgl_walk dsgl_walk;
  2245. unsigned int authsize = crypto_aead_authsize(tfm);
  2246. u32 temp;
  2247. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2248. if (reqctx->b0_len)
  2249. dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
  2250. dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
  2251. dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
  2252. temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
  2253. dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
  2254. dsgl_walk_end(&dsgl_walk, qid);
  2255. }
  2256. void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
  2257. void *ulptx,
  2258. struct cipher_wr_param *wrparam)
  2259. {
  2260. struct ulptx_walk ulp_walk;
  2261. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2262. u8 *buf = ulptx;
  2263. memcpy(buf, reqctx->iv, IV);
  2264. buf += IV;
  2265. if (reqctx->imm) {
  2266. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2267. buf, wrparam->bytes, reqctx->processed);
  2268. } else {
  2269. ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
  2270. ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
  2271. reqctx->src_ofst);
  2272. reqctx->srcsg = ulp_walk.last_sg;
  2273. reqctx->src_ofst = ulp_walk.last_sg_len;
  2274. ulptx_walk_end(&ulp_walk);
  2275. }
  2276. }
  2277. void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
  2278. struct cpl_rx_phys_dsgl *phys_cpl,
  2279. struct cipher_wr_param *wrparam,
  2280. unsigned short qid)
  2281. {
  2282. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2283. struct dsgl_walk dsgl_walk;
  2284. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2285. dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
  2286. reqctx->dst_ofst);
  2287. reqctx->dstsg = dsgl_walk.last_sg;
  2288. reqctx->dst_ofst = dsgl_walk.last_sg_len;
  2289. dsgl_walk_end(&dsgl_walk, qid);
  2290. }
  2291. void chcr_add_hash_src_ent(struct ahash_request *req,
  2292. struct ulptx_sgl *ulptx,
  2293. struct hash_wr_param *param)
  2294. {
  2295. struct ulptx_walk ulp_walk;
  2296. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  2297. if (reqctx->hctx_wr.imm) {
  2298. u8 *buf = (u8 *)ulptx;
  2299. if (param->bfr_len) {
  2300. memcpy(buf, reqctx->reqbfr, param->bfr_len);
  2301. buf += param->bfr_len;
  2302. }
  2303. sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
  2304. sg_nents(reqctx->hctx_wr.srcsg), buf,
  2305. param->sg_len, 0);
  2306. } else {
  2307. ulptx_walk_init(&ulp_walk, ulptx);
  2308. if (param->bfr_len)
  2309. ulptx_walk_add_page(&ulp_walk, param->bfr_len,
  2310. &reqctx->hctx_wr.dma_addr);
  2311. ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
  2312. param->sg_len, reqctx->hctx_wr.src_ofst);
  2313. reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
  2314. reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
  2315. ulptx_walk_end(&ulp_walk);
  2316. }
  2317. }
  2318. int chcr_hash_dma_map(struct device *dev,
  2319. struct ahash_request *req)
  2320. {
  2321. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2322. int error = 0;
  2323. if (!req->nbytes)
  2324. return 0;
  2325. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2326. DMA_TO_DEVICE);
  2327. if (!error)
  2328. return -ENOMEM;
  2329. req_ctx->hctx_wr.is_sg_map = 1;
  2330. return 0;
  2331. }
  2332. void chcr_hash_dma_unmap(struct device *dev,
  2333. struct ahash_request *req)
  2334. {
  2335. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2336. if (!req->nbytes)
  2337. return;
  2338. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2339. DMA_TO_DEVICE);
  2340. req_ctx->hctx_wr.is_sg_map = 0;
  2341. }
  2342. int chcr_cipher_dma_map(struct device *dev,
  2343. struct ablkcipher_request *req)
  2344. {
  2345. int error;
  2346. if (req->src == req->dst) {
  2347. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2348. DMA_BIDIRECTIONAL);
  2349. if (!error)
  2350. goto err;
  2351. } else {
  2352. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2353. DMA_TO_DEVICE);
  2354. if (!error)
  2355. goto err;
  2356. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2357. DMA_FROM_DEVICE);
  2358. if (!error) {
  2359. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2360. DMA_TO_DEVICE);
  2361. goto err;
  2362. }
  2363. }
  2364. return 0;
  2365. err:
  2366. return -ENOMEM;
  2367. }
  2368. void chcr_cipher_dma_unmap(struct device *dev,
  2369. struct ablkcipher_request *req)
  2370. {
  2371. if (req->src == req->dst) {
  2372. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2373. DMA_BIDIRECTIONAL);
  2374. } else {
  2375. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2376. DMA_TO_DEVICE);
  2377. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2378. DMA_FROM_DEVICE);
  2379. }
  2380. }
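/* CCM helpers (RFC 3610): set_msg_len() encodes the message length into
 * the trailing L bytes of a block and generate_b0() assembles the B0
 * block (flags, nonce, length) in the request's scratch pad.
 */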
  2381. static int set_msg_len(u8 *block, unsigned int msglen, int csize)
  2382. {
  2383. __be32 data;
  2384. memset(block, 0, csize);
  2385. block += csize;
  2386. if (csize >= 4)
  2387. csize = 4;
  2388. else if (msglen > (unsigned int)(1 << (8 * csize)))
  2389. return -EOVERFLOW;
  2390. data = cpu_to_be32(msglen);
  2391. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  2392. return 0;
  2393. }
static int generate_b0(struct aead_request *req,
  2395. struct chcr_aead_ctx *aeadctx,
  2396. unsigned short op_type)
  2397. {
  2398. unsigned int l, lp, m;
  2399. int rc;
  2400. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2401. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2402. u8 *b0 = reqctx->scratch_pad;
  2403. m = crypto_aead_authsize(aead);
  2404. memcpy(b0, reqctx->iv, 16);
  2405. lp = b0[0];
  2406. l = lp + 1;
  2407. /* set m, bits 3-5 */
  2408. *b0 |= (8 * ((m - 2) / 2));
  2409. /* set adata, bit 6, if associated data is used */
  2410. if (req->assoclen)
  2411. *b0 |= 64;
rc = set_msg_len(b0 + 16 - l,
(op_type == CHCR_DECRYPT_OP) ?
req->cryptlen - m : req->cryptlen, l);
return rc;
}
  2416. static inline int crypto_ccm_check_iv(const u8 *iv)
  2417. {
  2418. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  2419. if (iv[0] < 1 || iv[0] > 7)
  2420. return -EINVAL;
  2421. return 0;
  2422. }
  2423. static int ccm_format_packet(struct aead_request *req,
  2424. struct chcr_aead_ctx *aeadctx,
  2425. unsigned int sub_type,
  2426. unsigned short op_type,
  2427. unsigned int assoclen)
  2428. {
  2429. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2430. int rc = 0;
  2431. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2432. reqctx->iv[0] = 3;
  2433. memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
  2434. memcpy(reqctx->iv + 4, req->iv, 8);
  2435. memset(reqctx->iv + 12, 0, 4);
  2436. } else {
  2437. memcpy(reqctx->iv, req->iv, 16);
  2438. }
  2439. if (assoclen)
  2440. *((unsigned short *)(reqctx->scratch_pad + 16)) =
  2441. htons(assoclen);
rc = generate_b0(req, aeadctx, op_type);
  2443. /* zero the ctr value */
  2444. memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
  2445. return rc;
  2446. }
  2447. static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
  2448. unsigned int dst_size,
  2449. struct aead_request *req,
  2450. unsigned short op_type)
  2451. {
  2452. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2453. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2454. unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
  2455. unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
  2456. unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
  2457. unsigned int ccm_xtra;
  2458. unsigned char tag_offset = 0, auth_offset = 0;
  2459. unsigned int assoclen;
  2460. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2461. assoclen = req->assoclen - 8;
  2462. else
  2463. assoclen = req->assoclen;
  2464. ccm_xtra = CCM_B0_SIZE +
  2465. ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
  2466. auth_offset = req->cryptlen ?
  2467. (assoclen + IV + 1 + ccm_xtra) : 0;
  2468. if (op_type == CHCR_DECRYPT_OP) {
  2469. if (crypto_aead_authsize(tfm) != req->cryptlen)
  2470. tag_offset = crypto_aead_authsize(tfm);
  2471. else
  2472. auth_offset = 0;
  2473. }
  2474. sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
  2475. 2, assoclen + 1 + ccm_xtra);
  2476. sec_cpl->pldlen =
  2477. htonl(assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM, B0 is always present, so the AAD start is always 1 */
  2479. sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2480. 1, assoclen + ccm_xtra, assoclen
  2481. + IV + 1 + ccm_xtra, 0);
  2482. sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
  2483. auth_offset, tag_offset,
  2484. (op_type == CHCR_ENCRYPT_OP) ? 0 :
  2485. crypto_aead_authsize(tfm));
  2486. sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
  2487. (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
  2488. cipher_mode, mac_mode,
  2489. aeadctx->hmac_ctrl, IV >> 1);
  2490. sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
  2491. 0, dst_size);
  2492. }
  2493. static int aead_ccm_validate_input(unsigned short op_type,
  2494. struct aead_request *req,
  2495. struct chcr_aead_ctx *aeadctx,
  2496. unsigned int sub_type)
  2497. {
  2498. if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2499. if (crypto_ccm_check_iv(req->iv)) {
  2500. pr_err("CCM: IV check fails\n");
  2501. return -EINVAL;
  2502. }
  2503. } else {
  2504. if (req->assoclen != 16 && req->assoclen != 20) {
  2505. pr_err("RFC4309: Invalid AAD length %d\n",
  2506. req->assoclen);
  2507. return -EINVAL;
  2508. }
  2509. }
  2510. return 0;
  2511. }
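/* Build a work request for CCM / RFC4309 AEAD: format B0 and the AAD
 * length field, then lay out the key context, destination DSGL and source
 * data (immediate or SGL) in the WR.
 */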
  2512. static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
  2513. unsigned short qid,
  2514. int size)
  2515. {
  2516. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2517. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2518. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2519. struct sk_buff *skb = NULL;
  2520. struct chcr_wr *chcr_req;
  2521. struct cpl_rx_phys_dsgl *phys_cpl;
  2522. struct ulptx_sgl *ulptx;
  2523. unsigned int transhdr_len;
  2524. unsigned int dst_size = 0, kctx_len, dnents, temp;
  2525. unsigned int sub_type, assoclen = req->assoclen;
  2526. unsigned int authsize = crypto_aead_authsize(tfm);
  2527. int error = -EINVAL;
  2528. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2529. GFP_ATOMIC;
  2530. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2531. sub_type = get_aead_subtype(tfm);
  2532. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2533. assoclen -= 8;
  2534. reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
  2535. error = chcr_aead_common_init(req);
  2536. if (error)
  2537. return ERR_PTR(error);
  2538. error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
  2539. if (error)
  2540. goto err;
  2541. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2542. dnents += sg_nents_xlen(req->dst, req->cryptlen
  2543. + (reqctx->op ? -authsize : authsize),
  2544. CHCR_DST_SG_SIZE, req->assoclen);
  2545. dnents += MIN_CCM_SG; // For IV and B0
  2546. dst_size = get_space_for_phys_dsgl(dnents);
  2547. kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
  2548. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2549. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
  2550. reqctx->b0_len) <= SGE_MAX_WR_LEN;
  2551. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
  2552. reqctx->b0_len, 16) :
  2553. (sgl_len(reqctx->src_nents + reqctx->aad_nents +
  2554. MIN_CCM_SG) * 8);
  2555. transhdr_len += temp;
  2556. transhdr_len = roundup(transhdr_len, 16);
  2557. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
  2558. reqctx->b0_len, transhdr_len, reqctx->op)) {
  2559. atomic_inc(&adap->chcr_stats.fallback);
  2560. chcr_aead_common_exit(req);
  2561. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2562. }
  2563. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2564. if (!skb) {
  2565. error = -ENOMEM;
  2566. goto err;
  2567. }
  2568. chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
  2569. fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
  2570. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2571. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2572. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2573. aeadctx->key, aeadctx->enckey_len);
  2574. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2575. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2576. error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
  2577. if (error)
  2578. goto dstmap_fail;
  2579. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2580. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2581. atomic_inc(&adap->chcr_stats.aead_rqst);
  2582. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2583. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
  2584. reqctx->b0_len) : 0);
  2585. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
  2586. transhdr_len, temp, 0);
  2587. reqctx->skb = skb;
  2588. return skb;
  2589. dstmap_fail:
  2590. kfree_skb(skb);
  2591. err:
  2592. chcr_aead_common_exit(req);
  2593. return ERR_PTR(error);
  2594. }
  2595. static struct sk_buff *create_gcm_wr(struct aead_request *req,
  2596. unsigned short qid,
  2597. int size)
  2598. {
  2599. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2600. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2601. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2602. struct sk_buff *skb = NULL;
  2603. struct chcr_wr *chcr_req;
  2604. struct cpl_rx_phys_dsgl *phys_cpl;
  2605. struct ulptx_sgl *ulptx;
  2606. unsigned int transhdr_len, dnents = 0;
  2607. unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
  2608. unsigned int authsize = crypto_aead_authsize(tfm);
  2609. int error = -EINVAL;
  2610. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2611. GFP_ATOMIC;
  2612. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2613. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
  2614. assoclen = req->assoclen - 8;
  2615. reqctx->b0_len = 0;
  2616. error = chcr_aead_common_init(req);
  2617. if (error)
  2618. return ERR_PTR(error);
  2619. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2620. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2621. (reqctx->op ? -authsize : authsize),
  2622. CHCR_DST_SG_SIZE, req->assoclen);
  2623. dnents += MIN_GCM_SG; // For IV
  2624. dst_size = get_space_for_phys_dsgl(dnents);
  2625. kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
  2626. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2627. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
  2628. SGE_MAX_WR_LEN;
  2629. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
  2630. (sgl_len(reqctx->src_nents +
  2631. reqctx->aad_nents + MIN_GCM_SG) * 8);
  2632. transhdr_len += temp;
  2633. transhdr_len = roundup(transhdr_len, 16);
  2634. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2635. transhdr_len, reqctx->op)) {
  2636. atomic_inc(&adap->chcr_stats.fallback);
  2637. chcr_aead_common_exit(req);
  2638. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2639. }
  2640. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2641. if (!skb) {
  2642. error = -ENOMEM;
  2643. goto err;
  2644. }
  2645. chcr_req = __skb_put_zero(skb, transhdr_len);
  2646. //Offset of tag from end
  2647. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
  2648. chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
  2649. a_ctx(tfm)->dev->rx_channel_id, 2,
  2650. (assoclen + 1));
  2651. chcr_req->sec_cpl.pldlen =
  2652. htonl(assoclen + IV + req->cryptlen);
  2653. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2654. assoclen ? 1 : 0, assoclen,
  2655. assoclen + IV + 1, 0);
  2656. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  2657. FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
  2658. temp, temp);
  2659. chcr_req->sec_cpl.seqno_numivs =
  2660. FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
  2661. CHCR_ENCRYPT_OP) ? 1 : 0,
  2662. CHCR_SCMD_CIPHER_MODE_AES_GCM,
  2663. CHCR_SCMD_AUTH_MODE_GHASH,
  2664. aeadctx->hmac_ctrl, IV >> 1);
  2665. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2666. 0, 0, dst_size);
  2667. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2668. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2669. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2670. GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
  2671. /* prepare a 16 byte iv */
  2672. /* S A L T | IV | 0x00000001 */
  2673. if (get_aead_subtype(tfm) ==
  2674. CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
  2675. memcpy(reqctx->iv, aeadctx->salt, 4);
  2676. memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
  2677. } else {
  2678. memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
  2679. }
  2680. *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
  2681. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2682. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2683. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2684. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2685. atomic_inc(&adap->chcr_stats.aead_rqst);
  2686. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2687. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2688. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2689. transhdr_len, temp, reqctx->verify);
  2690. reqctx->skb = skb;
  2691. return skb;
  2692. err:
  2693. chcr_aead_common_exit(req);
  2694. return ERR_PTR(error);
  2695. }
  2696. static int chcr_aead_cra_init(struct crypto_aead *tfm)
  2697. {
  2698. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2699. struct aead_alg *alg = crypto_aead_alg(tfm);
  2700. aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
  2701. CRYPTO_ALG_NEED_FALLBACK |
  2702. CRYPTO_ALG_ASYNC);
  2703. if (IS_ERR(aeadctx->sw_cipher))
  2704. return PTR_ERR(aeadctx->sw_cipher);
  2705. crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
  2706. sizeof(struct aead_request) +
  2707. crypto_aead_reqsize(aeadctx->sw_cipher)));
  2708. return chcr_device_init(a_ctx(tfm));
  2709. }
  2710. static void chcr_aead_cra_exit(struct crypto_aead *tfm)
  2711. {
  2712. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2713. crypto_free_aead(aeadctx->sw_cipher);
  2714. }
  2715. static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
  2716. unsigned int authsize)
  2717. {
  2718. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2719. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
  2720. aeadctx->mayverify = VERIFY_HW;
  2721. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2722. }
  2723. static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
  2724. unsigned int authsize)
  2725. {
  2726. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2727. u32 maxauth = crypto_aead_maxauthsize(tfm);
  2728. /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
  2729. * true for sha1. authsize == 12 condition should be before
  2730. * authsize == (maxauth >> 1)
  2731. */
  2732. if (authsize == ICV_4) {
  2733. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2734. aeadctx->mayverify = VERIFY_HW;
  2735. } else if (authsize == ICV_6) {
  2736. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2737. aeadctx->mayverify = VERIFY_HW;
  2738. } else if (authsize == ICV_10) {
  2739. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2740. aeadctx->mayverify = VERIFY_HW;
  2741. } else if (authsize == ICV_12) {
  2742. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2743. aeadctx->mayverify = VERIFY_HW;
  2744. } else if (authsize == ICV_14) {
  2745. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2746. aeadctx->mayverify = VERIFY_HW;
  2747. } else if (authsize == (maxauth >> 1)) {
  2748. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2749. aeadctx->mayverify = VERIFY_HW;
  2750. } else if (authsize == maxauth) {
  2751. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2752. aeadctx->mayverify = VERIFY_HW;
  2753. } else {
  2754. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2755. aeadctx->mayverify = VERIFY_SW;
  2756. }
  2757. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2758. }
  2759. static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
  2760. {
  2761. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2762. switch (authsize) {
  2763. case ICV_4:
  2764. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2765. aeadctx->mayverify = VERIFY_HW;
  2766. break;
  2767. case ICV_8:
  2768. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2769. aeadctx->mayverify = VERIFY_HW;
  2770. break;
  2771. case ICV_12:
  2772. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2773. aeadctx->mayverify = VERIFY_HW;
  2774. break;
  2775. case ICV_14:
  2776. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2777. aeadctx->mayverify = VERIFY_HW;
  2778. break;
  2779. case ICV_16:
  2780. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2781. aeadctx->mayverify = VERIFY_HW;
  2782. break;
  2783. case ICV_13:
  2784. case ICV_15:
  2785. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2786. aeadctx->mayverify = VERIFY_SW;
  2787. break;
  2788. default:
  2789. crypto_tfm_set_flags((struct crypto_tfm *) tfm,
  2790. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2791. return -EINVAL;
  2792. }
  2793. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2794. }
  2795. static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
  2796. unsigned int authsize)
  2797. {
  2798. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2799. switch (authsize) {
  2800. case ICV_8:
  2801. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2802. aeadctx->mayverify = VERIFY_HW;
  2803. break;
  2804. case ICV_12:
  2805. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2806. aeadctx->mayverify = VERIFY_HW;
  2807. break;
  2808. case ICV_16:
  2809. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2810. aeadctx->mayverify = VERIFY_HW;
  2811. break;
  2812. default:
  2813. crypto_tfm_set_flags((struct crypto_tfm *)tfm,
  2814. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2815. return -EINVAL;
  2816. }
  2817. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2818. }
  2819. static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
  2820. unsigned int authsize)
  2821. {
  2822. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2823. switch (authsize) {
  2824. case ICV_4:
  2825. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2826. aeadctx->mayverify = VERIFY_HW;
  2827. break;
  2828. case ICV_6:
  2829. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2830. aeadctx->mayverify = VERIFY_HW;
  2831. break;
  2832. case ICV_8:
  2833. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2834. aeadctx->mayverify = VERIFY_HW;
  2835. break;
  2836. case ICV_10:
  2837. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2838. aeadctx->mayverify = VERIFY_HW;
  2839. break;
  2840. case ICV_12:
  2841. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2842. aeadctx->mayverify = VERIFY_HW;
  2843. break;
  2844. case ICV_14:
  2845. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2846. aeadctx->mayverify = VERIFY_HW;
  2847. break;
  2848. case ICV_16:
  2849. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2850. aeadctx->mayverify = VERIFY_HW;
  2851. break;
  2852. default:
  2853. crypto_tfm_set_flags((struct crypto_tfm *)tfm,
  2854. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2855. return -EINVAL;
  2856. }
  2857. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2858. }
  2859. static int chcr_ccm_common_setkey(struct crypto_aead *aead,
  2860. const u8 *key,
  2861. unsigned int keylen)
  2862. {
  2863. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2864. unsigned char ck_size, mk_size;
  2865. int key_ctx_size = 0;
  2866. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
  2867. if (keylen == AES_KEYSIZE_128) {
  2868. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2869. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
  2870. } else if (keylen == AES_KEYSIZE_192) {
  2871. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2872. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
  2873. } else if (keylen == AES_KEYSIZE_256) {
  2874. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2875. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  2876. } else {
  2877. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2878. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2879. aeadctx->enckey_len = 0;
  2880. return -EINVAL;
  2881. }
  2882. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
  2883. key_ctx_size >> 4);
  2884. memcpy(aeadctx->key, key, keylen);
  2885. aeadctx->enckey_len = keylen;
  2886. return 0;
  2887. }
  2888. static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
  2889. const u8 *key,
  2890. unsigned int keylen)
  2891. {
  2892. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2893. int error;
  2894. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2895. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2896. CRYPTO_TFM_REQ_MASK);
  2897. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2898. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2899. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2900. CRYPTO_TFM_RES_MASK);
  2901. if (error)
  2902. return error;
  2903. return chcr_ccm_common_setkey(aead, key, keylen);
  2904. }
  2905. static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
  2906. unsigned int keylen)
  2907. {
  2908. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2909. int error;
  2910. if (keylen < 3) {
  2911. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2912. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2913. aeadctx->enckey_len = 0;
  2914. return -EINVAL;
  2915. }
  2916. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2917. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2918. CRYPTO_TFM_REQ_MASK);
  2919. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2920. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2921. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2922. CRYPTO_TFM_RES_MASK);
  2923. if (error)
  2924. return error;
  2925. keylen -= 3;
  2926. memcpy(aeadctx->salt, key + keylen, 3);
  2927. return chcr_ccm_common_setkey(aead, key, keylen);
  2928. }
  2929. static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
  2930. unsigned int keylen)
  2931. {
  2932. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2933. struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
  2934. struct crypto_cipher *cipher;
  2935. unsigned int ck_size;
  2936. int ret = 0, key_ctx_size = 0;
  2937. aeadctx->enckey_len = 0;
  2938. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2939. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
  2940. & CRYPTO_TFM_REQ_MASK);
  2941. ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2942. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2943. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2944. CRYPTO_TFM_RES_MASK);
  2945. if (ret)
  2946. goto out;
  2947. if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
  2948. keylen > 3) {
  2949. keylen -= 4; /* nonce/salt is present in the last 4 bytes */
  2950. memcpy(aeadctx->salt, key + keylen, 4);
  2951. }
  2952. if (keylen == AES_KEYSIZE_128) {
  2953. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2954. } else if (keylen == AES_KEYSIZE_192) {
  2955. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2956. } else if (keylen == AES_KEYSIZE_256) {
  2957. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2958. } else {
  2959. crypto_tfm_set_flags((struct crypto_tfm *)aead,
  2960. CRYPTO_TFM_RES_BAD_KEY_LEN);
  2961. pr_err("GCM: Invalid key length %d\n", keylen);
  2962. ret = -EINVAL;
  2963. goto out;
  2964. }
  2965. memcpy(aeadctx->key, key, keylen);
  2966. aeadctx->enckey_len = keylen;
  2967. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
  2968. AEAD_H_SIZE;
  2969. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
  2970. CHCR_KEYCTX_MAC_KEY_SIZE_128,
  2971. 0, 0,
  2972. key_ctx_size >> 4);
  2973. /* Calculate the H = CIPH(K, 0 repeated 16 times).
  2974. * It will go in key context
  2975. */
  2976. cipher = crypto_alloc_cipher("aes-generic", 0, 0);
  2977. if (IS_ERR(cipher)) {
  2978. aeadctx->enckey_len = 0;
  2979. ret = -ENOMEM;
  2980. goto out;
  2981. }
  2982. ret = crypto_cipher_setkey(cipher, key, keylen);
  2983. if (ret) {
  2984. aeadctx->enckey_len = 0;
  2985. goto out1;
  2986. }
  2987. memset(gctx->ghash_h, 0, AEAD_H_SIZE);
  2988. crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
  2989. out1:
  2990. crypto_free_cipher(cipher);
  2991. out:
  2992. return ret;
  2993. }
  2994. static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
  2995. unsigned int keylen)
  2996. {
  2997. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  2998. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  2999. /* it contains auth and cipher key both*/
  3000. struct crypto_authenc_keys keys;
  3001. unsigned int bs, subtype;
  3002. unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
  3003. int err = 0, i, key_ctx_len = 0;
  3004. unsigned char ck_size = 0;
  3005. unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
  3006. struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
  3007. struct algo_param param;
  3008. int align;
  3009. u8 *o_ptr = NULL;
  3010. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3011. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3012. & CRYPTO_TFM_REQ_MASK);
  3013. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3014. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3015. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3016. & CRYPTO_TFM_RES_MASK);
  3017. if (err)
  3018. goto out;
  3019. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3020. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3021. goto out;
  3022. }
  3023. if (get_alg_config(&param, max_authsize)) {
  3024. pr_err("chcr : Unsupported digest size\n");
  3025. goto out;
  3026. }
  3027. subtype = get_aead_subtype(authenc);
  3028. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3029. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3030. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3031. goto out;
  3032. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3033. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3034. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3035. }
  3036. if (keys.enckeylen == AES_KEYSIZE_128) {
  3037. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3038. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3039. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3040. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3041. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3042. } else {
  3043. pr_err("chcr : Unsupported cipher key\n");
  3044. goto out;
  3045. }
  3046. /* Copy only encryption key. We use authkey to generate h(ipad) and
  3047. * h(opad) so authkey is not needed again. authkeylen size have the
  3048. * size of the hash digest size.
  3049. */
  3050. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3051. aeadctx->enckey_len = keys.enckeylen;
  3052. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3053. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3054. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3055. aeadctx->enckey_len << 3);
  3056. }
  3057. base_hash = chcr_alloc_shash(max_authsize);
  3058. if (IS_ERR(base_hash)) {
  3059. pr_err("chcr : Base driver cannot be loaded\n");
  3060. aeadctx->enckey_len = 0;
  3061. memzero_explicit(&keys, sizeof(keys));
  3062. return -EINVAL;
  3063. }
  3064. {
  3065. SHASH_DESC_ON_STACK(shash, base_hash);
  3066. shash->tfm = base_hash;
  3067. shash->flags = crypto_shash_get_flags(base_hash);
  3068. bs = crypto_shash_blocksize(base_hash);
  3069. align = KEYCTX_ALIGN_PAD(max_authsize);
  3070. o_ptr = actx->h_iopad + param.result_size + align;
  3071. if (keys.authkeylen > bs) {
  3072. err = crypto_shash_digest(shash, keys.authkey,
  3073. keys.authkeylen,
  3074. o_ptr);
  3075. if (err) {
  3076. pr_err("chcr : Base driver cannot be loaded\n");
  3077. goto out;
  3078. }
  3079. keys.authkeylen = max_authsize;
  3080. } else
  3081. memcpy(o_ptr, keys.authkey, keys.authkeylen);
  3082. /* Compute the ipad-digest*/
  3083. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3084. memcpy(pad, o_ptr, keys.authkeylen);
  3085. for (i = 0; i < bs >> 2; i++)
  3086. *((unsigned int *)pad + i) ^= IPAD_DATA;
  3087. if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
  3088. max_authsize))
  3089. goto out;
  3090. /* Compute the opad-digest */
  3091. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3092. memcpy(pad, o_ptr, keys.authkeylen);
  3093. for (i = 0; i < bs >> 2; i++)
  3094. *((unsigned int *)pad + i) ^= OPAD_DATA;
  3095. if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
  3096. goto out;
  3097. /* convert the ipad and opad digest to network order */
  3098. chcr_change_order(actx->h_iopad, param.result_size);
  3099. chcr_change_order(o_ptr, param.result_size);
  3100. key_ctx_len = sizeof(struct _key_ctx) +
  3101. roundup(keys.enckeylen, 16) +
  3102. (param.result_size + align) * 2;
  3103. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
  3104. 0, 1, key_ctx_len >> 4);
  3105. actx->auth_mode = param.auth_mode;
  3106. chcr_free_shash(base_hash);
  3107. memzero_explicit(&keys, sizeof(keys));
  3108. return 0;
  3109. }
  3110. out:
  3111. aeadctx->enckey_len = 0;
  3112. memzero_explicit(&keys, sizeof(keys));
  3113. if (!IS_ERR(base_hash))
  3114. chcr_free_shash(base_hash);
  3115. return -EINVAL;
  3116. }
  3117. static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
  3118. const u8 *key, unsigned int keylen)
  3119. {
  3120. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  3121. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  3122. struct crypto_authenc_keys keys;
  3123. int err;
  3124. /* it contains auth and cipher key both*/
  3125. unsigned int subtype;
  3126. int key_ctx_len = 0;
  3127. unsigned char ck_size = 0;
  3128. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3129. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3130. & CRYPTO_TFM_REQ_MASK);
  3131. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3132. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3133. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3134. & CRYPTO_TFM_RES_MASK);
  3135. if (err)
  3136. goto out;
  3137. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3138. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3139. goto out;
  3140. }
  3141. subtype = get_aead_subtype(authenc);
  3142. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3143. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3144. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3145. goto out;
  3146. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3147. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3148. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3149. }
  3150. if (keys.enckeylen == AES_KEYSIZE_128) {
  3151. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3152. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3153. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3154. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3155. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3156. } else {
  3157. pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
  3158. goto out;
  3159. }
  3160. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3161. aeadctx->enckey_len = keys.enckeylen;
  3162. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3163. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3164. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3165. aeadctx->enckey_len << 3);
  3166. }
  3167. key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
  3168. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
  3169. 0, key_ctx_len >> 4);
  3170. actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
  3171. memzero_explicit(&keys, sizeof(keys));
  3172. return 0;
  3173. out:
  3174. aeadctx->enckey_len = 0;
  3175. memzero_explicit(&keys, sizeof(keys));
  3176. return -EINVAL;
  3177. }
  3178. static int chcr_aead_op(struct aead_request *req,
  3179. int size,
  3180. create_wr_t create_wr_fn)
  3181. {
  3182. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3183. struct uld_ctx *u_ctx;
  3184. struct sk_buff *skb;
  3185. int isfull = 0;
  3186. if (!a_ctx(tfm)->dev) {
  3187. pr_err("chcr : %s : No crypto device.\n", __func__);
  3188. return -ENXIO;
  3189. }
  3190. u_ctx = ULD_CTX(a_ctx(tfm));
  3191. if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  3192. a_ctx(tfm)->tx_qidx)) {
  3193. isfull = 1;
  3194. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  3195. return -ENOSPC;
  3196. }
  3197. /* Form a WR from req */
  3198. skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
  3199. if (IS_ERR(skb) || !skb)
  3200. return PTR_ERR(skb);
  3201. skb->dev = u_ctx->lldi.ports[0];
  3202. set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
  3203. chcr_send_wr(skb);
  3204. return isfull ? -EBUSY : -EINPROGRESS;
  3205. }
  3206. static int chcr_aead_encrypt(struct aead_request *req)
  3207. {
  3208. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3209. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3210. reqctx->verify = VERIFY_HW;
  3211. reqctx->op = CHCR_ENCRYPT_OP;
  3212. switch (get_aead_subtype(tfm)) {
  3213. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3214. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3215. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3216. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3217. return chcr_aead_op(req, 0, create_authenc_wr);
  3218. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3219. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3220. return chcr_aead_op(req, 0, create_aead_ccm_wr);
  3221. default:
  3222. return chcr_aead_op(req, 0, create_gcm_wr);
  3223. }
  3224. }
  3225. static int chcr_aead_decrypt(struct aead_request *req)
  3226. {
  3227. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3228. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  3229. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3230. int size;
  3231. if (aeadctx->mayverify == VERIFY_SW) {
  3232. size = crypto_aead_maxauthsize(tfm);
  3233. reqctx->verify = VERIFY_SW;
  3234. } else {
  3235. size = 0;
  3236. reqctx->verify = VERIFY_HW;
  3237. }
  3238. reqctx->op = CHCR_DECRYPT_OP;
  3239. switch (get_aead_subtype(tfm)) {
  3240. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3241. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3242. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3243. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3244. return chcr_aead_op(req, size, create_authenc_wr);
  3245. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3246. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3247. return chcr_aead_op(req, size, create_aead_ccm_wr);
  3248. default:
  3249. return chcr_aead_op(req, size, create_gcm_wr);
  3250. }
  3251. }
  3252. static struct chcr_alg_template driver_algs[] = {
  3253. /* AES-CBC */
  3254. {
  3255. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
  3256. .is_registered = 0,
  3257. .alg.crypto = {
  3258. .cra_name = "cbc(aes)",
  3259. .cra_driver_name = "cbc-aes-chcr",
  3260. .cra_blocksize = AES_BLOCK_SIZE,
  3261. .cra_init = chcr_cra_init,
  3262. .cra_exit = chcr_cra_exit,
  3263. .cra_u.ablkcipher = {
  3264. .min_keysize = AES_MIN_KEY_SIZE,
  3265. .max_keysize = AES_MAX_KEY_SIZE,
  3266. .ivsize = AES_BLOCK_SIZE,
  3267. .setkey = chcr_aes_cbc_setkey,
  3268. .encrypt = chcr_aes_encrypt,
  3269. .decrypt = chcr_aes_decrypt,
  3270. }
  3271. }
  3272. },
  3273. {
  3274. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
  3275. .is_registered = 0,
  3276. .alg.crypto = {
  3277. .cra_name = "xts(aes)",
  3278. .cra_driver_name = "xts-aes-chcr",
  3279. .cra_blocksize = AES_BLOCK_SIZE,
  3280. .cra_init = chcr_cra_init,
  3281. .cra_exit = NULL,
  3282. .cra_u .ablkcipher = {
  3283. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  3284. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  3285. .ivsize = AES_BLOCK_SIZE,
  3286. .setkey = chcr_aes_xts_setkey,
  3287. .encrypt = chcr_aes_encrypt,
  3288. .decrypt = chcr_aes_decrypt,
  3289. }
  3290. }
  3291. },
  3292. {
  3293. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
  3294. .is_registered = 0,
  3295. .alg.crypto = {
  3296. .cra_name = "ctr(aes)",
  3297. .cra_driver_name = "ctr-aes-chcr",
  3298. .cra_blocksize = 1,
  3299. .cra_init = chcr_cra_init,
  3300. .cra_exit = chcr_cra_exit,
  3301. .cra_u.ablkcipher = {
  3302. .min_keysize = AES_MIN_KEY_SIZE,
  3303. .max_keysize = AES_MAX_KEY_SIZE,
  3304. .ivsize = AES_BLOCK_SIZE,
  3305. .setkey = chcr_aes_ctr_setkey,
  3306. .encrypt = chcr_aes_encrypt,
  3307. .decrypt = chcr_aes_decrypt,
  3308. }
  3309. }
  3310. },
  3311. {
  3312. .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
  3313. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
  3314. .is_registered = 0,
  3315. .alg.crypto = {
  3316. .cra_name = "rfc3686(ctr(aes))",
  3317. .cra_driver_name = "rfc3686-ctr-aes-chcr",
  3318. .cra_blocksize = 1,
  3319. .cra_init = chcr_rfc3686_init,
  3320. .cra_exit = chcr_cra_exit,
  3321. .cra_u.ablkcipher = {
  3322. .min_keysize = AES_MIN_KEY_SIZE +
  3323. CTR_RFC3686_NONCE_SIZE,
  3324. .max_keysize = AES_MAX_KEY_SIZE +
  3325. CTR_RFC3686_NONCE_SIZE,
  3326. .ivsize = CTR_RFC3686_IV_SIZE,
  3327. .setkey = chcr_aes_rfc3686_setkey,
  3328. .encrypt = chcr_aes_encrypt,
  3329. .decrypt = chcr_aes_decrypt,
  3330. .geniv = "seqiv",
  3331. }
  3332. }
  3333. },
  3334. /* SHA */
  3335. {
  3336. .type = CRYPTO_ALG_TYPE_AHASH,
  3337. .is_registered = 0,
  3338. .alg.hash = {
  3339. .halg.digestsize = SHA1_DIGEST_SIZE,
  3340. .halg.base = {
  3341. .cra_name = "sha1",
  3342. .cra_driver_name = "sha1-chcr",
  3343. .cra_blocksize = SHA1_BLOCK_SIZE,
  3344. }
  3345. }
  3346. },
  3347. {
  3348. .type = CRYPTO_ALG_TYPE_AHASH,
  3349. .is_registered = 0,
  3350. .alg.hash = {
  3351. .halg.digestsize = SHA256_DIGEST_SIZE,
  3352. .halg.base = {
  3353. .cra_name = "sha256",
  3354. .cra_driver_name = "sha256-chcr",
  3355. .cra_blocksize = SHA256_BLOCK_SIZE,
  3356. }
  3357. }
  3358. },
  3359. {
  3360. .type = CRYPTO_ALG_TYPE_AHASH,
  3361. .is_registered = 0,
  3362. .alg.hash = {
  3363. .halg.digestsize = SHA224_DIGEST_SIZE,
  3364. .halg.base = {
  3365. .cra_name = "sha224",
  3366. .cra_driver_name = "sha224-chcr",
  3367. .cra_blocksize = SHA224_BLOCK_SIZE,
  3368. }
  3369. }
  3370. },
  3371. {
  3372. .type = CRYPTO_ALG_TYPE_AHASH,
  3373. .is_registered = 0,
  3374. .alg.hash = {
  3375. .halg.digestsize = SHA384_DIGEST_SIZE,
  3376. .halg.base = {
  3377. .cra_name = "sha384",
  3378. .cra_driver_name = "sha384-chcr",
  3379. .cra_blocksize = SHA384_BLOCK_SIZE,
  3380. }
  3381. }
  3382. },
  3383. {
  3384. .type = CRYPTO_ALG_TYPE_AHASH,
  3385. .is_registered = 0,
  3386. .alg.hash = {
  3387. .halg.digestsize = SHA512_DIGEST_SIZE,
  3388. .halg.base = {
  3389. .cra_name = "sha512",
  3390. .cra_driver_name = "sha512-chcr",
  3391. .cra_blocksize = SHA512_BLOCK_SIZE,
  3392. }
  3393. }
  3394. },
  3395. /* HMAC */
  3396. {
  3397. .type = CRYPTO_ALG_TYPE_HMAC,
  3398. .is_registered = 0,
  3399. .alg.hash = {
  3400. .halg.digestsize = SHA1_DIGEST_SIZE,
  3401. .halg.base = {
  3402. .cra_name = "hmac(sha1)",
  3403. .cra_driver_name = "hmac-sha1-chcr",
  3404. .cra_blocksize = SHA1_BLOCK_SIZE,
  3405. }
  3406. }
  3407. },
  3408. {
  3409. .type = CRYPTO_ALG_TYPE_HMAC,
  3410. .is_registered = 0,
  3411. .alg.hash = {
  3412. .halg.digestsize = SHA224_DIGEST_SIZE,
  3413. .halg.base = {
  3414. .cra_name = "hmac(sha224)",
  3415. .cra_driver_name = "hmac-sha224-chcr",
  3416. .cra_blocksize = SHA224_BLOCK_SIZE,
  3417. }
  3418. }
  3419. },
  3420. {
  3421. .type = CRYPTO_ALG_TYPE_HMAC,
  3422. .is_registered = 0,
  3423. .alg.hash = {
  3424. .halg.digestsize = SHA256_DIGEST_SIZE,
  3425. .halg.base = {
  3426. .cra_name = "hmac(sha256)",
  3427. .cra_driver_name = "hmac-sha256-chcr",
  3428. .cra_blocksize = SHA256_BLOCK_SIZE,
  3429. }
  3430. }
  3431. },
  3432. {
  3433. .type = CRYPTO_ALG_TYPE_HMAC,
  3434. .is_registered = 0,
  3435. .alg.hash = {
  3436. .halg.digestsize = SHA384_DIGEST_SIZE,
  3437. .halg.base = {
  3438. .cra_name = "hmac(sha384)",
  3439. .cra_driver_name = "hmac-sha384-chcr",
  3440. .cra_blocksize = SHA384_BLOCK_SIZE,
  3441. }
  3442. }
  3443. },
  3444. {
  3445. .type = CRYPTO_ALG_TYPE_HMAC,
  3446. .is_registered = 0,
  3447. .alg.hash = {
  3448. .halg.digestsize = SHA512_DIGEST_SIZE,
  3449. .halg.base = {
  3450. .cra_name = "hmac(sha512)",
  3451. .cra_driver_name = "hmac-sha512-chcr",
  3452. .cra_blocksize = SHA512_BLOCK_SIZE,
  3453. }
  3454. }
  3455. },
  3456. /* Add AEAD Algorithms */
  3457. {
  3458. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
  3459. .is_registered = 0,
  3460. .alg.aead = {
  3461. .base = {
  3462. .cra_name = "gcm(aes)",
  3463. .cra_driver_name = "gcm-aes-chcr",
  3464. .cra_blocksize = 1,
  3465. .cra_priority = CHCR_AEAD_PRIORITY,
  3466. .cra_ctxsize = sizeof(struct chcr_context) +
  3467. sizeof(struct chcr_aead_ctx) +
  3468. sizeof(struct chcr_gcm_ctx),
  3469. },
  3470. .ivsize = GCM_AES_IV_SIZE,
  3471. .maxauthsize = GHASH_DIGEST_SIZE,
  3472. .setkey = chcr_gcm_setkey,
  3473. .setauthsize = chcr_gcm_setauthsize,
  3474. }
  3475. },
  3476. {
  3477. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
  3478. .is_registered = 0,
  3479. .alg.aead = {
  3480. .base = {
  3481. .cra_name = "rfc4106(gcm(aes))",
  3482. .cra_driver_name = "rfc4106-gcm-aes-chcr",
  3483. .cra_blocksize = 1,
  3484. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3485. .cra_ctxsize = sizeof(struct chcr_context) +
  3486. sizeof(struct chcr_aead_ctx) +
  3487. sizeof(struct chcr_gcm_ctx),
  3488. },
  3489. .ivsize = GCM_RFC4106_IV_SIZE,
  3490. .maxauthsize = GHASH_DIGEST_SIZE,
  3491. .setkey = chcr_gcm_setkey,
  3492. .setauthsize = chcr_4106_4309_setauthsize,
  3493. }
  3494. },
  3495. {
  3496. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
  3497. .is_registered = 0,
  3498. .alg.aead = {
  3499. .base = {
  3500. .cra_name = "ccm(aes)",
  3501. .cra_driver_name = "ccm-aes-chcr",
  3502. .cra_blocksize = 1,
  3503. .cra_priority = CHCR_AEAD_PRIORITY,
  3504. .cra_ctxsize = sizeof(struct chcr_context) +
  3505. sizeof(struct chcr_aead_ctx),
  3506. },
  3507. .ivsize = AES_BLOCK_SIZE,
  3508. .maxauthsize = GHASH_DIGEST_SIZE,
  3509. .setkey = chcr_aead_ccm_setkey,
  3510. .setauthsize = chcr_ccm_setauthsize,
  3511. }
  3512. },
  3513. {
  3514. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
  3515. .is_registered = 0,
  3516. .alg.aead = {
  3517. .base = {
  3518. .cra_name = "rfc4309(ccm(aes))",
  3519. .cra_driver_name = "rfc4309-ccm-aes-chcr",
  3520. .cra_blocksize = 1,
  3521. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3522. .cra_ctxsize = sizeof(struct chcr_context) +
  3523. sizeof(struct chcr_aead_ctx),
  3524. },
  3525. .ivsize = 8,
  3526. .maxauthsize = GHASH_DIGEST_SIZE,
  3527. .setkey = chcr_aead_rfc4309_setkey,
  3528. .setauthsize = chcr_4106_4309_setauthsize,
  3529. }
  3530. },
  3531. {
  3532. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3533. .is_registered = 0,
  3534. .alg.aead = {
  3535. .base = {
  3536. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  3537. .cra_driver_name =
  3538. "authenc-hmac-sha1-cbc-aes-chcr",
  3539. .cra_blocksize = AES_BLOCK_SIZE,
  3540. .cra_priority = CHCR_AEAD_PRIORITY,
  3541. .cra_ctxsize = sizeof(struct chcr_context) +
  3542. sizeof(struct chcr_aead_ctx) +
  3543. sizeof(struct chcr_authenc_ctx),
  3544. },
  3545. .ivsize = AES_BLOCK_SIZE,
  3546. .maxauthsize = SHA1_DIGEST_SIZE,
  3547. .setkey = chcr_authenc_setkey,
  3548. .setauthsize = chcr_authenc_setauthsize,
  3549. }
  3550. },
  3551. {
  3552. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3553. .is_registered = 0,
  3554. .alg.aead = {
  3555. .base = {
  3556. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  3557. .cra_driver_name =
  3558. "authenc-hmac-sha256-cbc-aes-chcr",
  3559. .cra_blocksize = AES_BLOCK_SIZE,
  3560. .cra_priority = CHCR_AEAD_PRIORITY,
  3561. .cra_ctxsize = sizeof(struct chcr_context) +
  3562. sizeof(struct chcr_aead_ctx) +
  3563. sizeof(struct chcr_authenc_ctx),
  3564. },
  3565. .ivsize = AES_BLOCK_SIZE,
  3566. .maxauthsize = SHA256_DIGEST_SIZE,
  3567. .setkey = chcr_authenc_setkey,
  3568. .setauthsize = chcr_authenc_setauthsize,
  3569. }
  3570. },
  3571. {
  3572. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3573. .is_registered = 0,
  3574. .alg.aead = {
  3575. .base = {
  3576. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  3577. .cra_driver_name =
  3578. "authenc-hmac-sha224-cbc-aes-chcr",
  3579. .cra_blocksize = AES_BLOCK_SIZE,
  3580. .cra_priority = CHCR_AEAD_PRIORITY,
  3581. .cra_ctxsize = sizeof(struct chcr_context) +
  3582. sizeof(struct chcr_aead_ctx) +
  3583. sizeof(struct chcr_authenc_ctx),
  3584. },
  3585. .ivsize = AES_BLOCK_SIZE,
  3586. .maxauthsize = SHA224_DIGEST_SIZE,
  3587. .setkey = chcr_authenc_setkey,
  3588. .setauthsize = chcr_authenc_setauthsize,
  3589. }
  3590. },
  3591. {
  3592. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3593. .is_registered = 0,
  3594. .alg.aead = {
  3595. .base = {
  3596. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  3597. .cra_driver_name =
  3598. "authenc-hmac-sha384-cbc-aes-chcr",
  3599. .cra_blocksize = AES_BLOCK_SIZE,
  3600. .cra_priority = CHCR_AEAD_PRIORITY,
  3601. .cra_ctxsize = sizeof(struct chcr_context) +
  3602. sizeof(struct chcr_aead_ctx) +
  3603. sizeof(struct chcr_authenc_ctx),
  3604. },
  3605. .ivsize = AES_BLOCK_SIZE,
  3606. .maxauthsize = SHA384_DIGEST_SIZE,
  3607. .setkey = chcr_authenc_setkey,
  3608. .setauthsize = chcr_authenc_setauthsize,
  3609. }
  3610. },
  3611. {
  3612. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3613. .is_registered = 0,
  3614. .alg.aead = {
  3615. .base = {
  3616. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  3617. .cra_driver_name =
  3618. "authenc-hmac-sha512-cbc-aes-chcr",
  3619. .cra_blocksize = AES_BLOCK_SIZE,
  3620. .cra_priority = CHCR_AEAD_PRIORITY,
  3621. .cra_ctxsize = sizeof(struct chcr_context) +
  3622. sizeof(struct chcr_aead_ctx) +
  3623. sizeof(struct chcr_authenc_ctx),
  3624. },
  3625. .ivsize = AES_BLOCK_SIZE,
  3626. .maxauthsize = SHA512_DIGEST_SIZE,
  3627. .setkey = chcr_authenc_setkey,
  3628. .setauthsize = chcr_authenc_setauthsize,
  3629. }
  3630. },
  3631. {
  3632. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
  3633. .is_registered = 0,
  3634. .alg.aead = {
  3635. .base = {
  3636. .cra_name = "authenc(digest_null,cbc(aes))",
  3637. .cra_driver_name =
  3638. "authenc-digest_null-cbc-aes-chcr",
  3639. .cra_blocksize = AES_BLOCK_SIZE,
  3640. .cra_priority = CHCR_AEAD_PRIORITY,
  3641. .cra_ctxsize = sizeof(struct chcr_context) +
  3642. sizeof(struct chcr_aead_ctx) +
  3643. sizeof(struct chcr_authenc_ctx),
  3644. },
  3645. .ivsize = AES_BLOCK_SIZE,
  3646. .maxauthsize = 0,
  3647. .setkey = chcr_aead_digest_null_setkey,
  3648. .setauthsize = chcr_authenc_null_setauthsize,
  3649. }
  3650. },
  3651. {
  3652. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3653. .is_registered = 0,
  3654. .alg.aead = {
  3655. .base = {
  3656. .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  3657. .cra_driver_name =
  3658. "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
  3659. .cra_blocksize = 1,
  3660. .cra_priority = CHCR_AEAD_PRIORITY,
  3661. .cra_ctxsize = sizeof(struct chcr_context) +
  3662. sizeof(struct chcr_aead_ctx) +
  3663. sizeof(struct chcr_authenc_ctx),
  3664. },
  3665. .ivsize = CTR_RFC3686_IV_SIZE,
  3666. .maxauthsize = SHA1_DIGEST_SIZE,
  3667. .setkey = chcr_authenc_setkey,
  3668. .setauthsize = chcr_authenc_setauthsize,
  3669. }
  3670. },
  3671. {
  3672. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3673. .is_registered = 0,
  3674. .alg.aead = {
  3675. .base = {
  3676. .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  3677. .cra_driver_name =
  3678. "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
  3679. .cra_blocksize = 1,
  3680. .cra_priority = CHCR_AEAD_PRIORITY,
  3681. .cra_ctxsize = sizeof(struct chcr_context) +
  3682. sizeof(struct chcr_aead_ctx) +
  3683. sizeof(struct chcr_authenc_ctx),
  3684. },
  3685. .ivsize = CTR_RFC3686_IV_SIZE,
  3686. .maxauthsize = SHA256_DIGEST_SIZE,
  3687. .setkey = chcr_authenc_setkey,
  3688. .setauthsize = chcr_authenc_setauthsize,
  3689. }
  3690. },
  3691. {
  3692. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3693. .is_registered = 0,
  3694. .alg.aead = {
  3695. .base = {
  3696. .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
  3697. .cra_driver_name =
  3698. "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
  3699. .cra_blocksize = 1,
  3700. .cra_priority = CHCR_AEAD_PRIORITY,
  3701. .cra_ctxsize = sizeof(struct chcr_context) +
  3702. sizeof(struct chcr_aead_ctx) +
  3703. sizeof(struct chcr_authenc_ctx),
  3704. },
  3705. .ivsize = CTR_RFC3686_IV_SIZE,
  3706. .maxauthsize = SHA224_DIGEST_SIZE,
  3707. .setkey = chcr_authenc_setkey,
  3708. .setauthsize = chcr_authenc_setauthsize,
  3709. }
  3710. },
  3711. {
  3712. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3713. .is_registered = 0,
  3714. .alg.aead = {
  3715. .base = {
  3716. .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
  3717. .cra_driver_name =
  3718. "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
  3719. .cra_blocksize = 1,
  3720. .cra_priority = CHCR_AEAD_PRIORITY,
  3721. .cra_ctxsize = sizeof(struct chcr_context) +
  3722. sizeof(struct chcr_aead_ctx) +
  3723. sizeof(struct chcr_authenc_ctx),
  3724. },
  3725. .ivsize = CTR_RFC3686_IV_SIZE,
  3726. .maxauthsize = SHA384_DIGEST_SIZE,
  3727. .setkey = chcr_authenc_setkey,
  3728. .setauthsize = chcr_authenc_setauthsize,
  3729. }
  3730. },
  3731. {
  3732. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3733. .is_registered = 0,
  3734. .alg.aead = {
  3735. .base = {
  3736. .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
  3737. .cra_driver_name =
  3738. "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
  3739. .cra_blocksize = 1,
  3740. .cra_priority = CHCR_AEAD_PRIORITY,
  3741. .cra_ctxsize = sizeof(struct chcr_context) +
  3742. sizeof(struct chcr_aead_ctx) +
  3743. sizeof(struct chcr_authenc_ctx),
  3744. },
  3745. .ivsize = CTR_RFC3686_IV_SIZE,
  3746. .maxauthsize = SHA512_DIGEST_SIZE,
  3747. .setkey = chcr_authenc_setkey,
  3748. .setauthsize = chcr_authenc_setauthsize,
  3749. }
  3750. },
  3751. {
  3752. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
  3753. .is_registered = 0,
  3754. .alg.aead = {
  3755. .base = {
  3756. .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
  3757. .cra_driver_name =
  3758. "authenc-digest_null-rfc3686-ctr-aes-chcr",
  3759. .cra_blocksize = 1,
  3760. .cra_priority = CHCR_AEAD_PRIORITY,
  3761. .cra_ctxsize = sizeof(struct chcr_context) +
  3762. sizeof(struct chcr_aead_ctx) +
  3763. sizeof(struct chcr_authenc_ctx),
  3764. },
  3765. .ivsize = CTR_RFC3686_IV_SIZE,
  3766. .maxauthsize = 0,
  3767. .setkey = chcr_aead_digest_null_setkey,
  3768. .setauthsize = chcr_authenc_null_setauthsize,
  3769. }
  3770. },
  3771. };
  3772. /*
  3773. * chcr_unregister_alg - Deregister crypto algorithms with
  3774. * kernel framework.
  3775. */
  3776. static int chcr_unregister_alg(void)
  3777. {
  3778. int i;
  3779. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3780. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3781. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3782. if (driver_algs[i].is_registered)
  3783. crypto_unregister_alg(
  3784. &driver_algs[i].alg.crypto);
  3785. break;
  3786. case CRYPTO_ALG_TYPE_AEAD:
  3787. if (driver_algs[i].is_registered)
  3788. crypto_unregister_aead(
  3789. &driver_algs[i].alg.aead);
  3790. break;
  3791. case CRYPTO_ALG_TYPE_AHASH:
  3792. if (driver_algs[i].is_registered)
  3793. crypto_unregister_ahash(
  3794. &driver_algs[i].alg.hash);
  3795. break;
  3796. }
  3797. driver_algs[i].is_registered = 0;
  3798. }
  3799. return 0;
  3800. }
  3801. #define SZ_AHASH_CTX sizeof(struct chcr_context)
  3802. #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
  3803. #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
  3804. /*
  3805. * chcr_register_alg - Register crypto algorithms with kernel framework.
  3806. */
  3807. static int chcr_register_alg(void)
  3808. {
  3809. struct crypto_alg ai;
  3810. struct ahash_alg *a_hash;
  3811. int err = 0, i;
  3812. char *name = NULL;
  3813. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3814. if (driver_algs[i].is_registered)
  3815. continue;
  3816. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3817. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3818. driver_algs[i].alg.crypto.cra_priority =
  3819. CHCR_CRA_PRIORITY;
  3820. driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
  3821. driver_algs[i].alg.crypto.cra_flags =
  3822. CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
  3823. CRYPTO_ALG_NEED_FALLBACK;
  3824. driver_algs[i].alg.crypto.cra_ctxsize =
  3825. sizeof(struct chcr_context) +
  3826. sizeof(struct ablk_ctx);
  3827. driver_algs[i].alg.crypto.cra_alignmask = 0;
  3828. driver_algs[i].alg.crypto.cra_type =
  3829. &crypto_ablkcipher_type;
  3830. err = crypto_register_alg(&driver_algs[i].alg.crypto);
  3831. name = driver_algs[i].alg.crypto.cra_driver_name;
  3832. break;
  3833. case CRYPTO_ALG_TYPE_AEAD:
  3834. driver_algs[i].alg.aead.base.cra_flags =
  3835. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
  3836. driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
  3837. driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
  3838. driver_algs[i].alg.aead.init = chcr_aead_cra_init;
  3839. driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
  3840. driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
  3841. err = crypto_register_aead(&driver_algs[i].alg.aead);
  3842. name = driver_algs[i].alg.aead.base.cra_driver_name;
  3843. break;
  3844. case CRYPTO_ALG_TYPE_AHASH:
  3845. a_hash = &driver_algs[i].alg.hash;
  3846. a_hash->update = chcr_ahash_update;
  3847. a_hash->final = chcr_ahash_final;
  3848. a_hash->finup = chcr_ahash_finup;
  3849. a_hash->digest = chcr_ahash_digest;
  3850. a_hash->export = chcr_ahash_export;
  3851. a_hash->import = chcr_ahash_import;
  3852. a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
  3853. a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
  3854. a_hash->halg.base.cra_module = THIS_MODULE;
  3855. a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
  3856. a_hash->halg.base.cra_alignmask = 0;
  3857. a_hash->halg.base.cra_exit = NULL;
  3858. if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
  3859. a_hash->halg.base.cra_init = chcr_hmac_cra_init;
  3860. a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
  3861. a_hash->init = chcr_hmac_init;
  3862. a_hash->setkey = chcr_ahash_setkey;
  3863. a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
  3864. } else {
  3865. a_hash->init = chcr_sha_init;
  3866. a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
  3867. a_hash->halg.base.cra_init = chcr_sha_cra_init;
  3868. }
  3869. err = crypto_register_ahash(&driver_algs[i].alg.hash);
  3870. ai = driver_algs[i].alg.hash.halg.base;
  3871. name = ai.cra_driver_name;
  3872. break;
  3873. }
  3874. if (err) {
  3875. pr_err("chcr : %s : Algorithm registration failed\n",
  3876. name);
  3877. goto register_err;
  3878. } else {
  3879. driver_algs[i].is_registered = 1;
  3880. }
  3881. }
  3882. return 0;
  3883. register_err:
  3884. chcr_unregister_alg();
  3885. return err;
  3886. }
  3887. /*
  3888. * start_crypto - Register the crypto algorithms.
  3889. * This should called once when the first device comesup. After this
  3890. * kernel will start calling driver APIs for crypto operations.
  3891. */
  3892. int start_crypto(void)
  3893. {
  3894. return chcr_register_alg();
  3895. }
  3896. /*
  3897. * stop_crypto - Deregister all the crypto algorithms with kernel.
  3898. * This should be called once when the last device goes down. After this
  3899. * kernel will not call the driver API for crypto operations.
  3900. */
  3901. int stop_crypto(void)
  3902. {
  3903. chcr_unregister_alg();
  3904. return 0;
  3905. }