caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
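
/*
 * Illustrative sketch only: a minimal job descriptor of the shape drawn in
 * the comment above, built with the desc_constr.h helpers. The function name
 * and the address/length parameters are hypothetical placeholders and are not
 * part of the original driver; the driver builds its real job descriptors in
 * the init_*_job() helpers later in this file.
 */
static inline void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma,
				    int sh_desc_len, dma_addr_t src_dma,
				    unsigned int src_len, dma_addr_t dst_dma,
				    unsigned int dst_len)
{
	/* Header + pointer to the reusable shared descriptor */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}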

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
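/*
 * i.e. 32 bytes of AES key material, a 4-byte RFC3686 nonce and room for a
 * padded MDHA split key of up to 2 * 64 bytes -- 164 bytes in total.
 */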
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
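
/*
 * For authenc algorithms, key[] holds the MDHA split key padded to
 * split_key_pad_len, followed by the encryption key (and, for RFC3686,
 * the nonce); append_key_aead() below consumes it in that order.
 */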

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
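
/*
 * Each *_set_sh_desc() routine below follows the same pattern: check whether
 * the keys still leave room for the job descriptor in the 64-word descriptor
 * buffer (keys_fit_inline), build the shared descriptor into
 * ctx->sh_desc_enc/dec, then DMA-map it for use by the job ring.
 */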

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
  1030. DMA_TO_DEVICE);
  1031. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1032. dev_err(jrdev, "unable to map shared descriptor\n");
  1033. return -ENOMEM;
  1034. }
  1035. #ifdef DEBUG
  1036. print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
  1037. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1038. desc_bytes(desc), 1);
  1039. #endif
  1040. return 0;
  1041. }
  1042. static int rfc4543_setauthsize(struct crypto_aead *authenc,
  1043. unsigned int authsize)
  1044. {
  1045. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  1046. ctx->authsize = authsize;
  1047. rfc4543_set_sh_desc(authenc);
  1048. return 0;
  1049. }
  1050. static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
  1051. u32 authkeylen)
  1052. {
  1053. return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
  1054. ctx->split_key_pad_len, key_in, authkeylen,
  1055. ctx->alg_op);
  1056. }
  1057. static int aead_setkey(struct crypto_aead *aead,
  1058. const u8 *key, unsigned int keylen)
  1059. {
  1060. /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
  1061. static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
  1062. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1063. struct device *jrdev = ctx->jrdev;
  1064. struct crypto_authenc_keys keys;
  1065. int ret = 0;
  1066. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  1067. goto badkey;
  1068. /* Pick class 2 key length from algorithm submask */
  1069. ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
  1070. OP_ALG_ALGSEL_SHIFT] * 2;
  1071. ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
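/*
 * Worked example of the computation above: the mdpadlen[] entry for SHA-256
 * is 32, so split_key_len = 64 (IPAD + OPAD halves) and split_key_pad_len
 * stays 64; for SHA-1 the entry is 20, giving split_key_len = 40, padded up
 * to 48.
 */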
  1072. if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
  1073. goto badkey;
  1074. #ifdef DEBUG
  1075. printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
  1076. keys.authkeylen + keys.enckeylen, keys.enckeylen,
  1077. keys.authkeylen);
  1078. printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
  1079. ctx->split_key_len, ctx->split_key_pad_len);
  1080. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1081. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1082. #endif
  1083. ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
  1084. if (ret) {
  1085. goto badkey;
  1086. }
1087. /* append the encryption key after the auth split key */
  1088. memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
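/*
 * Resulting ctx->key layout (all of it DMA-mapped just below):
 *
 *   | MDHA split key (IPAD | OPAD), padded to split_key_pad_len | AES key |
 *
 * The shared descriptors presumably reference the split key at offset 0
 * (class 2) and the cipher key at offset split_key_pad_len (class 1).
 */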
  1089. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
  1090. keys.enckeylen, DMA_TO_DEVICE);
  1091. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1092. dev_err(jrdev, "unable to map key i/o memory\n");
  1093. return -ENOMEM;
  1094. }
  1095. #ifdef DEBUG
  1096. print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
  1097. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  1098. ctx->split_key_pad_len + keys.enckeylen, 1);
  1099. #endif
  1100. ctx->enckeylen = keys.enckeylen;
  1101. ret = aead_set_sh_desc(aead);
  1102. if (ret) {
  1103. dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
  1104. keys.enckeylen, DMA_TO_DEVICE);
  1105. }
  1106. return ret;
  1107. badkey:
  1108. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1109. return -EINVAL;
  1110. }
  1111. static int gcm_setkey(struct crypto_aead *aead,
  1112. const u8 *key, unsigned int keylen)
  1113. {
  1114. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1115. struct device *jrdev = ctx->jrdev;
  1116. int ret = 0;
  1117. #ifdef DEBUG
  1118. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1119. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1120. #endif
  1121. memcpy(ctx->key, key, keylen);
  1122. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  1123. DMA_TO_DEVICE);
  1124. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1125. dev_err(jrdev, "unable to map key i/o memory\n");
  1126. return -ENOMEM;
  1127. }
  1128. ctx->enckeylen = keylen;
  1129. ret = gcm_set_sh_desc(aead);
  1130. if (ret) {
  1131. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1132. DMA_TO_DEVICE);
  1133. }
  1134. return ret;
  1135. }
  1136. static int rfc4106_setkey(struct crypto_aead *aead,
  1137. const u8 *key, unsigned int keylen)
  1138. {
  1139. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1140. struct device *jrdev = ctx->jrdev;
  1141. int ret = 0;
  1142. if (keylen < 4)
  1143. return -EINVAL;
  1144. #ifdef DEBUG
  1145. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1146. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1147. #endif
  1148. memcpy(ctx->key, key, keylen);
  1149. /*
  1150. * The last four bytes of the key material are used as the salt value
  1151. * in the nonce. Update the AES key length.
  1152. */
  1153. ctx->enckeylen = keylen - 4;
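/*
 * i.e. the key material handed in by the API looks like:
 *
 *   | AES key (keylen - 4 bytes) | 4-byte salt |
 *
 * and the salt is later combined with the 8-byte per-request IV to form the
 * 12-byte GCM nonce, per RFC 4106.
 */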
  1154. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
  1155. DMA_TO_DEVICE);
  1156. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1157. dev_err(jrdev, "unable to map key i/o memory\n");
  1158. return -ENOMEM;
  1159. }
  1160. ret = rfc4106_set_sh_desc(aead);
  1161. if (ret) {
  1162. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1163. DMA_TO_DEVICE);
  1164. }
  1165. return ret;
  1166. }
  1167. static int rfc4543_setkey(struct crypto_aead *aead,
  1168. const u8 *key, unsigned int keylen)
  1169. {
  1170. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1171. struct device *jrdev = ctx->jrdev;
  1172. int ret = 0;
  1173. if (keylen < 4)
  1174. return -EINVAL;
  1175. #ifdef DEBUG
  1176. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1177. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1178. #endif
  1179. memcpy(ctx->key, key, keylen);
  1180. /*
  1181. * The last four bytes of the key material are used as the salt value
  1182. * in the nonce. Update the AES key length.
  1183. */
  1184. ctx->enckeylen = keylen - 4;
  1185. ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
  1186. DMA_TO_DEVICE);
  1187. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1188. dev_err(jrdev, "unable to map key i/o memory\n");
  1189. return -ENOMEM;
  1190. }
  1191. ret = rfc4543_set_sh_desc(aead);
  1192. if (ret) {
  1193. dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
  1194. DMA_TO_DEVICE);
  1195. }
  1196. return ret;
  1197. }
  1198. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  1199. const u8 *key, unsigned int keylen)
  1200. {
  1201. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1202. struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
  1203. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
  1204. const char *alg_name = crypto_tfm_alg_name(tfm);
  1205. struct device *jrdev = ctx->jrdev;
  1206. int ret = 0;
  1207. u32 *key_jump_cmd;
  1208. u32 *desc;
  1209. u32 *nonce;
  1210. u32 geniv;
  1211. u32 ctx1_iv_off = 0;
  1212. const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
  1213. OP_ALG_AAI_CTR_MOD128);
  1214. const bool is_rfc3686 = (ctr_mode &&
  1215. (strstr(alg_name, "rfc3686") != NULL));
  1216. #ifdef DEBUG
  1217. print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
  1218. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  1219. #endif
  1220. /*
  1221. * AES-CTR needs to load IV in CONTEXT1 reg
  1222. * at an offset of 128bits (16bytes)
  1223. * CONTEXT1[255:128] = IV
  1224. */
  1225. if (ctr_mode)
  1226. ctx1_iv_off = 16;
  1227. /*
  1228. * RFC3686 specific:
  1229. * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  1230. * | *key = {KEY, NONCE}
  1231. */
  1232. if (is_rfc3686) {
  1233. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  1234. keylen -= CTR_RFC3686_NONCE_SIZE;
  1235. }
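/*
 * So for rfc3686(ctr(aes)) the CONTEXT1 register ends up laid out as (byte
 * offsets, following the values computed above):
 *
 *   16..19: nonce (copied from the tail of the key below)
 *   20..27: per-request IV            (ctx1_iv_off = 20)
 *   28..31: initial block counter = 1 (big endian)
 *
 * while plain ctr(aes) keeps the whole 16-byte counter block at offset 16.
 */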
  1236. memcpy(ctx->key, key, keylen);
  1237. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
  1238. DMA_TO_DEVICE);
  1239. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1240. dev_err(jrdev, "unable to map key i/o memory\n");
  1241. return -ENOMEM;
  1242. }
  1243. ctx->enckeylen = keylen;
  1244. /* ablkcipher_encrypt shared descriptor */
  1245. desc = ctx->sh_desc_enc;
  1246. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1247. /* Skip if already shared */
  1248. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1249. JUMP_COND_SHRD);
  1250. /* Load class1 key only */
  1251. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1252. ctx->enckeylen, CLASS_1 |
  1253. KEY_DEST_CLASS_REG);
  1254. /* Load nonce into CONTEXT1 reg */
  1255. if (is_rfc3686) {
  1256. nonce = (u32 *)(key + keylen);
  1257. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1258. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1259. append_move(desc, MOVE_WAITCOMP |
  1260. MOVE_SRC_OUTFIFO |
  1261. MOVE_DEST_CLASS1CTX |
  1262. (16 << MOVE_OFFSET_SHIFT) |
  1263. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1264. }
  1265. set_jump_tgt_here(desc, key_jump_cmd);
  1266. /* Load iv */
  1267. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1268. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1269. /* Load counter into CONTEXT1 reg */
  1270. if (is_rfc3686)
  1271. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1272. LDST_CLASS_1_CCB |
  1273. LDST_SRCDST_BYTE_CONTEXT |
  1274. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1275. LDST_OFFSET_SHIFT));
  1276. /* Load operation */
  1277. append_operation(desc, ctx->class1_alg_type |
  1278. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1279. /* Perform operation */
  1280. ablkcipher_append_src_dst(desc);
  1281. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  1282. desc_bytes(desc),
  1283. DMA_TO_DEVICE);
  1284. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  1285. dev_err(jrdev, "unable to map shared descriptor\n");
  1286. return -ENOMEM;
  1287. }
  1288. #ifdef DEBUG
  1289. print_hex_dump(KERN_ERR,
  1290. "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
  1291. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1292. desc_bytes(desc), 1);
  1293. #endif
  1294. /* ablkcipher_decrypt shared descriptor */
  1295. desc = ctx->sh_desc_dec;
  1296. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1297. /* Skip if already shared */
  1298. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1299. JUMP_COND_SHRD);
  1300. /* Load class1 key only */
  1301. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1302. ctx->enckeylen, CLASS_1 |
  1303. KEY_DEST_CLASS_REG);
  1304. /* Load nonce into CONTEXT1 reg */
  1305. if (is_rfc3686) {
  1306. nonce = (u32 *)(key + keylen);
  1307. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1308. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1309. append_move(desc, MOVE_WAITCOMP |
  1310. MOVE_SRC_OUTFIFO |
  1311. MOVE_DEST_CLASS1CTX |
  1312. (16 << MOVE_OFFSET_SHIFT) |
  1313. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1314. }
  1315. set_jump_tgt_here(desc, key_jump_cmd);
  1316. /* load IV */
  1317. append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
  1318. LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1319. /* Load counter into CONTEXT1 reg */
  1320. if (is_rfc3686)
  1321. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1322. LDST_CLASS_1_CCB |
  1323. LDST_SRCDST_BYTE_CONTEXT |
  1324. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1325. LDST_OFFSET_SHIFT));
  1326. /* Choose operation */
  1327. if (ctr_mode)
  1328. append_operation(desc, ctx->class1_alg_type |
  1329. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
  1330. else
  1331. append_dec_op1(desc, ctx->class1_alg_type);
  1332. /* Perform operation */
  1333. ablkcipher_append_src_dst(desc);
  1334. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  1335. desc_bytes(desc),
  1336. DMA_TO_DEVICE);
  1337. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1338. dev_err(jrdev, "unable to map shared descriptor\n");
  1339. return -ENOMEM;
  1340. }
  1341. #ifdef DEBUG
  1342. print_hex_dump(KERN_ERR,
  1343. "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
  1344. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1345. desc_bytes(desc), 1);
  1346. #endif
  1347. /* ablkcipher_givencrypt shared descriptor */
  1348. desc = ctx->sh_desc_givenc;
  1349. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1350. /* Skip if already shared */
  1351. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1352. JUMP_COND_SHRD);
  1353. /* Load class1 key only */
  1354. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1355. ctx->enckeylen, CLASS_1 |
  1356. KEY_DEST_CLASS_REG);
  1357. /* Load Nonce into CONTEXT1 reg */
  1358. if (is_rfc3686) {
  1359. nonce = (u32 *)(key + keylen);
  1360. append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
  1361. LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
  1362. append_move(desc, MOVE_WAITCOMP |
  1363. MOVE_SRC_OUTFIFO |
  1364. MOVE_DEST_CLASS1CTX |
  1365. (16 << MOVE_OFFSET_SHIFT) |
  1366. (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
  1367. }
  1368. set_jump_tgt_here(desc, key_jump_cmd);
  1369. /* Generate IV */
  1370. geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
  1371. NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
  1372. NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
  1373. append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
  1374. LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
  1375. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  1376. append_move(desc, MOVE_WAITCOMP |
  1377. MOVE_SRC_INFIFO |
  1378. MOVE_DEST_CLASS1CTX |
  1379. (crt->ivsize << MOVE_LEN_SHIFT) |
  1380. (ctx1_iv_off << MOVE_OFFSET_SHIFT));
  1381. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
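/*
 * Rough intent of the block above: an info FIFO entry is pushed that asks
 * the pad/RNG source for ivsize random bytes routed to the DECO; with the
 * automatic info FIFO disabled, those bytes are MOVEd by hand into CONTEXT1
 * at the IV offset, so the IV is generated on-chip instead of being supplied
 * by the caller.
 */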
  1382. /* Copy generated IV to memory */
  1383. append_seq_store(desc, crt->ivsize,
  1384. LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
  1385. (ctx1_iv_off << LDST_OFFSET_SHIFT));
  1386. /* Load Counter into CONTEXT1 reg */
  1387. if (is_rfc3686)
1388. append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
  1389. LDST_CLASS_1_CCB |
  1390. LDST_SRCDST_BYTE_CONTEXT |
  1391. ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
  1392. LDST_OFFSET_SHIFT));
  1393. if (ctx1_iv_off)
  1394. append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
  1395. (1 << JUMP_OFFSET_SHIFT));
  1396. /* Load operation */
  1397. append_operation(desc, ctx->class1_alg_type |
  1398. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  1399. /* Perform operation */
  1400. ablkcipher_append_src_dst(desc);
  1401. ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
  1402. desc_bytes(desc),
  1403. DMA_TO_DEVICE);
  1404. if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
  1405. dev_err(jrdev, "unable to map shared descriptor\n");
  1406. return -ENOMEM;
  1407. }
  1408. #ifdef DEBUG
  1409. print_hex_dump(KERN_ERR,
  1410. "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
  1411. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1412. desc_bytes(desc), 1);
  1413. #endif
  1414. return ret;
  1415. }
  1416. static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  1417. const u8 *key, unsigned int keylen)
  1418. {
  1419. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1420. struct device *jrdev = ctx->jrdev;
  1421. u32 *key_jump_cmd, *desc;
  1422. __be64 sector_size = cpu_to_be64(512);
  1423. if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
  1424. crypto_ablkcipher_set_flags(ablkcipher,
  1425. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1426. dev_err(jrdev, "key size mismatch\n");
  1427. return -EINVAL;
  1428. }
  1429. memcpy(ctx->key, key, keylen);
  1430. ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
  1431. if (dma_mapping_error(jrdev, ctx->key_dma)) {
  1432. dev_err(jrdev, "unable to map key i/o memory\n");
  1433. return -ENOMEM;
  1434. }
  1435. ctx->enckeylen = keylen;
  1436. /* xts_ablkcipher_encrypt shared descriptor */
  1437. desc = ctx->sh_desc_enc;
  1438. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1439. /* Skip if already shared */
  1440. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1441. JUMP_COND_SHRD);
  1442. /* Load class1 keys only */
  1443. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1444. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  1445. /* Load sector size with index 40 bytes (0x28) */
  1446. append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
  1447. LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
  1448. append_data(desc, (void *)&sector_size, 8);
  1449. set_jump_tgt_here(desc, key_jump_cmd);
  1450. /*
  1451. * create sequence for loading the sector index
  1452. * Upper 8B of IV - will be used as sector index
  1453. * Lower 8B of IV - will be discarded
  1454. */
  1455. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  1456. LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
  1457. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
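/*
 * Net effect on CONTEXT1 for XTS (byte offsets as used above):
 *
 *   0x20..0x27: sector index, taken from the upper 8 bytes of the IV
 *   0x28..0x2f: sector size, fixed at 512 (big endian)
 *
 * The remaining 8 IV bytes are pulled from the sequence and dropped via the
 * FIFOLD_CLASS_SKIP load.
 */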
  1458. /* Load operation */
  1459. append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
  1460. OP_ALG_ENCRYPT);
  1461. /* Perform operation */
  1462. ablkcipher_append_src_dst(desc);
  1463. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
  1464. DMA_TO_DEVICE);
  1465. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  1466. dev_err(jrdev, "unable to map shared descriptor\n");
  1467. return -ENOMEM;
  1468. }
  1469. #ifdef DEBUG
  1470. print_hex_dump(KERN_ERR,
  1471. "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
  1472. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  1473. #endif
  1474. /* xts_ablkcipher_decrypt shared descriptor */
  1475. desc = ctx->sh_desc_dec;
  1476. init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
  1477. /* Skip if already shared */
  1478. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  1479. JUMP_COND_SHRD);
  1480. /* Load class1 key only */
  1481. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  1482. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  1483. /* Load sector size with index 40 bytes (0x28) */
  1484. append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
  1485. LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
  1486. append_data(desc, (void *)&sector_size, 8);
  1487. set_jump_tgt_here(desc, key_jump_cmd);
  1488. /*
  1489. * create sequence for loading the sector index
  1490. * Upper 8B of IV - will be used as sector index
  1491. * Lower 8B of IV - will be discarded
  1492. */
  1493. append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
  1494. LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
  1495. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  1496. /* Load operation */
  1497. append_dec_op1(desc, ctx->class1_alg_type);
  1498. /* Perform operation */
  1499. ablkcipher_append_src_dst(desc);
  1500. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
  1501. DMA_TO_DEVICE);
  1502. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1503. dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
  1504. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  1505. dev_err(jrdev, "unable to map shared descriptor\n");
  1506. return -ENOMEM;
  1507. }
  1508. #ifdef DEBUG
  1509. print_hex_dump(KERN_ERR,
  1510. "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
  1511. DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
  1512. #endif
  1513. return 0;
  1514. }
  1515. /*
  1516. * aead_edesc - s/w-extended aead descriptor
  1517. * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  1518. * @src_nents: number of segments in input scatterlist
  1519. * @dst_nents: number of segments in output scatterlist
  1520. * @iv_dma: dma address of iv for checking continuity and link table
1521. * @sec4_sg_bytes: length of dma mapped sec4_sg space
1522. * @sec4_sg_dma: bus physical mapped address of h/w link table
1523. * @sec4_sg: pointer to h/w link table
1524. * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE) followed by any referenced link tables
  1525. */
  1526. struct aead_edesc {
  1527. int assoc_nents;
  1528. int src_nents;
  1529. int dst_nents;
  1530. dma_addr_t iv_dma;
  1531. int sec4_sg_bytes;
  1532. dma_addr_t sec4_sg_dma;
  1533. struct sec4_sg_entry *sec4_sg;
  1534. u32 hw_desc[];
  1535. };
  1536. /*
  1537. * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  1538. * @src_nents: number of segments in input scatterlist
  1539. * @dst_nents: number of segments in output scatterlist
  1540. * @iv_dma: dma address of iv for checking continuity and link table
1541. * @sec4_sg_bytes: length of dma mapped sec4_sg space
1542. * @sec4_sg_dma: bus physical mapped address of h/w link table
1543. * @sec4_sg: pointer to h/w link table
1544. * @hw_desc: the h/w job descriptor (must not exceed MAX_CAAM_DESCSIZE) followed by any referenced link tables
  1545. */
  1546. struct ablkcipher_edesc {
  1547. int src_nents;
  1548. int dst_nents;
  1549. dma_addr_t iv_dma;
  1550. int sec4_sg_bytes;
  1551. dma_addr_t sec4_sg_dma;
  1552. struct sec4_sg_entry *sec4_sg;
1553. u32 hw_desc[];
  1554. };
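/*
 * Both extended descriptors are allocated as a single block, roughly:
 *
 *   | struct *_edesc | h/w job descriptor (desc_bytes) | sec4_sg entries |
 *
 * which is why sec4_sg is later pointed at (void *)edesc + sizeof(*edesc) +
 * desc_bytes, and only the trailing link table is mapped separately for DMA.
 */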
  1555. static void caam_unmap(struct device *dev, struct scatterlist *src,
  1556. struct scatterlist *dst, int src_nents,
  1557. int dst_nents,
  1558. dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
  1559. int sec4_sg_bytes)
  1560. {
  1561. if (dst != src) {
  1562. dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
  1563. dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
  1564. } else {
  1565. dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
  1566. }
  1567. if (iv_dma)
  1568. dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
  1569. if (sec4_sg_bytes)
  1570. dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
  1571. DMA_TO_DEVICE);
  1572. }
  1573. static void aead_unmap(struct device *dev,
  1574. struct aead_edesc *edesc,
  1575. struct aead_request *req)
  1576. {
  1577. caam_unmap(dev, req->src, req->dst,
  1578. edesc->src_nents, edesc->dst_nents, 0, 0,
  1579. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  1580. }
  1581. static void ablkcipher_unmap(struct device *dev,
  1582. struct ablkcipher_edesc *edesc,
  1583. struct ablkcipher_request *req)
  1584. {
  1585. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1586. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1587. caam_unmap(dev, req->src, req->dst,
  1588. edesc->src_nents, edesc->dst_nents,
  1589. edesc->iv_dma, ivsize,
  1590. edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
  1591. }
  1592. static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1593. void *context)
  1594. {
  1595. struct aead_request *req = context;
  1596. struct aead_edesc *edesc;
  1597. #ifdef DEBUG
  1598. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1599. #endif
  1600. edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
  1601. if (err)
  1602. caam_jr_strstatus(jrdev, err);
  1603. aead_unmap(jrdev, edesc, req);
  1604. kfree(edesc);
  1605. aead_request_complete(req, err);
  1606. }
  1607. static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1608. void *context)
  1609. {
  1610. struct aead_request *req = context;
  1611. struct aead_edesc *edesc;
  1612. #ifdef DEBUG
  1613. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1614. #endif
  1615. edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
  1616. if (err)
  1617. caam_jr_strstatus(jrdev, err);
  1618. aead_unmap(jrdev, edesc, req);
  1619. /*
1620. * verify the h/w ICV check passed, else return -EBADMSG
  1621. */
  1622. if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
  1623. err = -EBADMSG;
  1624. kfree(edesc);
  1625. aead_request_complete(req, err);
  1626. }
  1627. static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1628. void *context)
  1629. {
  1630. struct ablkcipher_request *req = context;
  1631. struct ablkcipher_edesc *edesc;
  1632. #ifdef DEBUG
  1633. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1634. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1635. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1636. #endif
  1637. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1638. offsetof(struct ablkcipher_edesc, hw_desc));
  1639. if (err)
  1640. caam_jr_strstatus(jrdev, err);
  1641. #ifdef DEBUG
  1642. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1643. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1644. edesc->src_nents > 1 ? 100 : ivsize, 1);
  1645. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1646. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1647. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1648. #endif
  1649. ablkcipher_unmap(jrdev, edesc, req);
  1650. kfree(edesc);
  1651. ablkcipher_request_complete(req, err);
  1652. }
  1653. static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
  1654. void *context)
  1655. {
  1656. struct ablkcipher_request *req = context;
  1657. struct ablkcipher_edesc *edesc;
  1658. #ifdef DEBUG
  1659. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1660. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1661. dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  1662. #endif
  1663. edesc = (struct ablkcipher_edesc *)((char *)desc -
  1664. offsetof(struct ablkcipher_edesc, hw_desc));
  1665. if (err)
  1666. caam_jr_strstatus(jrdev, err);
  1667. #ifdef DEBUG
  1668. print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
  1669. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1670. ivsize, 1);
  1671. print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
  1672. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1673. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  1674. #endif
  1675. ablkcipher_unmap(jrdev, edesc, req);
  1676. kfree(edesc);
  1677. ablkcipher_request_complete(req, err);
  1678. }
  1679. /*
  1680. * Fill in aead job descriptor
  1681. */
  1682. static void init_aead_job(struct aead_request *req,
  1683. struct aead_edesc *edesc,
  1684. bool all_contig, bool encrypt)
  1685. {
  1686. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1687. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1688. int authsize = ctx->authsize;
  1689. u32 *desc = edesc->hw_desc;
  1690. u32 out_options, in_options;
  1691. dma_addr_t dst_dma, src_dma;
  1692. int len, sec4_sg_index = 0;
  1693. dma_addr_t ptr;
  1694. u32 *sh_desc;
  1695. sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
  1696. ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
  1697. len = desc_len(sh_desc);
  1698. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1699. if (all_contig) {
  1700. src_dma = sg_dma_address(req->src);
  1701. in_options = 0;
  1702. } else {
  1703. src_dma = edesc->sec4_sg_dma;
  1704. sec4_sg_index += edesc->src_nents;
  1705. in_options = LDST_SGF;
  1706. }
  1707. append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
  1708. in_options);
  1709. dst_dma = src_dma;
  1710. out_options = in_options;
  1711. if (unlikely(req->src != req->dst)) {
  1712. if (!edesc->dst_nents) {
  1713. dst_dma = sg_dma_address(req->dst);
  1714. } else {
  1715. dst_dma = edesc->sec4_sg_dma +
  1716. sec4_sg_index *
  1717. sizeof(struct sec4_sg_entry);
  1718. out_options = LDST_SGF;
  1719. }
  1720. }
  1721. if (encrypt)
  1722. append_seq_out_ptr(desc, dst_dma,
  1723. req->assoclen + req->cryptlen + authsize,
  1724. out_options);
  1725. else
  1726. append_seq_out_ptr(desc, dst_dma,
  1727. req->assoclen + req->cryptlen - authsize,
  1728. out_options);
  1729. /* REG3 = assoclen */
  1730. append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
  1731. }
  1732. static void init_gcm_job(struct aead_request *req,
  1733. struct aead_edesc *edesc,
  1734. bool all_contig, bool encrypt)
  1735. {
  1736. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1737. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1738. unsigned int ivsize = crypto_aead_ivsize(aead);
  1739. u32 *desc = edesc->hw_desc;
  1740. bool generic_gcm = (ivsize == 12);
  1741. unsigned int last;
  1742. init_aead_job(req, edesc, all_contig, encrypt);
  1743. /* BUG This should not be specific to generic GCM. */
  1744. last = 0;
  1745. if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
  1746. last = FIFOLD_TYPE_LAST1;
  1747. /* Read GCM IV */
  1748. append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
  1749. FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
  1750. /* Append Salt */
  1751. if (!generic_gcm)
  1752. append_data(desc, ctx->key + ctx->enckeylen, 4);
  1753. /* Append IV */
  1754. append_data(desc, req->iv, ivsize);
  1755. /* End of blank commands */
  1756. }
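/*
 * The 12 bytes FIFO-loaded as the IV above are, in sketch form:
 *
 *   gcm(aes):            IV[0..11]              (ivsize == 12)
 *   rfc4106 / rfc4543:   salt[0..3] | IV[0..7]  (salt kept after the key)
 *
 * i.e. the 96-bit GCM nonce the OPERATION expects.
 */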
  1757. static void init_authenc_job(struct aead_request *req,
  1758. struct aead_edesc *edesc,
  1759. bool all_contig, bool encrypt)
  1760. {
  1761. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1762. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  1763. struct caam_aead_alg, aead);
  1764. unsigned int ivsize = crypto_aead_ivsize(aead);
  1765. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1766. const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
  1767. OP_ALG_AAI_CTR_MOD128);
  1768. const bool is_rfc3686 = alg->caam.rfc3686;
  1769. u32 *desc = edesc->hw_desc;
  1770. u32 ivoffset = 0;
  1771. /*
  1772. * AES-CTR needs to load IV in CONTEXT1 reg
  1773. * at an offset of 128bits (16bytes)
  1774. * CONTEXT1[255:128] = IV
  1775. */
  1776. if (ctr_mode)
  1777. ivoffset = 16;
  1778. /*
  1779. * RFC3686 specific:
  1780. * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  1781. */
  1782. if (is_rfc3686)
  1783. ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
  1784. init_aead_job(req, edesc, all_contig, encrypt);
  1785. if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
  1786. append_load_as_imm(desc, req->iv, ivsize,
  1787. LDST_CLASS_1_CCB |
  1788. LDST_SRCDST_BYTE_CONTEXT |
  1789. (ivoffset << LDST_OFFSET_SHIFT));
  1790. }
  1791. /*
  1792. * Fill in ablkcipher job descriptor
  1793. */
  1794. static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  1795. struct ablkcipher_edesc *edesc,
  1796. struct ablkcipher_request *req,
  1797. bool iv_contig)
  1798. {
  1799. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1800. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1801. u32 *desc = edesc->hw_desc;
  1802. u32 out_options = 0, in_options;
  1803. dma_addr_t dst_dma, src_dma;
  1804. int len, sec4_sg_index = 0;
  1805. #ifdef DEBUG
  1806. print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
  1807. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1808. ivsize, 1);
  1809. print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
  1810. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1811. edesc->src_nents ? 100 : req->nbytes, 1);
  1812. #endif
  1813. len = desc_len(sh_desc);
  1814. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1815. if (iv_contig) {
  1816. src_dma = edesc->iv_dma;
  1817. in_options = 0;
  1818. } else {
  1819. src_dma = edesc->sec4_sg_dma;
  1820. sec4_sg_index += edesc->src_nents + 1;
  1821. in_options = LDST_SGF;
  1822. }
  1823. append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
  1824. if (likely(req->src == req->dst)) {
  1825. if (!edesc->src_nents && iv_contig) {
  1826. dst_dma = sg_dma_address(req->src);
  1827. } else {
  1828. dst_dma = edesc->sec4_sg_dma +
  1829. sizeof(struct sec4_sg_entry);
  1830. out_options = LDST_SGF;
  1831. }
  1832. } else {
  1833. if (!edesc->dst_nents) {
  1834. dst_dma = sg_dma_address(req->dst);
  1835. } else {
  1836. dst_dma = edesc->sec4_sg_dma +
  1837. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1838. out_options = LDST_SGF;
  1839. }
  1840. }
  1841. append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
  1842. }
  1843. /*
  1844. * Fill in ablkcipher givencrypt job descriptor
  1845. */
  1846. static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
  1847. struct ablkcipher_edesc *edesc,
  1848. struct ablkcipher_request *req,
  1849. bool iv_contig)
  1850. {
  1851. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1852. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  1853. u32 *desc = edesc->hw_desc;
  1854. u32 out_options, in_options;
  1855. dma_addr_t dst_dma, src_dma;
  1856. int len, sec4_sg_index = 0;
  1857. #ifdef DEBUG
  1858. print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
  1859. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  1860. ivsize, 1);
  1861. print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
  1862. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  1863. edesc->src_nents ? 100 : req->nbytes, 1);
  1864. #endif
  1865. len = desc_len(sh_desc);
  1866. init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  1867. if (!edesc->src_nents) {
  1868. src_dma = sg_dma_address(req->src);
  1869. in_options = 0;
  1870. } else {
  1871. src_dma = edesc->sec4_sg_dma;
  1872. sec4_sg_index += edesc->src_nents;
  1873. in_options = LDST_SGF;
  1874. }
  1875. append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
  1876. if (iv_contig) {
  1877. dst_dma = edesc->iv_dma;
  1878. out_options = 0;
  1879. } else {
  1880. dst_dma = edesc->sec4_sg_dma +
  1881. sec4_sg_index * sizeof(struct sec4_sg_entry);
  1882. out_options = LDST_SGF;
  1883. }
  1884. append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
  1885. }
  1886. /*
  1887. * allocate and map the aead extended descriptor
  1888. */
  1889. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  1890. int desc_bytes, bool *all_contig_ptr,
  1891. bool encrypt)
  1892. {
  1893. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1894. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1895. struct device *jrdev = ctx->jrdev;
  1896. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  1897. CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
  1898. int src_nents, dst_nents = 0;
  1899. struct aead_edesc *edesc;
  1900. int sgc;
  1901. bool all_contig = true;
  1902. int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
  1903. unsigned int authsize = ctx->authsize;
  1904. if (unlikely(req->dst != req->src)) {
  1905. src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
  1906. dst_nents = sg_count(req->dst,
  1907. req->assoclen + req->cryptlen +
  1908. (encrypt ? authsize : (-authsize)));
  1909. } else {
  1910. src_nents = sg_count(req->src,
  1911. req->assoclen + req->cryptlen +
  1912. (encrypt ? authsize : 0));
  1913. }
  1914. /* Check if data are contiguous. */
  1915. all_contig = !src_nents;
  1916. if (!all_contig) {
  1917. src_nents = src_nents ? : 1;
  1918. sec4_sg_len = src_nents;
  1919. }
  1920. sec4_sg_len += dst_nents;
  1921. sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
  1922. /* allocate space for base edesc and hw desc commands, link tables */
  1923. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  1924. GFP_DMA | flags);
  1925. if (!edesc) {
  1926. dev_err(jrdev, "could not allocate extended descriptor\n");
  1927. return ERR_PTR(-ENOMEM);
  1928. }
  1929. if (likely(req->src == req->dst)) {
  1930. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  1931. DMA_BIDIRECTIONAL);
  1932. if (unlikely(!sgc)) {
  1933. dev_err(jrdev, "unable to map source\n");
  1934. kfree(edesc);
  1935. return ERR_PTR(-ENOMEM);
  1936. }
  1937. } else {
  1938. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  1939. DMA_TO_DEVICE);
  1940. if (unlikely(!sgc)) {
  1941. dev_err(jrdev, "unable to map source\n");
  1942. kfree(edesc);
  1943. return ERR_PTR(-ENOMEM);
  1944. }
  1945. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  1946. DMA_FROM_DEVICE);
  1947. if (unlikely(!sgc)) {
  1948. dev_err(jrdev, "unable to map destination\n");
  1949. dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
  1950. DMA_TO_DEVICE);
  1951. kfree(edesc);
  1952. return ERR_PTR(-ENOMEM);
  1953. }
  1954. }
  1955. edesc->src_nents = src_nents;
  1956. edesc->dst_nents = dst_nents;
  1957. edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
  1958. desc_bytes;
  1959. *all_contig_ptr = all_contig;
  1960. sec4_sg_index = 0;
  1961. if (!all_contig) {
  1962. sg_to_sec4_sg_last(req->src, src_nents,
  1963. edesc->sec4_sg + sec4_sg_index, 0);
  1964. sec4_sg_index += src_nents;
  1965. }
  1966. if (dst_nents) {
  1967. sg_to_sec4_sg_last(req->dst, dst_nents,
  1968. edesc->sec4_sg + sec4_sg_index, 0);
  1969. }
  1970. if (!sec4_sg_bytes)
  1971. return edesc;
  1972. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  1973. sec4_sg_bytes, DMA_TO_DEVICE);
  1974. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  1975. dev_err(jrdev, "unable to map S/G table\n");
  1976. aead_unmap(jrdev, edesc, req);
  1977. kfree(edesc);
  1978. return ERR_PTR(-ENOMEM);
  1979. }
  1980. edesc->sec4_sg_bytes = sec4_sg_bytes;
  1981. return edesc;
  1982. }
  1983. static int gcm_encrypt(struct aead_request *req)
  1984. {
  1985. struct aead_edesc *edesc;
  1986. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  1987. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  1988. struct device *jrdev = ctx->jrdev;
  1989. bool all_contig;
  1990. u32 *desc;
  1991. int ret = 0;
  1992. /* allocate extended descriptor */
  1993. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
  1994. if (IS_ERR(edesc))
  1995. return PTR_ERR(edesc);
  1996. /* Create and submit job descriptor */
  1997. init_gcm_job(req, edesc, all_contig, true);
  1998. #ifdef DEBUG
  1999. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2000. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2001. desc_bytes(edesc->hw_desc), 1);
  2002. #endif
  2003. desc = edesc->hw_desc;
  2004. ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
  2005. if (!ret) {
  2006. ret = -EINPROGRESS;
  2007. } else {
  2008. aead_unmap(jrdev, edesc, req);
  2009. kfree(edesc);
  2010. }
  2011. return ret;
  2012. }
  2013. static int ipsec_gcm_encrypt(struct aead_request *req)
  2014. {
  2015. if (req->assoclen < 8)
  2016. return -EINVAL;
  2017. return gcm_encrypt(req);
  2018. }
  2019. static int aead_encrypt(struct aead_request *req)
  2020. {
  2021. struct aead_edesc *edesc;
  2022. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2023. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2024. struct device *jrdev = ctx->jrdev;
  2025. bool all_contig;
  2026. u32 *desc;
  2027. int ret = 0;
  2028. /* allocate extended descriptor */
  2029. edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
  2030. &all_contig, true);
  2031. if (IS_ERR(edesc))
  2032. return PTR_ERR(edesc);
  2033. /* Create and submit job descriptor */
  2034. init_authenc_job(req, edesc, all_contig, true);
  2035. #ifdef DEBUG
  2036. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2037. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2038. desc_bytes(edesc->hw_desc), 1);
  2039. #endif
  2040. desc = edesc->hw_desc;
  2041. ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
  2042. if (!ret) {
  2043. ret = -EINPROGRESS;
  2044. } else {
  2045. aead_unmap(jrdev, edesc, req);
  2046. kfree(edesc);
  2047. }
  2048. return ret;
  2049. }
  2050. static int gcm_decrypt(struct aead_request *req)
  2051. {
  2052. struct aead_edesc *edesc;
  2053. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2054. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2055. struct device *jrdev = ctx->jrdev;
  2056. bool all_contig;
  2057. u32 *desc;
  2058. int ret = 0;
  2059. /* allocate extended descriptor */
  2060. edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
  2061. if (IS_ERR(edesc))
  2062. return PTR_ERR(edesc);
  2063. /* Create and submit job descriptor*/
  2064. init_gcm_job(req, edesc, all_contig, false);
  2065. #ifdef DEBUG
  2066. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2067. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2068. desc_bytes(edesc->hw_desc), 1);
  2069. #endif
  2070. desc = edesc->hw_desc;
  2071. ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
  2072. if (!ret) {
  2073. ret = -EINPROGRESS;
  2074. } else {
  2075. aead_unmap(jrdev, edesc, req);
  2076. kfree(edesc);
  2077. }
  2078. return ret;
  2079. }
  2080. static int ipsec_gcm_decrypt(struct aead_request *req)
  2081. {
  2082. if (req->assoclen < 8)
  2083. return -EINVAL;
  2084. return gcm_decrypt(req);
  2085. }
  2086. static int aead_decrypt(struct aead_request *req)
  2087. {
  2088. struct aead_edesc *edesc;
  2089. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2090. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  2091. struct device *jrdev = ctx->jrdev;
  2092. bool all_contig;
  2093. u32 *desc;
  2094. int ret = 0;
  2095. /* allocate extended descriptor */
  2096. edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
  2097. &all_contig, false);
  2098. if (IS_ERR(edesc))
  2099. return PTR_ERR(edesc);
  2100. #ifdef DEBUG
  2101. print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
  2102. DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
  2103. req->assoclen + req->cryptlen, 1);
  2104. #endif
  2105. /* Create and submit job descriptor*/
  2106. init_authenc_job(req, edesc, all_contig, false);
  2107. #ifdef DEBUG
  2108. print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
  2109. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2110. desc_bytes(edesc->hw_desc), 1);
  2111. #endif
  2112. desc = edesc->hw_desc;
  2113. ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
  2114. if (!ret) {
  2115. ret = -EINPROGRESS;
  2116. } else {
  2117. aead_unmap(jrdev, edesc, req);
  2118. kfree(edesc);
  2119. }
  2120. return ret;
  2121. }
  2122. static int aead_givdecrypt(struct aead_request *req)
  2123. {
  2124. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2125. unsigned int ivsize = crypto_aead_ivsize(aead);
  2126. if (req->cryptlen < ivsize)
  2127. return -EINVAL;
  2128. req->cryptlen -= ivsize;
  2129. req->assoclen += ivsize;
  2130. return aead_decrypt(req);
  2131. }
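/*
 * Here the explicit IV sits in front of the ciphertext in the source
 * buffer, so the adjustment above effectively re-labels those ivsize bytes
 * as associated data before taking the normal decrypt path.
 */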
  2132. /*
2133. * allocate and map the ablkcipher extended descriptor
  2134. */
  2135. static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
  2136. *req, int desc_bytes,
  2137. bool *iv_contig_out)
  2138. {
  2139. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2140. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2141. struct device *jrdev = ctx->jrdev;
  2142. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2143. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2144. GFP_KERNEL : GFP_ATOMIC;
  2145. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2146. struct ablkcipher_edesc *edesc;
  2147. dma_addr_t iv_dma = 0;
  2148. bool iv_contig = false;
  2149. int sgc;
  2150. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2151. int sec4_sg_index;
  2152. src_nents = sg_count(req->src, req->nbytes);
  2153. if (req->dst != req->src)
  2154. dst_nents = sg_count(req->dst, req->nbytes);
  2155. if (likely(req->src == req->dst)) {
  2156. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2157. DMA_BIDIRECTIONAL);
  2158. } else {
  2159. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2160. DMA_TO_DEVICE);
  2161. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  2162. DMA_FROM_DEVICE);
  2163. }
  2164. iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
  2165. if (dma_mapping_error(jrdev, iv_dma)) {
  2166. dev_err(jrdev, "unable to map IV\n");
  2167. return ERR_PTR(-ENOMEM);
  2168. }
  2169. /*
  2170. * Check if iv can be contiguous with source and destination.
  2171. * If so, include it. If not, create scatterlist.
  2172. */
  2173. if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
  2174. iv_contig = true;
  2175. else
  2176. src_nents = src_nents ? : 1;
  2177. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2178. sizeof(struct sec4_sg_entry);
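/*
 * In other words: if the whole payload is a single segment and the
 * DMA-mapped IV happens to sit immediately before it, the hardware can be
 * handed one flat buffer and no link table is needed; otherwise a sec4 S/G
 * table of [iv][src segments...][dst segments...] entries is built below.
 */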
  2179. /* allocate space for base edesc and hw desc commands, link tables */
  2180. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  2181. GFP_DMA | flags);
  2182. if (!edesc) {
  2183. dev_err(jrdev, "could not allocate extended descriptor\n");
  2184. return ERR_PTR(-ENOMEM);
  2185. }
  2186. edesc->src_nents = src_nents;
  2187. edesc->dst_nents = dst_nents;
  2188. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2189. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2190. desc_bytes;
  2191. sec4_sg_index = 0;
  2192. if (!iv_contig) {
  2193. dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
  2194. sg_to_sec4_sg_last(req->src, src_nents,
  2195. edesc->sec4_sg + 1, 0);
  2196. sec4_sg_index += 1 + src_nents;
  2197. }
  2198. if (dst_nents) {
  2199. sg_to_sec4_sg_last(req->dst, dst_nents,
  2200. edesc->sec4_sg + sec4_sg_index, 0);
  2201. }
  2202. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2203. sec4_sg_bytes, DMA_TO_DEVICE);
  2204. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2205. dev_err(jrdev, "unable to map S/G table\n");
  2206. return ERR_PTR(-ENOMEM);
  2207. }
  2208. edesc->iv_dma = iv_dma;
  2209. #ifdef DEBUG
  2210. print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
  2211. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2212. sec4_sg_bytes, 1);
  2213. #endif
  2214. *iv_contig_out = iv_contig;
  2215. return edesc;
  2216. }
  2217. static int ablkcipher_encrypt(struct ablkcipher_request *req)
  2218. {
  2219. struct ablkcipher_edesc *edesc;
  2220. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2221. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2222. struct device *jrdev = ctx->jrdev;
  2223. bool iv_contig;
  2224. u32 *desc;
  2225. int ret = 0;
  2226. /* allocate extended descriptor */
  2227. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2228. CAAM_CMD_SZ, &iv_contig);
  2229. if (IS_ERR(edesc))
  2230. return PTR_ERR(edesc);
  2231. /* Create and submit job descriptor*/
  2232. init_ablkcipher_job(ctx->sh_desc_enc,
  2233. ctx->sh_desc_enc_dma, edesc, req, iv_contig);
  2234. #ifdef DEBUG
  2235. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2236. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2237. desc_bytes(edesc->hw_desc), 1);
  2238. #endif
  2239. desc = edesc->hw_desc;
  2240. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2241. if (!ret) {
  2242. ret = -EINPROGRESS;
  2243. } else {
  2244. ablkcipher_unmap(jrdev, edesc, req);
  2245. kfree(edesc);
  2246. }
  2247. return ret;
  2248. }
  2249. static int ablkcipher_decrypt(struct ablkcipher_request *req)
  2250. {
  2251. struct ablkcipher_edesc *edesc;
  2252. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2253. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2254. struct device *jrdev = ctx->jrdev;
  2255. bool iv_contig;
  2256. u32 *desc;
  2257. int ret = 0;
  2258. /* allocate extended descriptor */
  2259. edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
  2260. CAAM_CMD_SZ, &iv_contig);
  2261. if (IS_ERR(edesc))
  2262. return PTR_ERR(edesc);
  2263. /* Create and submit job descriptor*/
  2264. init_ablkcipher_job(ctx->sh_desc_dec,
  2265. ctx->sh_desc_dec_dma, edesc, req, iv_contig);
  2266. desc = edesc->hw_desc;
  2267. #ifdef DEBUG
  2268. print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
  2269. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2270. desc_bytes(edesc->hw_desc), 1);
  2271. #endif
  2272. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
  2273. if (!ret) {
  2274. ret = -EINPROGRESS;
  2275. } else {
  2276. ablkcipher_unmap(jrdev, edesc, req);
  2277. kfree(edesc);
  2278. }
  2279. return ret;
  2280. }
  2281. /*
  2282. * allocate and map the ablkcipher extended descriptor
  2283. * for ablkcipher givencrypt
  2284. */
  2285. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  2286. struct skcipher_givcrypt_request *greq,
  2287. int desc_bytes,
  2288. bool *iv_contig_out)
  2289. {
  2290. struct ablkcipher_request *req = &greq->creq;
  2291. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2292. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2293. struct device *jrdev = ctx->jrdev;
  2294. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2295. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2296. GFP_KERNEL : GFP_ATOMIC;
  2297. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2298. struct ablkcipher_edesc *edesc;
  2299. dma_addr_t iv_dma = 0;
  2300. bool iv_contig = false;
  2301. int sgc;
  2302. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2303. int sec4_sg_index;
  2304. src_nents = sg_count(req->src, req->nbytes);
  2305. if (unlikely(req->dst != req->src))
  2306. dst_nents = sg_count(req->dst, req->nbytes);
  2307. if (likely(req->src == req->dst)) {
  2308. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2309. DMA_BIDIRECTIONAL);
  2310. } else {
  2311. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2312. DMA_TO_DEVICE);
  2313. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  2314. DMA_FROM_DEVICE);
  2315. }
  2316. /*
  2317. * Check if iv can be contiguous with source and destination.
  2318. * If so, include it. If not, create scatterlist.
  2319. */
  2320. iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
  2321. if (dma_mapping_error(jrdev, iv_dma)) {
  2322. dev_err(jrdev, "unable to map IV\n");
  2323. return ERR_PTR(-ENOMEM);
  2324. }
  2325. if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
  2326. iv_contig = true;
  2327. else
  2328. dst_nents = dst_nents ? : 1;
  2329. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2330. sizeof(struct sec4_sg_entry);
  2331. /* allocate space for base edesc and hw desc commands, link tables */
  2332. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  2333. GFP_DMA | flags);
  2334. if (!edesc) {
  2335. dev_err(jrdev, "could not allocate extended descriptor\n");
  2336. return ERR_PTR(-ENOMEM);
  2337. }
  2338. edesc->src_nents = src_nents;
  2339. edesc->dst_nents = dst_nents;
  2340. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2341. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2342. desc_bytes;
  2343. sec4_sg_index = 0;
  2344. if (src_nents) {
  2345. sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
  2346. sec4_sg_index += src_nents;
  2347. }
  2348. if (!iv_contig) {
  2349. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2350. iv_dma, ivsize, 0);
  2351. sec4_sg_index += 1;
  2352. sg_to_sec4_sg_last(req->dst, dst_nents,
  2353. edesc->sec4_sg + sec4_sg_index, 0);
  2354. }
  2355. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2356. sec4_sg_bytes, DMA_TO_DEVICE);
  2357. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
  2358. dev_err(jrdev, "unable to map S/G table\n");
  2359. return ERR_PTR(-ENOMEM);
  2360. }
  2361. edesc->iv_dma = iv_dma;
  2362. #ifdef DEBUG
  2363. print_hex_dump(KERN_ERR,
  2364. "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
  2365. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2366. sec4_sg_bytes, 1);
  2367. #endif
  2368. *iv_contig_out = iv_contig;
  2369. return edesc;
  2370. }
  2371. static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
  2372. {
  2373. struct ablkcipher_request *req = &creq->creq;
  2374. struct ablkcipher_edesc *edesc;
  2375. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2376. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2377. struct device *jrdev = ctx->jrdev;
  2378. bool iv_contig;
  2379. u32 *desc;
  2380. int ret = 0;
  2381. /* allocate extended descriptor */
  2382. edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
  2383. CAAM_CMD_SZ, &iv_contig);
  2384. if (IS_ERR(edesc))
  2385. return PTR_ERR(edesc);
  2386. /* Create and submit job descriptor*/
  2387. init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
  2388. edesc, req, iv_contig);
  2389. #ifdef DEBUG
  2390. print_hex_dump(KERN_ERR,
  2391. "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
  2392. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2393. desc_bytes(edesc->hw_desc), 1);
  2394. #endif
  2395. desc = edesc->hw_desc;
  2396. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2397. if (!ret) {
  2398. ret = -EINPROGRESS;
  2399. } else {
  2400. ablkcipher_unmap(jrdev, edesc, req);
  2401. kfree(edesc);
  2402. }
  2403. return ret;
  2404. }
  2405. #define template_aead template_u.aead
  2406. #define template_ablkcipher template_u.ablkcipher
  2407. struct caam_alg_template {
  2408. char name[CRYPTO_MAX_ALG_NAME];
  2409. char driver_name[CRYPTO_MAX_ALG_NAME];
  2410. unsigned int blocksize;
  2411. u32 type;
  2412. union {
  2413. struct ablkcipher_alg ablkcipher;
  2414. } template_u;
  2415. u32 class1_alg_type;
  2416. u32 class2_alg_type;
  2417. u32 alg_op;
  2418. };
  2419. static struct caam_alg_template driver_algs[] = {
  2420. /* ablkcipher descriptor */
  2421. {
  2422. .name = "cbc(aes)",
  2423. .driver_name = "cbc-aes-caam",
  2424. .blocksize = AES_BLOCK_SIZE,
  2425. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2426. .template_ablkcipher = {
  2427. .setkey = ablkcipher_setkey,
  2428. .encrypt = ablkcipher_encrypt,
  2429. .decrypt = ablkcipher_decrypt,
  2430. .givencrypt = ablkcipher_givencrypt,
  2431. .geniv = "<built-in>",
  2432. .min_keysize = AES_MIN_KEY_SIZE,
  2433. .max_keysize = AES_MAX_KEY_SIZE,
  2434. .ivsize = AES_BLOCK_SIZE,
  2435. },
  2436. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2437. },
  2438. {
  2439. .name = "cbc(des3_ede)",
  2440. .driver_name = "cbc-3des-caam",
  2441. .blocksize = DES3_EDE_BLOCK_SIZE,
  2442. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2443. .template_ablkcipher = {
  2444. .setkey = ablkcipher_setkey,
  2445. .encrypt = ablkcipher_encrypt,
  2446. .decrypt = ablkcipher_decrypt,
  2447. .givencrypt = ablkcipher_givencrypt,
  2448. .geniv = "<built-in>",
  2449. .min_keysize = DES3_EDE_KEY_SIZE,
  2450. .max_keysize = DES3_EDE_KEY_SIZE,
  2451. .ivsize = DES3_EDE_BLOCK_SIZE,
  2452. },
  2453. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2454. },
  2455. {
  2456. .name = "cbc(des)",
  2457. .driver_name = "cbc-des-caam",
  2458. .blocksize = DES_BLOCK_SIZE,
  2459. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2460. .template_ablkcipher = {
  2461. .setkey = ablkcipher_setkey,
  2462. .encrypt = ablkcipher_encrypt,
  2463. .decrypt = ablkcipher_decrypt,
  2464. .givencrypt = ablkcipher_givencrypt,
  2465. .geniv = "<built-in>",
  2466. .min_keysize = DES_KEY_SIZE,
  2467. .max_keysize = DES_KEY_SIZE,
  2468. .ivsize = DES_BLOCK_SIZE,
  2469. },
  2470. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2471. },
  2472. {
  2473. .name = "ctr(aes)",
  2474. .driver_name = "ctr-aes-caam",
  2475. .blocksize = 1,
  2476. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2477. .template_ablkcipher = {
  2478. .setkey = ablkcipher_setkey,
  2479. .encrypt = ablkcipher_encrypt,
  2480. .decrypt = ablkcipher_decrypt,
  2481. .geniv = "chainiv",
  2482. .min_keysize = AES_MIN_KEY_SIZE,
  2483. .max_keysize = AES_MAX_KEY_SIZE,
  2484. .ivsize = AES_BLOCK_SIZE,
  2485. },
  2486. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2487. },
  2488. {
  2489. .name = "rfc3686(ctr(aes))",
  2490. .driver_name = "rfc3686-ctr-aes-caam",
  2491. .blocksize = 1,
  2492. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2493. .template_ablkcipher = {
  2494. .setkey = ablkcipher_setkey,
  2495. .encrypt = ablkcipher_encrypt,
  2496. .decrypt = ablkcipher_decrypt,
  2497. .givencrypt = ablkcipher_givencrypt,
  2498. .geniv = "<built-in>",
  2499. .min_keysize = AES_MIN_KEY_SIZE +
  2500. CTR_RFC3686_NONCE_SIZE,
  2501. .max_keysize = AES_MAX_KEY_SIZE +
  2502. CTR_RFC3686_NONCE_SIZE,
  2503. .ivsize = CTR_RFC3686_IV_SIZE,
  2504. },
  2505. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2506. },
  2507. {
  2508. .name = "xts(aes)",
  2509. .driver_name = "xts-aes-caam",
  2510. .blocksize = AES_BLOCK_SIZE,
  2511. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2512. .template_ablkcipher = {
  2513. .setkey = xts_ablkcipher_setkey,
  2514. .encrypt = ablkcipher_encrypt,
  2515. .decrypt = ablkcipher_decrypt,
  2516. .geniv = "eseqiv",
  2517. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  2518. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  2519. .ivsize = AES_BLOCK_SIZE,
  2520. },
  2521. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  2522. },
  2523. };
  2524. static struct caam_aead_alg driver_aeads[] = {
  2525. {
  2526. .aead = {
  2527. .base = {
  2528. .cra_name = "rfc4106(gcm(aes))",
  2529. .cra_driver_name = "rfc4106-gcm-aes-caam",
  2530. .cra_blocksize = 1,
  2531. },
  2532. .setkey = rfc4106_setkey,
  2533. .setauthsize = rfc4106_setauthsize,
  2534. .encrypt = ipsec_gcm_encrypt,
  2535. .decrypt = ipsec_gcm_decrypt,
  2536. .ivsize = 8,
  2537. .maxauthsize = AES_BLOCK_SIZE,
  2538. },
  2539. .caam = {
  2540. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2541. },
  2542. },
  2543. {
  2544. .aead = {
  2545. .base = {
  2546. .cra_name = "rfc4543(gcm(aes))",
  2547. .cra_driver_name = "rfc4543-gcm-aes-caam",
  2548. .cra_blocksize = 1,
  2549. },
  2550. .setkey = rfc4543_setkey,
  2551. .setauthsize = rfc4543_setauthsize,
  2552. .encrypt = ipsec_gcm_encrypt,
  2553. .decrypt = ipsec_gcm_decrypt,
  2554. .ivsize = 8,
  2555. .maxauthsize = AES_BLOCK_SIZE,
  2556. },
  2557. .caam = {
  2558. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2559. },
  2560. },
  2561. /* Galois Counter Mode */
  2562. {
  2563. .aead = {
  2564. .base = {
  2565. .cra_name = "gcm(aes)",
  2566. .cra_driver_name = "gcm-aes-caam",
  2567. .cra_blocksize = 1,
  2568. },
  2569. .setkey = gcm_setkey,
  2570. .setauthsize = gcm_setauthsize,
  2571. .encrypt = gcm_encrypt,
  2572. .decrypt = gcm_decrypt,
  2573. .ivsize = 12,
  2574. .maxauthsize = AES_BLOCK_SIZE,
  2575. },
  2576. .caam = {
  2577. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2578. },
  2579. },
  2580. /* single-pass ipsec_esp descriptor */
  2581. {
  2582. .aead = {
  2583. .base = {
  2584. .cra_name = "authenc(hmac(md5),"
  2585. "ecb(cipher_null))",
  2586. .cra_driver_name = "authenc-hmac-md5-"
  2587. "ecb-cipher_null-caam",
  2588. .cra_blocksize = NULL_BLOCK_SIZE,
  2589. },
  2590. .setkey = aead_setkey,
  2591. .setauthsize = aead_setauthsize,
  2592. .encrypt = aead_encrypt,
  2593. .decrypt = aead_decrypt,
  2594. .ivsize = NULL_IV_SIZE,
  2595. .maxauthsize = MD5_DIGEST_SIZE,
  2596. },
  2597. .caam = {
  2598. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2599. OP_ALG_AAI_HMAC_PRECOMP,
  2600. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2601. },
  2602. },
  2603. {
  2604. .aead = {
  2605. .base = {
  2606. .cra_name = "authenc(hmac(sha1),"
  2607. "ecb(cipher_null))",
  2608. .cra_driver_name = "authenc-hmac-sha1-"
  2609. "ecb-cipher_null-caam",
  2610. .cra_blocksize = NULL_BLOCK_SIZE,
  2611. },
  2612. .setkey = aead_setkey,
  2613. .setauthsize = aead_setauthsize,
  2614. .encrypt = aead_encrypt,
  2615. .decrypt = aead_decrypt,
  2616. .ivsize = NULL_IV_SIZE,
  2617. .maxauthsize = SHA1_DIGEST_SIZE,
  2618. },
  2619. .caam = {
  2620. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2621. OP_ALG_AAI_HMAC_PRECOMP,
  2622. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2623. },
  2624. },
  2625. {
  2626. .aead = {
  2627. .base = {
  2628. .cra_name = "authenc(hmac(sha224),"
  2629. "ecb(cipher_null))",
  2630. .cra_driver_name = "authenc-hmac-sha224-"
  2631. "ecb-cipher_null-caam",
  2632. .cra_blocksize = NULL_BLOCK_SIZE,
  2633. },
  2634. .setkey = aead_setkey,
  2635. .setauthsize = aead_setauthsize,
  2636. .encrypt = aead_encrypt,
  2637. .decrypt = aead_decrypt,
  2638. .ivsize = NULL_IV_SIZE,
  2639. .maxauthsize = SHA224_DIGEST_SIZE,
  2640. },
  2641. .caam = {
  2642. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2643. OP_ALG_AAI_HMAC_PRECOMP,
  2644. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2645. },
  2646. },
  2647. {
  2648. .aead = {
  2649. .base = {
  2650. .cra_name = "authenc(hmac(sha256),"
  2651. "ecb(cipher_null))",
  2652. .cra_driver_name = "authenc-hmac-sha256-"
  2653. "ecb-cipher_null-caam",
  2654. .cra_blocksize = NULL_BLOCK_SIZE,
  2655. },
  2656. .setkey = aead_setkey,
  2657. .setauthsize = aead_setauthsize,
  2658. .encrypt = aead_encrypt,
  2659. .decrypt = aead_decrypt,
  2660. .ivsize = NULL_IV_SIZE,
  2661. .maxauthsize = SHA256_DIGEST_SIZE,
  2662. },
  2663. .caam = {
  2664. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2665. OP_ALG_AAI_HMAC_PRECOMP,
  2666. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2667. },
  2668. },
  2669. {
  2670. .aead = {
  2671. .base = {
  2672. .cra_name = "authenc(hmac(sha384),"
  2673. "ecb(cipher_null))",
  2674. .cra_driver_name = "authenc-hmac-sha384-"
  2675. "ecb-cipher_null-caam",
  2676. .cra_blocksize = NULL_BLOCK_SIZE,
  2677. },
  2678. .setkey = aead_setkey,
  2679. .setauthsize = aead_setauthsize,
  2680. .encrypt = aead_encrypt,
  2681. .decrypt = aead_decrypt,
  2682. .ivsize = NULL_IV_SIZE,
  2683. .maxauthsize = SHA384_DIGEST_SIZE,
  2684. },
  2685. .caam = {
  2686. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2687. OP_ALG_AAI_HMAC_PRECOMP,
  2688. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2689. },
  2690. },
  2691. {
  2692. .aead = {
  2693. .base = {
  2694. .cra_name = "authenc(hmac(sha512),"
  2695. "ecb(cipher_null))",
  2696. .cra_driver_name = "authenc-hmac-sha512-"
  2697. "ecb-cipher_null-caam",
  2698. .cra_blocksize = NULL_BLOCK_SIZE,
  2699. },
  2700. .setkey = aead_setkey,
  2701. .setauthsize = aead_setauthsize,
  2702. .encrypt = aead_encrypt,
  2703. .decrypt = aead_decrypt,
  2704. .ivsize = NULL_IV_SIZE,
  2705. .maxauthsize = SHA512_DIGEST_SIZE,
  2706. },
  2707. .caam = {
  2708. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2709. OP_ALG_AAI_HMAC_PRECOMP,
  2710. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2711. },
  2712. },
  2713. {
  2714. .aead = {
  2715. .base = {
  2716. .cra_name = "authenc(hmac(md5),cbc(aes))",
  2717. .cra_driver_name = "authenc-hmac-md5-"
  2718. "cbc-aes-caam",
  2719. .cra_blocksize = AES_BLOCK_SIZE,
  2720. },
  2721. .setkey = aead_setkey,
  2722. .setauthsize = aead_setauthsize,
  2723. .encrypt = aead_encrypt,
  2724. .decrypt = aead_decrypt,
  2725. .ivsize = AES_BLOCK_SIZE,
  2726. .maxauthsize = MD5_DIGEST_SIZE,
  2727. },
  2728. .caam = {
  2729. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2730. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2731. OP_ALG_AAI_HMAC_PRECOMP,
  2732. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2733. },
  2734. },
  2735. {
  2736. .aead = {
  2737. .base = {
  2738. .cra_name = "echainiv(authenc(hmac(md5),"
  2739. "cbc(aes)))",
  2740. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  2741. "cbc-aes-caam",
  2742. .cra_blocksize = AES_BLOCK_SIZE,
  2743. },
  2744. .setkey = aead_setkey,
  2745. .setauthsize = aead_setauthsize,
  2746. .encrypt = aead_encrypt,
  2747. .decrypt = aead_givdecrypt,
  2748. .ivsize = AES_BLOCK_SIZE,
  2749. .maxauthsize = MD5_DIGEST_SIZE,
  2750. },
  2751. .caam = {
  2752. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2753. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2754. OP_ALG_AAI_HMAC_PRECOMP,
  2755. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2756. .geniv = true,
  2757. },
  2758. },
  2759. {
  2760. .aead = {
  2761. .base = {
  2762. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  2763. .cra_driver_name = "authenc-hmac-sha1-"
  2764. "cbc-aes-caam",
  2765. .cra_blocksize = AES_BLOCK_SIZE,
  2766. },
  2767. .setkey = aead_setkey,
  2768. .setauthsize = aead_setauthsize,
  2769. .encrypt = aead_encrypt,
  2770. .decrypt = aead_decrypt,
  2771. .ivsize = AES_BLOCK_SIZE,
  2772. .maxauthsize = SHA1_DIGEST_SIZE,
  2773. },
  2774. .caam = {
  2775. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2776. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2777. OP_ALG_AAI_HMAC_PRECOMP,
  2778. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2779. },
  2780. },
  2781. {
  2782. .aead = {
  2783. .base = {
  2784. .cra_name = "echainiv(authenc(hmac(sha1),"
  2785. "cbc(aes)))",
  2786. .cra_driver_name = "echainiv-authenc-"
  2787. "hmac-sha1-cbc-aes-caam",
  2788. .cra_blocksize = AES_BLOCK_SIZE,
  2789. },
  2790. .setkey = aead_setkey,
  2791. .setauthsize = aead_setauthsize,
  2792. .encrypt = aead_encrypt,
  2793. .decrypt = aead_givdecrypt,
  2794. .ivsize = AES_BLOCK_SIZE,
  2795. .maxauthsize = SHA1_DIGEST_SIZE,
  2796. },
  2797. .caam = {
  2798. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2799. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2800. OP_ALG_AAI_HMAC_PRECOMP,
  2801. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2802. .geniv = true,
  2803. },
  2804. },
  2805. {
  2806. .aead = {
  2807. .base = {
  2808. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  2809. .cra_driver_name = "authenc-hmac-sha224-"
  2810. "cbc-aes-caam",
  2811. .cra_blocksize = AES_BLOCK_SIZE,
  2812. },
  2813. .setkey = aead_setkey,
  2814. .setauthsize = aead_setauthsize,
  2815. .encrypt = aead_encrypt,
  2816. .decrypt = aead_decrypt,
  2817. .ivsize = AES_BLOCK_SIZE,
  2818. .maxauthsize = SHA224_DIGEST_SIZE,
  2819. },
  2820. .caam = {
  2821. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2822. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2823. OP_ALG_AAI_HMAC_PRECOMP,
  2824. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2825. },
  2826. },
  2827. {
  2828. .aead = {
  2829. .base = {
  2830. .cra_name = "echainiv(authenc(hmac(sha224),"
  2831. "cbc(aes)))",
  2832. .cra_driver_name = "echainiv-authenc-"
  2833. "hmac-sha224-cbc-aes-caam",
  2834. .cra_blocksize = AES_BLOCK_SIZE,
  2835. },
  2836. .setkey = aead_setkey,
  2837. .setauthsize = aead_setauthsize,
  2838. .encrypt = aead_encrypt,
  2839. .decrypt = aead_givdecrypt,
  2840. .ivsize = AES_BLOCK_SIZE,
  2841. .maxauthsize = SHA224_DIGEST_SIZE,
  2842. },
  2843. .caam = {
  2844. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2845. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2846. OP_ALG_AAI_HMAC_PRECOMP,
  2847. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2848. .geniv = true,
  2849. },
  2850. },
  2851. {
  2852. .aead = {
  2853. .base = {
  2854. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2855. .cra_driver_name = "authenc-hmac-sha256-"
  2856. "cbc-aes-caam",
  2857. .cra_blocksize = AES_BLOCK_SIZE,
  2858. },
  2859. .setkey = aead_setkey,
  2860. .setauthsize = aead_setauthsize,
  2861. .encrypt = aead_encrypt,
  2862. .decrypt = aead_decrypt,
  2863. .ivsize = AES_BLOCK_SIZE,
  2864. .maxauthsize = SHA256_DIGEST_SIZE,
  2865. },
  2866. .caam = {
  2867. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2868. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2869. OP_ALG_AAI_HMAC_PRECOMP,
  2870. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2871. },
  2872. },
  2873. {
  2874. .aead = {
  2875. .base = {
  2876. .cra_name = "echainiv(authenc(hmac(sha256),"
  2877. "cbc(aes)))",
  2878. .cra_driver_name = "echainiv-authenc-"
  2879. "hmac-sha256-cbc-aes-caam",
  2880. .cra_blocksize = AES_BLOCK_SIZE,
  2881. },
  2882. .setkey = aead_setkey,
  2883. .setauthsize = aead_setauthsize,
  2884. .encrypt = aead_encrypt,
  2885. .decrypt = aead_givdecrypt,
  2886. .ivsize = AES_BLOCK_SIZE,
  2887. .maxauthsize = SHA256_DIGEST_SIZE,
  2888. },
  2889. .caam = {
  2890. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2891. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2892. OP_ALG_AAI_HMAC_PRECOMP,
  2893. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2894. .geniv = true,
  2895. },
  2896. },
  2897. {
  2898. .aead = {
  2899. .base = {
  2900. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  2901. .cra_driver_name = "authenc-hmac-sha384-"
  2902. "cbc-aes-caam",
  2903. .cra_blocksize = AES_BLOCK_SIZE,
  2904. },
  2905. .setkey = aead_setkey,
  2906. .setauthsize = aead_setauthsize,
  2907. .encrypt = aead_encrypt,
  2908. .decrypt = aead_decrypt,
  2909. .ivsize = AES_BLOCK_SIZE,
  2910. .maxauthsize = SHA384_DIGEST_SIZE,
  2911. },
  2912. .caam = {
  2913. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2914. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2915. OP_ALG_AAI_HMAC_PRECOMP,
  2916. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2917. },
  2918. },
  2919. {
  2920. .aead = {
  2921. .base = {
  2922. .cra_name = "echainiv(authenc(hmac(sha384),"
  2923. "cbc(aes)))",
  2924. .cra_driver_name = "echainiv-authenc-"
  2925. "hmac-sha384-cbc-aes-caam",
  2926. .cra_blocksize = AES_BLOCK_SIZE,
  2927. },
  2928. .setkey = aead_setkey,
  2929. .setauthsize = aead_setauthsize,
  2930. .encrypt = aead_encrypt,
  2931. .decrypt = aead_givdecrypt,
  2932. .ivsize = AES_BLOCK_SIZE,
  2933. .maxauthsize = SHA384_DIGEST_SIZE,
  2934. },
  2935. .caam = {
  2936. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2937. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2938. OP_ALG_AAI_HMAC_PRECOMP,
  2939. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2940. .geniv = true,
  2941. },
  2942. },
  2943. {
  2944. .aead = {
  2945. .base = {
  2946. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  2947. .cra_driver_name = "authenc-hmac-sha512-"
  2948. "cbc-aes-caam",
  2949. .cra_blocksize = AES_BLOCK_SIZE,
  2950. },
  2951. .setkey = aead_setkey,
  2952. .setauthsize = aead_setauthsize,
  2953. .encrypt = aead_encrypt,
  2954. .decrypt = aead_decrypt,
  2955. .ivsize = AES_BLOCK_SIZE,
  2956. .maxauthsize = SHA512_DIGEST_SIZE,
  2957. },
  2958. .caam = {
  2959. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2960. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2961. OP_ALG_AAI_HMAC_PRECOMP,
  2962. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2963. },
  2964. },
  2965. {
  2966. .aead = {
  2967. .base = {
  2968. .cra_name = "echainiv(authenc(hmac(sha512),"
  2969. "cbc(aes)))",
  2970. .cra_driver_name = "echainiv-authenc-"
  2971. "hmac-sha512-cbc-aes-caam",
  2972. .cra_blocksize = AES_BLOCK_SIZE,
  2973. },
  2974. .setkey = aead_setkey,
  2975. .setauthsize = aead_setauthsize,
  2976. .encrypt = aead_encrypt,
  2977. .decrypt = aead_givdecrypt,
  2978. .ivsize = AES_BLOCK_SIZE,
  2979. .maxauthsize = SHA512_DIGEST_SIZE,
  2980. },
  2981. .caam = {
  2982. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2983. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2984. OP_ALG_AAI_HMAC_PRECOMP,
  2985. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2986. .geniv = true,
  2987. },
  2988. },
  2989. {
  2990. .aead = {
  2991. .base = {
  2992. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  2993. .cra_driver_name = "authenc-hmac-md5-"
  2994. "cbc-des3_ede-caam",
  2995. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  2996. },
  2997. .setkey = aead_setkey,
  2998. .setauthsize = aead_setauthsize,
  2999. .encrypt = aead_encrypt,
  3000. .decrypt = aead_decrypt,
  3001. .ivsize = DES3_EDE_BLOCK_SIZE,
  3002. .maxauthsize = MD5_DIGEST_SIZE,
  3003. },
  3004. .caam = {
  3005. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3006. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3007. OP_ALG_AAI_HMAC_PRECOMP,
  3008. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3009. }
  3010. },
  3011. {
  3012. .aead = {
  3013. .base = {
  3014. .cra_name = "echainiv(authenc(hmac(md5),"
  3015. "cbc(des3_ede)))",
  3016. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3017. "cbc-des3_ede-caam",
  3018. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3019. },
  3020. .setkey = aead_setkey,
  3021. .setauthsize = aead_setauthsize,
  3022. .encrypt = aead_encrypt,
  3023. .decrypt = aead_givdecrypt,
  3024. .ivsize = DES3_EDE_BLOCK_SIZE,
  3025. .maxauthsize = MD5_DIGEST_SIZE,
  3026. },
  3027. .caam = {
  3028. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3029. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3030. OP_ALG_AAI_HMAC_PRECOMP,
  3031. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3032. .geniv = true,
  3033. }
  3034. },
  3035. {
  3036. .aead = {
  3037. .base = {
  3038. .cra_name = "authenc(hmac(sha1),"
  3039. "cbc(des3_ede))",
  3040. .cra_driver_name = "authenc-hmac-sha1-"
  3041. "cbc-des3_ede-caam",
  3042. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3043. },
  3044. .setkey = aead_setkey,
  3045. .setauthsize = aead_setauthsize,
  3046. .encrypt = aead_encrypt,
  3047. .decrypt = aead_decrypt,
  3048. .ivsize = DES3_EDE_BLOCK_SIZE,
  3049. .maxauthsize = SHA1_DIGEST_SIZE,
  3050. },
  3051. .caam = {
  3052. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3053. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3054. OP_ALG_AAI_HMAC_PRECOMP,
  3055. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3056. },
  3057. },
  3058. {
  3059. .aead = {
  3060. .base = {
  3061. .cra_name = "echainiv(authenc(hmac(sha1),"
  3062. "cbc(des3_ede)))",
  3063. .cra_driver_name = "echainiv-authenc-"
  3064. "hmac-sha1-"
  3065. "cbc-des3_ede-caam",
  3066. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3067. },
  3068. .setkey = aead_setkey,
  3069. .setauthsize = aead_setauthsize,
  3070. .encrypt = aead_encrypt,
  3071. .decrypt = aead_givdecrypt,
  3072. .ivsize = DES3_EDE_BLOCK_SIZE,
  3073. .maxauthsize = SHA1_DIGEST_SIZE,
  3074. },
  3075. .caam = {
  3076. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3077. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3078. OP_ALG_AAI_HMAC_PRECOMP,
  3079. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3080. .geniv = true,
  3081. },
  3082. },
  3083. {
  3084. .aead = {
  3085. .base = {
  3086. .cra_name = "authenc(hmac(sha224),"
  3087. "cbc(des3_ede))",
  3088. .cra_driver_name = "authenc-hmac-sha224-"
  3089. "cbc-des3_ede-caam",
  3090. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3091. },
  3092. .setkey = aead_setkey,
  3093. .setauthsize = aead_setauthsize,
  3094. .encrypt = aead_encrypt,
  3095. .decrypt = aead_decrypt,
  3096. .ivsize = DES3_EDE_BLOCK_SIZE,
  3097. .maxauthsize = SHA224_DIGEST_SIZE,
  3098. },
  3099. .caam = {
  3100. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3101. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3102. OP_ALG_AAI_HMAC_PRECOMP,
  3103. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3104. },
  3105. },
  3106. {
  3107. .aead = {
  3108. .base = {
  3109. .cra_name = "echainiv(authenc(hmac(sha224),"
  3110. "cbc(des3_ede)))",
  3111. .cra_driver_name = "echainiv-authenc-"
  3112. "hmac-sha224-"
  3113. "cbc-des3_ede-caam",
  3114. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3115. },
  3116. .setkey = aead_setkey,
  3117. .setauthsize = aead_setauthsize,
  3118. .encrypt = aead_encrypt,
  3119. .decrypt = aead_givdecrypt,
  3120. .ivsize = DES3_EDE_BLOCK_SIZE,
  3121. .maxauthsize = SHA224_DIGEST_SIZE,
  3122. },
  3123. .caam = {
  3124. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3125. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3126. OP_ALG_AAI_HMAC_PRECOMP,
  3127. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3128. .geniv = true,
  3129. },
  3130. },
  3131. {
  3132. .aead = {
  3133. .base = {
  3134. .cra_name = "authenc(hmac(sha256),"
  3135. "cbc(des3_ede))",
  3136. .cra_driver_name = "authenc-hmac-sha256-"
  3137. "cbc-des3_ede-caam",
  3138. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3139. },
  3140. .setkey = aead_setkey,
  3141. .setauthsize = aead_setauthsize,
  3142. .encrypt = aead_encrypt,
  3143. .decrypt = aead_decrypt,
  3144. .ivsize = DES3_EDE_BLOCK_SIZE,
  3145. .maxauthsize = SHA256_DIGEST_SIZE,
  3146. },
  3147. .caam = {
  3148. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3149. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3150. OP_ALG_AAI_HMAC_PRECOMP,
  3151. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3152. },
  3153. },
  3154. {
  3155. .aead = {
  3156. .base = {
  3157. .cra_name = "echainiv(authenc(hmac(sha256),"
  3158. "cbc(des3_ede)))",
  3159. .cra_driver_name = "echainiv-authenc-"
  3160. "hmac-sha256-"
  3161. "cbc-des3_ede-caam",
  3162. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3163. },
  3164. .setkey = aead_setkey,
  3165. .setauthsize = aead_setauthsize,
  3166. .encrypt = aead_encrypt,
  3167. .decrypt = aead_givdecrypt,
  3168. .ivsize = DES3_EDE_BLOCK_SIZE,
  3169. .maxauthsize = SHA256_DIGEST_SIZE,
  3170. },
  3171. .caam = {
  3172. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3173. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3174. OP_ALG_AAI_HMAC_PRECOMP,
  3175. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3176. .geniv = true,
  3177. },
  3178. },
  3179. {
  3180. .aead = {
  3181. .base = {
  3182. .cra_name = "authenc(hmac(sha384),"
  3183. "cbc(des3_ede))",
  3184. .cra_driver_name = "authenc-hmac-sha384-"
  3185. "cbc-des3_ede-caam",
  3186. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3187. },
  3188. .setkey = aead_setkey,
  3189. .setauthsize = aead_setauthsize,
  3190. .encrypt = aead_encrypt,
  3191. .decrypt = aead_decrypt,
  3192. .ivsize = DES3_EDE_BLOCK_SIZE,
  3193. .maxauthsize = SHA384_DIGEST_SIZE,
  3194. },
  3195. .caam = {
  3196. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3197. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3198. OP_ALG_AAI_HMAC_PRECOMP,
  3199. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3200. },
  3201. },
  3202. {
  3203. .aead = {
  3204. .base = {
  3205. .cra_name = "echainiv(authenc(hmac(sha384),"
  3206. "cbc(des3_ede)))",
  3207. .cra_driver_name = "echainiv-authenc-"
  3208. "hmac-sha384-"
  3209. "cbc-des3_ede-caam",
  3210. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3211. },
  3212. .setkey = aead_setkey,
  3213. .setauthsize = aead_setauthsize,
  3214. .encrypt = aead_encrypt,
  3215. .decrypt = aead_givdecrypt,
  3216. .ivsize = DES3_EDE_BLOCK_SIZE,
  3217. .maxauthsize = SHA384_DIGEST_SIZE,
  3218. },
  3219. .caam = {
  3220. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3221. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3222. OP_ALG_AAI_HMAC_PRECOMP,
  3223. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3224. .geniv = true,
  3225. },
  3226. },
  3227. {
  3228. .aead = {
  3229. .base = {
  3230. .cra_name = "authenc(hmac(sha512),"
  3231. "cbc(des3_ede))",
  3232. .cra_driver_name = "authenc-hmac-sha512-"
  3233. "cbc-des3_ede-caam",
  3234. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3235. },
  3236. .setkey = aead_setkey,
  3237. .setauthsize = aead_setauthsize,
  3238. .encrypt = aead_encrypt,
  3239. .decrypt = aead_decrypt,
  3240. .ivsize = DES3_EDE_BLOCK_SIZE,
  3241. .maxauthsize = SHA512_DIGEST_SIZE,
  3242. },
  3243. .caam = {
  3244. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3245. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3246. OP_ALG_AAI_HMAC_PRECOMP,
  3247. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3248. },
  3249. },
  3250. {
  3251. .aead = {
  3252. .base = {
  3253. .cra_name = "echainiv(authenc(hmac(sha512),"
  3254. "cbc(des3_ede)))",
  3255. .cra_driver_name = "echainiv-authenc-"
  3256. "hmac-sha512-"
  3257. "cbc-des3_ede-caam",
  3258. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3259. },
  3260. .setkey = aead_setkey,
  3261. .setauthsize = aead_setauthsize,
  3262. .encrypt = aead_encrypt,
  3263. .decrypt = aead_givdecrypt,
  3264. .ivsize = DES3_EDE_BLOCK_SIZE,
  3265. .maxauthsize = SHA512_DIGEST_SIZE,
  3266. },
  3267. .caam = {
  3268. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3269. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3270. OP_ALG_AAI_HMAC_PRECOMP,
  3271. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3272. .geniv = true,
  3273. },
  3274. },
  3275. {
  3276. .aead = {
  3277. .base = {
  3278. .cra_name = "authenc(hmac(md5),cbc(des))",
  3279. .cra_driver_name = "authenc-hmac-md5-"
  3280. "cbc-des-caam",
  3281. .cra_blocksize = DES_BLOCK_SIZE,
  3282. },
  3283. .setkey = aead_setkey,
  3284. .setauthsize = aead_setauthsize,
  3285. .encrypt = aead_encrypt,
  3286. .decrypt = aead_decrypt,
  3287. .ivsize = DES_BLOCK_SIZE,
  3288. .maxauthsize = MD5_DIGEST_SIZE,
  3289. },
  3290. .caam = {
  3291. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3292. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3293. OP_ALG_AAI_HMAC_PRECOMP,
  3294. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3295. },
  3296. },
  3297. {
  3298. .aead = {
  3299. .base = {
  3300. .cra_name = "echainiv(authenc(hmac(md5),"
  3301. "cbc(des)))",
  3302. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3303. "cbc-des-caam",
  3304. .cra_blocksize = DES_BLOCK_SIZE,
  3305. },
  3306. .setkey = aead_setkey,
  3307. .setauthsize = aead_setauthsize,
  3308. .encrypt = aead_encrypt,
  3309. .decrypt = aead_givdecrypt,
  3310. .ivsize = DES_BLOCK_SIZE,
  3311. .maxauthsize = MD5_DIGEST_SIZE,
  3312. },
  3313. .caam = {
  3314. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3315. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3316. OP_ALG_AAI_HMAC_PRECOMP,
  3317. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3318. .geniv = true,
  3319. },
  3320. },
  3321. {
  3322. .aead = {
  3323. .base = {
  3324. .cra_name = "authenc(hmac(sha1),cbc(des))",
  3325. .cra_driver_name = "authenc-hmac-sha1-"
  3326. "cbc-des-caam",
  3327. .cra_blocksize = DES_BLOCK_SIZE,
  3328. },
  3329. .setkey = aead_setkey,
  3330. .setauthsize = aead_setauthsize,
  3331. .encrypt = aead_encrypt,
  3332. .decrypt = aead_decrypt,
  3333. .ivsize = DES_BLOCK_SIZE,
  3334. .maxauthsize = SHA1_DIGEST_SIZE,
  3335. },
  3336. .caam = {
  3337. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3338. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3339. OP_ALG_AAI_HMAC_PRECOMP,
  3340. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3341. },
  3342. },
  3343. {
  3344. .aead = {
  3345. .base = {
  3346. .cra_name = "echainiv(authenc(hmac(sha1),"
  3347. "cbc(des)))",
  3348. .cra_driver_name = "echainiv-authenc-"
  3349. "hmac-sha1-cbc-des-caam",
  3350. .cra_blocksize = DES_BLOCK_SIZE,
  3351. },
  3352. .setkey = aead_setkey,
  3353. .setauthsize = aead_setauthsize,
  3354. .encrypt = aead_encrypt,
  3355. .decrypt = aead_givdecrypt,
  3356. .ivsize = DES_BLOCK_SIZE,
  3357. .maxauthsize = SHA1_DIGEST_SIZE,
  3358. },
  3359. .caam = {
  3360. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3361. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3362. OP_ALG_AAI_HMAC_PRECOMP,
  3363. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3364. .geniv = true,
  3365. },
  3366. },
  3367. {
  3368. .aead = {
  3369. .base = {
  3370. .cra_name = "authenc(hmac(sha224),cbc(des))",
  3371. .cra_driver_name = "authenc-hmac-sha224-"
  3372. "cbc-des-caam",
  3373. .cra_blocksize = DES_BLOCK_SIZE,
  3374. },
  3375. .setkey = aead_setkey,
  3376. .setauthsize = aead_setauthsize,
  3377. .encrypt = aead_encrypt,
  3378. .decrypt = aead_decrypt,
  3379. .ivsize = DES_BLOCK_SIZE,
  3380. .maxauthsize = SHA224_DIGEST_SIZE,
  3381. },
  3382. .caam = {
  3383. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3384. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3385. OP_ALG_AAI_HMAC_PRECOMP,
  3386. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3387. },
  3388. },
  3389. {
  3390. .aead = {
  3391. .base = {
  3392. .cra_name = "echainiv(authenc(hmac(sha224),"
  3393. "cbc(des)))",
  3394. .cra_driver_name = "echainiv-authenc-"
  3395. "hmac-sha224-cbc-des-caam",
  3396. .cra_blocksize = DES_BLOCK_SIZE,
  3397. },
  3398. .setkey = aead_setkey,
  3399. .setauthsize = aead_setauthsize,
  3400. .encrypt = aead_encrypt,
  3401. .decrypt = aead_givdecrypt,
  3402. .ivsize = DES_BLOCK_SIZE,
  3403. .maxauthsize = SHA224_DIGEST_SIZE,
  3404. },
  3405. .caam = {
  3406. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3407. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3408. OP_ALG_AAI_HMAC_PRECOMP,
  3409. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3410. .geniv = true,
  3411. },
  3412. },
  3413. {
  3414. .aead = {
  3415. .base = {
  3416. .cra_name = "authenc(hmac(sha256),cbc(des))",
  3417. .cra_driver_name = "authenc-hmac-sha256-"
  3418. "cbc-des-caam",
  3419. .cra_blocksize = DES_BLOCK_SIZE,
  3420. },
  3421. .setkey = aead_setkey,
  3422. .setauthsize = aead_setauthsize,
  3423. .encrypt = aead_encrypt,
  3424. .decrypt = aead_decrypt,
  3425. .ivsize = DES_BLOCK_SIZE,
  3426. .maxauthsize = SHA256_DIGEST_SIZE,
  3427. },
  3428. .caam = {
  3429. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3430. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3431. OP_ALG_AAI_HMAC_PRECOMP,
  3432. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3433. },
  3434. },
  3435. {
  3436. .aead = {
  3437. .base = {
  3438. .cra_name = "echainiv(authenc(hmac(sha256),"
  3439. "cbc(des)))",
  3440. .cra_driver_name = "echainiv-authenc-"
  3441. "hmac-sha256-cbc-des-caam",
  3442. .cra_blocksize = DES_BLOCK_SIZE,
  3443. },
  3444. .setkey = aead_setkey,
  3445. .setauthsize = aead_setauthsize,
  3446. .encrypt = aead_encrypt,
  3447. .decrypt = aead_givdecrypt,
  3448. .ivsize = DES_BLOCK_SIZE,
  3449. .maxauthsize = SHA256_DIGEST_SIZE,
  3450. },
  3451. .caam = {
  3452. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3453. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3454. OP_ALG_AAI_HMAC_PRECOMP,
  3455. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3456. .geniv = true,
  3457. },
  3458. },
  3459. {
  3460. .aead = {
  3461. .base = {
  3462. .cra_name = "authenc(hmac(sha384),cbc(des))",
  3463. .cra_driver_name = "authenc-hmac-sha384-"
  3464. "cbc-des-caam",
  3465. .cra_blocksize = DES_BLOCK_SIZE,
  3466. },
  3467. .setkey = aead_setkey,
  3468. .setauthsize = aead_setauthsize,
  3469. .encrypt = aead_encrypt,
  3470. .decrypt = aead_decrypt,
  3471. .ivsize = DES_BLOCK_SIZE,
  3472. .maxauthsize = SHA384_DIGEST_SIZE,
  3473. },
  3474. .caam = {
  3475. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3476. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3477. OP_ALG_AAI_HMAC_PRECOMP,
  3478. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3479. },
  3480. },
  3481. {
  3482. .aead = {
  3483. .base = {
  3484. .cra_name = "echainiv(authenc(hmac(sha384),"
  3485. "cbc(des)))",
  3486. .cra_driver_name = "echainiv-authenc-"
  3487. "hmac-sha384-cbc-des-caam",
  3488. .cra_blocksize = DES_BLOCK_SIZE,
  3489. },
  3490. .setkey = aead_setkey,
  3491. .setauthsize = aead_setauthsize,
  3492. .encrypt = aead_encrypt,
  3493. .decrypt = aead_givdecrypt,
  3494. .ivsize = DES_BLOCK_SIZE,
  3495. .maxauthsize = SHA384_DIGEST_SIZE,
  3496. },
  3497. .caam = {
  3498. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3499. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3500. OP_ALG_AAI_HMAC_PRECOMP,
  3501. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3502. .geniv = true,
  3503. },
  3504. },
  3505. {
  3506. .aead = {
  3507. .base = {
  3508. .cra_name = "authenc(hmac(sha512),cbc(des))",
  3509. .cra_driver_name = "authenc-hmac-sha512-"
  3510. "cbc-des-caam",
  3511. .cra_blocksize = DES_BLOCK_SIZE,
  3512. },
  3513. .setkey = aead_setkey,
  3514. .setauthsize = aead_setauthsize,
  3515. .encrypt = aead_encrypt,
  3516. .decrypt = aead_decrypt,
  3517. .ivsize = DES_BLOCK_SIZE,
  3518. .maxauthsize = SHA512_DIGEST_SIZE,
  3519. },
  3520. .caam = {
  3521. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3522. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3523. OP_ALG_AAI_HMAC_PRECOMP,
  3524. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3525. },
  3526. },
  3527. {
  3528. .aead = {
  3529. .base = {
  3530. .cra_name = "echainiv(authenc(hmac(sha512),"
  3531. "cbc(des)))",
  3532. .cra_driver_name = "echainiv-authenc-"
  3533. "hmac-sha512-cbc-des-caam",
  3534. .cra_blocksize = DES_BLOCK_SIZE,
  3535. },
  3536. .setkey = aead_setkey,
  3537. .setauthsize = aead_setauthsize,
  3538. .encrypt = aead_encrypt,
  3539. .decrypt = aead_givdecrypt,
  3540. .ivsize = DES_BLOCK_SIZE,
  3541. .maxauthsize = SHA512_DIGEST_SIZE,
  3542. },
  3543. .caam = {
  3544. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3545. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3546. OP_ALG_AAI_HMAC_PRECOMP,
  3547. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3548. .geniv = true,
  3549. },
  3550. },
  3551. {
  3552. .aead = {
  3553. .base = {
  3554. .cra_name = "authenc(hmac(md5),"
  3555. "rfc3686(ctr(aes)))",
  3556. .cra_driver_name = "authenc-hmac-md5-"
  3557. "rfc3686-ctr-aes-caam",
  3558. .cra_blocksize = 1,
  3559. },
  3560. .setkey = aead_setkey,
  3561. .setauthsize = aead_setauthsize,
  3562. .encrypt = aead_encrypt,
  3563. .decrypt = aead_decrypt,
  3564. .ivsize = CTR_RFC3686_IV_SIZE,
  3565. .maxauthsize = MD5_DIGEST_SIZE,
  3566. },
  3567. .caam = {
  3568. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3569. OP_ALG_AAI_CTR_MOD128,
  3570. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3571. OP_ALG_AAI_HMAC_PRECOMP,
  3572. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3573. .rfc3686 = true,
  3574. },
  3575. },
  3576. {
  3577. .aead = {
  3578. .base = {
  3579. .cra_name = "seqiv(authenc("
  3580. "hmac(md5),rfc3686(ctr(aes))))",
  3581. .cra_driver_name = "seqiv-authenc-hmac-md5-"
  3582. "rfc3686-ctr-aes-caam",
  3583. .cra_blocksize = 1,
  3584. },
  3585. .setkey = aead_setkey,
  3586. .setauthsize = aead_setauthsize,
  3587. .encrypt = aead_encrypt,
  3588. .decrypt = aead_givdecrypt,
  3589. .ivsize = CTR_RFC3686_IV_SIZE,
  3590. .maxauthsize = MD5_DIGEST_SIZE,
  3591. },
  3592. .caam = {
  3593. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3594. OP_ALG_AAI_CTR_MOD128,
  3595. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3596. OP_ALG_AAI_HMAC_PRECOMP,
  3597. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3598. .rfc3686 = true,
  3599. .geniv = true,
  3600. },
  3601. },
  3602. {
  3603. .aead = {
  3604. .base = {
  3605. .cra_name = "authenc(hmac(sha1),"
  3606. "rfc3686(ctr(aes)))",
  3607. .cra_driver_name = "authenc-hmac-sha1-"
  3608. "rfc3686-ctr-aes-caam",
  3609. .cra_blocksize = 1,
  3610. },
  3611. .setkey = aead_setkey,
  3612. .setauthsize = aead_setauthsize,
  3613. .encrypt = aead_encrypt,
  3614. .decrypt = aead_decrypt,
  3615. .ivsize = CTR_RFC3686_IV_SIZE,
  3616. .maxauthsize = SHA1_DIGEST_SIZE,
  3617. },
  3618. .caam = {
  3619. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3620. OP_ALG_AAI_CTR_MOD128,
  3621. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3622. OP_ALG_AAI_HMAC_PRECOMP,
  3623. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3624. .rfc3686 = true,
  3625. },
  3626. },
  3627. {
  3628. .aead = {
  3629. .base = {
  3630. .cra_name = "seqiv(authenc("
  3631. "hmac(sha1),rfc3686(ctr(aes))))",
  3632. .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  3633. "rfc3686-ctr-aes-caam",
  3634. .cra_blocksize = 1,
  3635. },
  3636. .setkey = aead_setkey,
  3637. .setauthsize = aead_setauthsize,
  3638. .encrypt = aead_encrypt,
  3639. .decrypt = aead_givdecrypt,
  3640. .ivsize = CTR_RFC3686_IV_SIZE,
  3641. .maxauthsize = SHA1_DIGEST_SIZE,
  3642. },
  3643. .caam = {
  3644. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3645. OP_ALG_AAI_CTR_MOD128,
  3646. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3647. OP_ALG_AAI_HMAC_PRECOMP,
  3648. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3649. .rfc3686 = true,
  3650. .geniv = true,
  3651. },
  3652. },
  3653. {
  3654. .aead = {
  3655. .base = {
  3656. .cra_name = "authenc(hmac(sha224),"
  3657. "rfc3686(ctr(aes)))",
  3658. .cra_driver_name = "authenc-hmac-sha224-"
  3659. "rfc3686-ctr-aes-caam",
  3660. .cra_blocksize = 1,
  3661. },
  3662. .setkey = aead_setkey,
  3663. .setauthsize = aead_setauthsize,
  3664. .encrypt = aead_encrypt,
  3665. .decrypt = aead_decrypt,
  3666. .ivsize = CTR_RFC3686_IV_SIZE,
  3667. .maxauthsize = SHA224_DIGEST_SIZE,
  3668. },
  3669. .caam = {
  3670. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3671. OP_ALG_AAI_CTR_MOD128,
  3672. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3673. OP_ALG_AAI_HMAC_PRECOMP,
  3674. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3675. .rfc3686 = true,
  3676. },
  3677. },
  3678. {
  3679. .aead = {
  3680. .base = {
  3681. .cra_name = "seqiv(authenc("
  3682. "hmac(sha224),rfc3686(ctr(aes))))",
  3683. .cra_driver_name = "seqiv-authenc-hmac-sha224-"
  3684. "rfc3686-ctr-aes-caam",
  3685. .cra_blocksize = 1,
  3686. },
  3687. .setkey = aead_setkey,
  3688. .setauthsize = aead_setauthsize,
  3689. .encrypt = aead_encrypt,
  3690. .decrypt = aead_givdecrypt,
  3691. .ivsize = CTR_RFC3686_IV_SIZE,
  3692. .maxauthsize = SHA224_DIGEST_SIZE,
  3693. },
  3694. .caam = {
  3695. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3696. OP_ALG_AAI_CTR_MOD128,
  3697. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3698. OP_ALG_AAI_HMAC_PRECOMP,
  3699. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3700. .rfc3686 = true,
  3701. .geniv = true,
  3702. },
  3703. },
  3704. {
  3705. .aead = {
  3706. .base = {
  3707. .cra_name = "authenc(hmac(sha256),"
  3708. "rfc3686(ctr(aes)))",
  3709. .cra_driver_name = "authenc-hmac-sha256-"
  3710. "rfc3686-ctr-aes-caam",
  3711. .cra_blocksize = 1,
  3712. },
  3713. .setkey = aead_setkey,
  3714. .setauthsize = aead_setauthsize,
  3715. .encrypt = aead_encrypt,
  3716. .decrypt = aead_decrypt,
  3717. .ivsize = CTR_RFC3686_IV_SIZE,
  3718. .maxauthsize = SHA256_DIGEST_SIZE,
  3719. },
  3720. .caam = {
  3721. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3722. OP_ALG_AAI_CTR_MOD128,
  3723. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3724. OP_ALG_AAI_HMAC_PRECOMP,
  3725. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3726. .rfc3686 = true,
  3727. },
  3728. },
  3729. {
  3730. .aead = {
  3731. .base = {
  3732. .cra_name = "seqiv(authenc(hmac(sha256),"
  3733. "rfc3686(ctr(aes))))",
  3734. .cra_driver_name = "seqiv-authenc-hmac-sha256-"
  3735. "rfc3686-ctr-aes-caam",
  3736. .cra_blocksize = 1,
  3737. },
  3738. .setkey = aead_setkey,
  3739. .setauthsize = aead_setauthsize,
  3740. .encrypt = aead_encrypt,
  3741. .decrypt = aead_givdecrypt,
  3742. .ivsize = CTR_RFC3686_IV_SIZE,
  3743. .maxauthsize = SHA256_DIGEST_SIZE,
  3744. },
  3745. .caam = {
  3746. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3747. OP_ALG_AAI_CTR_MOD128,
  3748. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3749. OP_ALG_AAI_HMAC_PRECOMP,
  3750. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3751. .rfc3686 = true,
  3752. .geniv = true,
  3753. },
  3754. },
  3755. {
  3756. .aead = {
  3757. .base = {
  3758. .cra_name = "authenc(hmac(sha384),"
  3759. "rfc3686(ctr(aes)))",
  3760. .cra_driver_name = "authenc-hmac-sha384-"
  3761. "rfc3686-ctr-aes-caam",
  3762. .cra_blocksize = 1,
  3763. },
  3764. .setkey = aead_setkey,
  3765. .setauthsize = aead_setauthsize,
  3766. .encrypt = aead_encrypt,
  3767. .decrypt = aead_decrypt,
  3768. .ivsize = CTR_RFC3686_IV_SIZE,
  3769. .maxauthsize = SHA384_DIGEST_SIZE,
  3770. },
  3771. .caam = {
  3772. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3773. OP_ALG_AAI_CTR_MOD128,
  3774. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3775. OP_ALG_AAI_HMAC_PRECOMP,
  3776. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3777. .rfc3686 = true,
  3778. },
  3779. },
  3780. {
  3781. .aead = {
  3782. .base = {
  3783. .cra_name = "seqiv(authenc(hmac(sha384),"
  3784. "rfc3686(ctr(aes))))",
  3785. .cra_driver_name = "seqiv-authenc-hmac-sha384-"
  3786. "rfc3686-ctr-aes-caam",
  3787. .cra_blocksize = 1,
  3788. },
  3789. .setkey = aead_setkey,
  3790. .setauthsize = aead_setauthsize,
  3791. .encrypt = aead_encrypt,
  3792. .decrypt = aead_givdecrypt,
  3793. .ivsize = CTR_RFC3686_IV_SIZE,
  3794. .maxauthsize = SHA384_DIGEST_SIZE,
  3795. },
  3796. .caam = {
  3797. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3798. OP_ALG_AAI_CTR_MOD128,
  3799. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3800. OP_ALG_AAI_HMAC_PRECOMP,
  3801. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3802. .rfc3686 = true,
  3803. .geniv = true,
  3804. },
  3805. },
  3806. {
  3807. .aead = {
  3808. .base = {
  3809. .cra_name = "authenc(hmac(sha512),"
  3810. "rfc3686(ctr(aes)))",
  3811. .cra_driver_name = "authenc-hmac-sha512-"
  3812. "rfc3686-ctr-aes-caam",
  3813. .cra_blocksize = 1,
  3814. },
  3815. .setkey = aead_setkey,
  3816. .setauthsize = aead_setauthsize,
  3817. .encrypt = aead_encrypt,
  3818. .decrypt = aead_decrypt,
  3819. .ivsize = CTR_RFC3686_IV_SIZE,
  3820. .maxauthsize = SHA512_DIGEST_SIZE,
  3821. },
  3822. .caam = {
  3823. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3824. OP_ALG_AAI_CTR_MOD128,
  3825. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3826. OP_ALG_AAI_HMAC_PRECOMP,
  3827. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3828. .rfc3686 = true,
  3829. },
  3830. },
  3831. {
  3832. .aead = {
  3833. .base = {
  3834. .cra_name = "seqiv(authenc(hmac(sha512),"
  3835. "rfc3686(ctr(aes))))",
  3836. .cra_driver_name = "seqiv-authenc-hmac-sha512-"
  3837. "rfc3686-ctr-aes-caam",
  3838. .cra_blocksize = 1,
  3839. },
  3840. .setkey = aead_setkey,
  3841. .setauthsize = aead_setauthsize,
  3842. .encrypt = aead_encrypt,
  3843. .decrypt = aead_givdecrypt,
  3844. .ivsize = CTR_RFC3686_IV_SIZE,
  3845. .maxauthsize = SHA512_DIGEST_SIZE,
  3846. },
  3847. .caam = {
  3848. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3849. OP_ALG_AAI_CTR_MOD128,
  3850. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3851. OP_ALG_AAI_HMAC_PRECOMP,
  3852. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3853. .rfc3686 = true,
  3854. .geniv = true,
  3855. },
  3856. },
  3857. };
  3858. struct caam_crypto_alg {
  3859. struct crypto_alg crypto_alg;
  3860. struct list_head entry;
  3861. struct caam_alg_entry caam;
  3862. };
  3863. static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  3864. {
  3865. ctx->jrdev = caam_jr_alloc();
  3866. if (IS_ERR(ctx->jrdev)) {
  3867. pr_err("Job Ring Device allocation for transform failed\n");
  3868. return PTR_ERR(ctx->jrdev);
  3869. }
  3870. /* copy descriptor header template value */
  3871. ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  3872. ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  3873. ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
  3874. return 0;
  3875. }
  3876. static int caam_cra_init(struct crypto_tfm *tfm)
  3877. {
  3878. struct crypto_alg *alg = tfm->__crt_alg;
  3879. struct caam_crypto_alg *caam_alg =
  3880. container_of(alg, struct caam_crypto_alg, crypto_alg);
  3881. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  3882. return caam_init_common(ctx, &caam_alg->caam);
  3883. }
  3884. static int caam_aead_init(struct crypto_aead *tfm)
  3885. {
  3886. struct aead_alg *alg = crypto_aead_alg(tfm);
  3887. struct caam_aead_alg *caam_alg =
  3888. container_of(alg, struct caam_aead_alg, aead);
  3889. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  3890. return caam_init_common(ctx, &caam_alg->caam);
  3891. }
  3892. static void caam_exit_common(struct caam_ctx *ctx)
  3893. {
  3894. if (ctx->sh_desc_enc_dma &&
  3895. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
  3896. dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
  3897. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  3898. if (ctx->sh_desc_dec_dma &&
  3899. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
  3900. dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
  3901. desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
  3902. if (ctx->sh_desc_givenc_dma &&
  3903. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
  3904. dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
  3905. desc_bytes(ctx->sh_desc_givenc),
  3906. DMA_TO_DEVICE);
  3907. if (ctx->key_dma &&
  3908. !dma_mapping_error(ctx->jrdev, ctx->key_dma))
  3909. dma_unmap_single(ctx->jrdev, ctx->key_dma,
  3910. ctx->enckeylen + ctx->split_key_pad_len,
  3911. DMA_TO_DEVICE);
  3912. caam_jr_free(ctx->jrdev);
  3913. }
  3914. static void caam_cra_exit(struct crypto_tfm *tfm)
  3915. {
  3916. caam_exit_common(crypto_tfm_ctx(tfm));
  3917. }
  3918. static void caam_aead_exit(struct crypto_aead *tfm)
  3919. {
  3920. caam_exit_common(crypto_aead_ctx(tfm));
  3921. }
  3922. static void __exit caam_algapi_exit(void)
  3923. {
  3924. struct caam_crypto_alg *t_alg, *n;
  3925. int i;
  3926. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  3927. struct caam_aead_alg *t_alg = driver_aeads + i;
  3928. if (t_alg->registered)
  3929. crypto_unregister_aead(&t_alg->aead);
  3930. }
  3931. if (!alg_list.next)
  3932. return;
  3933. list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
  3934. crypto_unregister_alg(&t_alg->crypto_alg);
  3935. list_del(&t_alg->entry);
  3936. kfree(t_alg);
  3937. }
  3938. }
  3939. static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
  3940. *template)
  3941. {
  3942. struct caam_crypto_alg *t_alg;
  3943. struct crypto_alg *alg;
  3944. t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
  3945. if (!t_alg) {
  3946. pr_err("failed to allocate t_alg\n");
  3947. return ERR_PTR(-ENOMEM);
  3948. }
  3949. alg = &t_alg->crypto_alg;
  3950. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
  3951. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  3952. template->driver_name);
  3953. alg->cra_module = THIS_MODULE;
  3954. alg->cra_init = caam_cra_init;
  3955. alg->cra_exit = caam_cra_exit;
  3956. alg->cra_priority = CAAM_CRA_PRIORITY;
  3957. alg->cra_blocksize = template->blocksize;
  3958. alg->cra_alignmask = 0;
  3959. alg->cra_ctxsize = sizeof(struct caam_ctx);
  3960. alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
  3961. template->type;
  3962. switch (template->type) {
  3963. case CRYPTO_ALG_TYPE_GIVCIPHER:
  3964. alg->cra_type = &crypto_givcipher_type;
  3965. alg->cra_ablkcipher = template->template_ablkcipher;
  3966. break;
  3967. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3968. alg->cra_type = &crypto_ablkcipher_type;
  3969. alg->cra_ablkcipher = template->template_ablkcipher;
  3970. break;
  3971. }
  3972. t_alg->caam.class1_alg_type = template->class1_alg_type;
  3973. t_alg->caam.class2_alg_type = template->class2_alg_type;
  3974. t_alg->caam.alg_op = template->alg_op;
  3975. return t_alg;
  3976. }
  3977. static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
  3978. {
  3979. struct aead_alg *alg = &t_alg->aead;
  3980. alg->base.cra_module = THIS_MODULE;
  3981. alg->base.cra_priority = CAAM_CRA_PRIORITY;
  3982. alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  3983. alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  3984. alg->init = caam_aead_init;
  3985. alg->exit = caam_aead_exit;
  3986. }
  3987. static int __init caam_algapi_init(void)
  3988. {
  3989. struct device_node *dev_node;
  3990. struct platform_device *pdev;
  3991. struct device *ctrldev;
  3992. struct caam_drv_private *priv;
  3993. int i = 0, err = 0;
  3994. u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
  3995. unsigned int md_limit = SHA512_DIGEST_SIZE;
  3996. bool registered = false;
  3997. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
  3998. if (!dev_node) {
  3999. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
  4000. if (!dev_node)
  4001. return -ENODEV;
  4002. }
  4003. pdev = of_find_device_by_node(dev_node);
  4004. if (!pdev) {
  4005. of_node_put(dev_node);
  4006. return -ENODEV;
  4007. }
  4008. ctrldev = &pdev->dev;
  4009. priv = dev_get_drvdata(ctrldev);
  4010. of_node_put(dev_node);
  4011. /*
  4012. * If priv is NULL, it's probably because the caam driver wasn't
  4013. * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
  4014. */
  4015. if (!priv)
  4016. return -ENODEV;
  4017. INIT_LIST_HEAD(&alg_list);
  4018. /*
  4019. * Register crypto algorithms the device supports.
  4020. * First, detect presence and attributes of DES, AES, and MD blocks.
  4021. */
  4022. cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
  4023. cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
  4024. des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
  4025. aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
  4026. md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
  4027. /* If MD is present, limit digest size based on LP256 */
  4028. if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
  4029. md_limit = SHA256_DIGEST_SIZE;
  4030. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  4031. struct caam_crypto_alg *t_alg;
  4032. struct caam_alg_template *alg = driver_algs + i;
  4033. u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
  4034. /* Skip DES algorithms if not supported by device */
  4035. if (!des_inst &&
  4036. ((alg_sel == OP_ALG_ALGSEL_3DES) ||
  4037. (alg_sel == OP_ALG_ALGSEL_DES)))
  4038. continue;
  4039. /* Skip AES algorithms if not supported by device */
  4040. if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
  4041. continue;
  4042. t_alg = caam_alg_alloc(alg);
  4043. if (IS_ERR(t_alg)) {
  4044. err = PTR_ERR(t_alg);
  4045. pr_warn("%s alg allocation failed\n", alg->driver_name);
  4046. continue;
  4047. }
  4048. err = crypto_register_alg(&t_alg->crypto_alg);
  4049. if (err) {
  4050. pr_warn("%s alg registration failed\n",
  4051. t_alg->crypto_alg.cra_driver_name);
  4052. kfree(t_alg);
  4053. continue;
  4054. }
  4055. list_add_tail(&t_alg->entry, &alg_list);
  4056. registered = true;
  4057. }
  4058. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  4059. struct caam_aead_alg *t_alg = driver_aeads + i;
  4060. u32 c1_alg_sel = t_alg->caam.class1_alg_type &
  4061. OP_ALG_ALGSEL_MASK;
  4062. u32 c2_alg_sel = t_alg->caam.class2_alg_type &
  4063. OP_ALG_ALGSEL_MASK;
  4064. u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
  4065. /* Skip DES algorithms if not supported by device */
  4066. if (!des_inst &&
  4067. ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
  4068. (c1_alg_sel == OP_ALG_ALGSEL_DES)))
  4069. continue;
  4070. /* Skip AES algorithms if not supported by device */
  4071. if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
  4072. continue;
  4073. /*
  4074. * Check support for AES algorithms not available
  4075. * on LP devices.
  4076. */
  4077. if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
  4078. if (alg_aai == OP_ALG_AAI_GCM)
  4079. continue;
  4080. /*
  4081. * Skip algorithms requiring message digests
  4082. * if MD or MD size is not supported by device.
  4083. */
  4084. if (c2_alg_sel &&
  4085. (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
  4086. continue;
  4087. caam_aead_alg_init(t_alg);
  4088. err = crypto_register_aead(&t_alg->aead);
  4089. if (err) {
  4090. pr_warn("%s alg registration failed\n",
  4091. t_alg->aead.base.cra_driver_name);
  4092. continue;
  4093. }
  4094. t_alg->registered = true;
  4095. registered = true;
  4096. }
  4097. if (registered)
  4098. pr_info("caam algorithms registered in /proc/crypto\n");
  4099. return err;
  4100. }
  4101. module_init(caam_algapi_init);
  4102. module_exit(caam_algapi_exit);
  4103. MODULE_LICENSE("GPL");
  4104. MODULE_DESCRIPTION("FSL CAAM support for crypto API");
  4105. MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");