/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3 |------------|
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
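/*
 * Illustration only (not part of the driver): a minimal sketch of how a job
 * descriptor matching the layout above could be built with the desc_constr.h
 * helpers. The buffer/length parameter names here are hypothetical.
 */
#if 0
static void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma, int sh_words,
			     dma_addr_t dst_dma, u32 dst_len,
			     dma_addr_t src_dma, u32 src_len)
{
	/* Header plus pointer to the shared descriptor */
	init_job_desc_shared(desc, sh_desc_dma, sh_words,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}
#endif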
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
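/*
 * Worked example of the inline-key budget used throughout this file (a
 * sketch; the exact values come from desc_constr.h): CAAM_DESC_BYTES_MAX is
 * 64 words * CAAM_CMD_SZ (4) = 256 bytes. For hmac(sha256) the split key is
 * padded to 2 * 32 = 64 bytes, so e.g. an AEAD null-encrypt descriptor can
 * carry its key inline when DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
 * 64 <= 256; otherwise the key is referenced by its DMA address instead.
 */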
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
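/*
 * A note on the jump dance above (an interpretation, not taken from the
 * hardware documentation): OP_ALG_AAI_DK ("Decrypt Key") tells the AES
 * engine the class 1 key register already holds a decryption key schedule.
 * When the JUMP_COND_SHRD test passes, the descriptor is running with state
 * shared from a previous job, so the DK variant of the operation is used;
 * otherwise the plain decrypt operation runs.
 */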
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
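/*
 * Layout of ctx->key (see append_key_aead below):
 *   bytes [0, split_key_pad_len)           - MDHA split authentication key
 *   bytes [split_key_pad_len, +enckeylen)  - class 1 encryption key; for
 *                                            RFC3686 the trailing
 *                                            CTR_RFC3686_NONCE_SIZE bytes of
 *                                            this region hold the nonce
 *                                            (counted in enckeylen)
 */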
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 * | enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}
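/*
 * With the above, RFC3686 ends up with CONTEXT1[255:128] laid out as
 * {NONCE, IV, COUNTER}: the 4-byte nonce is moved to byte offset 16 here,
 * and the per-algorithm descriptors below load the IV at ctx1_iv_off and a
 * big-endian counter of 1 at ctx1_iv_off + CTR_RFC3686_IV_SIZE.
 */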
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	/* Map the givenc descriptor into its own slot (the original mapped
	 * into sh_desc_enc_dma while checking sh_desc_givenc_dma). */
	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
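/*
 * Illustration only (not part of the driver): a minimal sketch of how a
 * kernel user reaches the gcm(aes) shared descriptors built above through
 * the generic AEAD API (pulled in via compat.h). Error handling and the
 * async completion callback are elided; crypto_aead_encrypt() may return
 * -EINPROGRESS for a hardware-backed tfm.
 */
#if 0
static int example_gcm_encrypt(struct scatterlist *src,
			       struct scatterlist *dst,
			       const u8 *key, unsigned int keylen,
			       u8 *iv, unsigned int assoclen,
			       unsigned int cryptlen)
{
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
	int ret;

	crypto_aead_setkey(tfm, key, keylen);	/* driver setkey */
	crypto_aead_setauthsize(tfm, 16);	/* -> gcm_setauthsize */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	ret = crypto_aead_encrypt(req);

	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}
#endif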
  762. static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  763. {
  764. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  765. ctx->authsize = authsize;
  766. gcm_set_sh_desc(authenc);
  767. return 0;
  768. }
  769. static int rfc4106_set_sh_desc(struct crypto_aead *aead)
  770. {
  771. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  772. struct device *jrdev = ctx->jrdev;
  773. bool keys_fit_inline = false;
  774. u32 *key_jump_cmd;
  775. u32 *desc;
  776. if (!ctx->enckeylen || !ctx->authsize)
  777. return 0;
  778. /*
  779. * RFC4106 encrypt shared descriptor
  780. * Job Descriptor and Shared Descriptor
  781. * must fit into the 64-word Descriptor h/w Buffer
  782. */
  783. if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
  784. ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
  785. keys_fit_inline = true;
  786. desc = ctx->sh_desc_enc;
  787. init_sh_desc(desc, HDR_SHARE_SERIAL);
  788. /* Skip key loading if it is loaded due to sharing */
  789. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  790. JUMP_COND_SHRD);
  791. if (keys_fit_inline)
  792. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  793. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  794. else
  795. append_key(desc, ctx->key_dma, ctx->enckeylen,
  796. CLASS_1 | KEY_DEST_CLASS_REG);
  797. set_jump_tgt_here(desc, key_jump_cmd);
  798. /* Class 1 operation */
  799. append_operation(desc, ctx->class1_alg_type |
  800. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  801. append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
  802. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  803. /* Read assoc data */
  804. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  805. FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
  806. /* Skip IV */
  807. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  808. /* Will read cryptlen bytes */
  809. append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  810. /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
  811. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
  812. /* Skip assoc data */
  813. append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
  814. /* cryptlen = seqoutlen - assoclen */
  815. append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
  816. /* Write encrypted data */
  817. append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
  818. /* Read payload data */
  819. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  820. FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
  821. /* Write ICV */
  822. append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
  823. LDST_SRCDST_BYTE_CONTEXT);
  824. ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
  825. desc_bytes(desc),
  826. DMA_TO_DEVICE);
  827. if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
  828. dev_err(jrdev, "unable to map shared descriptor\n");
  829. return -ENOMEM;
  830. }
  831. #ifdef DEBUG
  832. print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
  833. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  834. desc_bytes(desc), 1);
  835. #endif
  836. /*
  837. * Job Descriptor and Shared Descriptors
  838. * must all fit into the 64-word Descriptor h/w Buffer
  839. */
  840. keys_fit_inline = false;
  841. if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
  842. ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
  843. keys_fit_inline = true;
  844. desc = ctx->sh_desc_dec;
  845. init_sh_desc(desc, HDR_SHARE_SERIAL);
  846. /* Skip key loading if it is loaded due to sharing */
  847. key_jump_cmd = append_jump(desc, JUMP_JSL |
  848. JUMP_TEST_ALL | JUMP_COND_SHRD);
  849. if (keys_fit_inline)
  850. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  851. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  852. else
  853. append_key(desc, ctx->key_dma, ctx->enckeylen,
  854. CLASS_1 | KEY_DEST_CLASS_REG);
  855. set_jump_tgt_here(desc, key_jump_cmd);
  856. /* Class 1 operation */
  857. append_operation(desc, ctx->class1_alg_type |
  858. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  859. append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
  860. append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
  861. /* Read assoc data */
  862. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  863. FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
  864. /* Skip IV */
  865. append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
  866. /* Will read cryptlen bytes */
  867. append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
  868. /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
  869. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
  870. /* Skip assoc data */
  871. append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
  872. /* Will write cryptlen bytes */
  873. append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  874. /* Store payload data */
  875. append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
  876. /* Read encrypted data */
  877. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
  878. FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
  879. /* Read ICV */
  880. append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
  881. FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
  882. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  883. desc_bytes(desc),
  884. DMA_TO_DEVICE);
  885. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  886. dev_err(jrdev, "unable to map shared descriptor\n");
  887. return -ENOMEM;
  888. }
  889. #ifdef DEBUG
  890. print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
  891. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  892. desc_bytes(desc), 1);
  893. #endif
  894. return 0;
  895. }
  896. static int rfc4106_setauthsize(struct crypto_aead *authenc,
  897. unsigned int authsize)
  898. {
  899. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  900. ctx->authsize = authsize;
  901. rfc4106_set_sh_desc(authenc);
  902. return 0;
  903. }
  904. static int rfc4543_set_sh_desc(struct crypto_aead *aead)
  905. {
  906. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  907. struct device *jrdev = ctx->jrdev;
  908. bool keys_fit_inline = false;
  909. u32 *key_jump_cmd;
  910. u32 *read_move_cmd, *write_move_cmd;
  911. u32 *desc;
  912. if (!ctx->enckeylen || !ctx->authsize)
  913. return 0;
  914. /*
  915. * RFC4543 encrypt shared descriptor
  916. * Job Descriptor and Shared Descriptor
  917. * must fit into the 64-word Descriptor h/w Buffer
  918. */
  919. if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
  920. ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
  921. keys_fit_inline = true;
  922. desc = ctx->sh_desc_enc;
  923. init_sh_desc(desc, HDR_SHARE_SERIAL);
  924. /* Skip key loading if it is loaded due to sharing */
  925. key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
  926. JUMP_COND_SHRD);
  927. if (keys_fit_inline)
  928. append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
  929. ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
  930. else
  931. append_key(desc, ctx->key_dma, ctx->enckeylen,
  932. CLASS_1 | KEY_DEST_CLASS_REG);
  933. set_jump_tgt_here(desc, key_jump_cmd);
  934. /* Class 1 operation */
  935. append_operation(desc, ctx->class1_alg_type |
  936. OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
  937. /* assoclen + cryptlen = seqinlen */
  938. append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
  939. /*
  940. * MOVE_LEN opcode is not available in all SEC HW revisions,
  941. * thus need to do some magic, i.e. self-patch the descriptor
  942. * buffer.
  943. */
  944. read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
  945. (0x6 << MOVE_LEN_SHIFT));
  946. write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
  947. (0x8 << MOVE_LEN_SHIFT));
  948. /* Will read assoclen + cryptlen bytes */
  949. append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  950. /* Will write assoclen + cryptlen bytes */
  951. append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
  952. /* Read and write assoclen + cryptlen bytes */
  953. aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
  954. set_move_tgt_here(desc, read_move_cmd);
  955. set_move_tgt_here(desc, write_move_cmd);
  956. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);

	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
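
/*
 * An MDHA split key is the pre-computed inner/outer HMAC pad state
 * (ipad/opad) derived from the raw authentication key, which lets the
 * h/w resume the HMAC without re-loading the original key.
 */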
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
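	/*
	 * ctx->key layout, as consumed by the shared descriptors:
	 * | split auth key, padded to 16 bytes | encryption key |
	 */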
	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
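	/*
	 * The 4-byte nonce rides at the end of the key material; it is
	 * bounced through the output FIFO and MOVEd into CONTEXT1 at
	 * offset 16, just ahead of where the IV is loaded below.
	 */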
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, (u32)1, LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/* verify hw auth check passed else return -EBADMSG */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
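	/*
	 * HDR_REVERSE runs these job descriptor commands before branching
	 * to the shared descriptor; HDR_SHARE_DEFER postpones acquiring it.
	 */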
	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;
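	/*
	 * With no AAD and no payload, the IV itself is the last entry fed
	 * to class 1, so it must carry the LAST1 marker; the CCB would
	 * otherwise keep waiting for input that never arrives.
	 */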
	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
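	/*
	 * The input sequence covers [IV || payload]: the shared
	 * descriptor's SEQ LOAD consumes the IV before the cipher
	 * operation starts on the remaining req->nbytes.
	 */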
	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
				     &src_chained);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
				     (encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
				     (encrypt ? authsize : 0),
				     &src_chained);
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
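	/*
	 * Layout of the single allocation below (see the sec4_sg pointer
	 * arithmetic further down):
	 *   [struct aead_edesc][hw_desc: desc_bytes][sec4_sg entries]
	 */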
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
					     DMA_TO_DEVICE, src_chained);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->assoclen + req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
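
/*
 * The IV that givencrypt prepended to the ciphertext sits between the
 * associated data and the payload; folding it into assoclen lets the
 * regular decrypt path consume (and authenticate) it without needing
 * a dedicated descriptor.
 */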
static int aead_givdecrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	req->cryptlen -= ivsize;
	req->assoclen += ivsize;

	return aead_decrypt(req);
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);
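	/*
	 * When the IV is not contiguous with the source, the link table
	 * gains one extra entry so that [IV][src...] can be presented to
	 * the h/w as a single input sequence.
	 */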
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher

struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	}
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_givdecrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
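
/*
 * Likewise for the AEAD templates; a minimal sketch of a consumer:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 * binds to "gcm-aes-caam" when this driver wins the priority-based
 * selection.
 */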

struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}
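
/*
 * For example, for the "gcm(aes)" entry above this leaves
 * ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
 * OP_ALG_AAI_GCM, i.e. the ready-made header for the OPERATION command
 * of the shared descriptors built later.
 */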

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}
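
/*
 * Unmap only buffers that were actually mapped: a zero handle, or one
 * that fails dma_mapping_error(), was never successfully mapped and
 * must not be handed to dma_unmap_single().
 */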
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;

	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}
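
/*
 * For instance, handed the "cbc(aes)" template this returns a
 * caam_crypto_alg whose crypto_alg has cra_name "cbc(aes)",
 * cra_driver_name "cbc-aes-caam", cra_blocksize AES_BLOCK_SIZE and,
 * since that template is of type CRYPTO_ALG_TYPE_GIVCIPHER,
 * &crypto_givcipher_type as its cra_type.
 */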

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
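
/*
 * Only the driver-wide fields are filled in here; everything
 * algorithm-specific (cra_name, cra_driver_name, cra_blocksize, ivsize,
 * maxauthsize and the setkey/setauthsize/encrypt/decrypt hooks) already
 * sits in the static driver_aeads entries.
 */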

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;
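
	/*
	 * E.g. on a part whose MDHA is the LP256 variant, md_limit drops to
	 * SHA256_DIGEST_SIZE and every hmac(sha384)/hmac(sha512) entry in
	 * driver_aeads fails the maxauthsize test below.
	 */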

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;
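
		/*
		 * Besides gcm(aes) itself, this skips the rfc4106 and
		 * rfc4543 wrappers too, since all three carry
		 * OP_ALG_AAI_GCM in class1_alg_type.
		 */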

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
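
/*
 * Note: err holds only the outcome of the most recent registration
 * attempt, so a partially successful probe still returns an error if
 * the final registration in the loop above failed.
 */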

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");