main.c

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "cmd.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION "\n";

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}
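
/*
 * Netdev notifier callback: keeps roce.netdev pointing at the netdev that
 * belongs to this HCA's PCI device, and translates link up/down on that
 * netdev (or on its LAG master, when one exists) into
 * IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR events on port 1.
 */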
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
                                                 roce.nb);

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UNREGISTER:
                write_lock(&ibdev->roce.netdev_lock);
                if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
                        ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
                                             NULL : ndev;
                write_unlock(&ibdev->roce.netdev_lock);
                break;

        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
                struct net_device *upper = NULL;

                if (lag_ndev) {
                        upper = netdev_master_upper_dev_get(lag_ndev);
                        dev_put(lag_ndev);
                }

                if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = { };

                        ibev.device = &ibdev->ib_dev;
                        ibev.event = (event == NETDEV_UP) ?
                                     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
                        ibev.element.port_num = 1;
                        ib_dispatch_event(&ibev);
                }
                break;
        }

        default:
                break;
        }

        return NOTIFY_DONE;
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;

        ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
        if (ndev)
                return ndev;

        /* Ensure ndev does not disappear before we invoke dev_hold() */
        read_lock(&ibdev->roce.netdev_lock);
        ndev = ibdev->roce.netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce.netdev_lock);

        return ndev;
}
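
/*
 * Translate the single protocol bit reported in PTYS.eth_proto_oper into
 * the (speed, width) pair the IB core expects, e.g. 40GbE is reported as
 * 4X QDR (4 x 10 Gb/s) and 100GbE as 4X EDR (4 x 25 Gb/s).
 */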
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                    u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
        case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
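
/*
 * Fill ib_port_attr for a RoCE (Ethernet) port. The port state is derived
 * from the backing netdev: ACTIVE only when the netdev (or its LAG master)
 * is running with carrier on, otherwise the port is left DOWN.
 */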
static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                 struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct net_device *ndev, *upper;
        enum ib_mtu ndev_ib_mtu;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;

        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
         */
        if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num))
                return;

        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width);

        props->port_cap_flags |= IB_PORT_CM_SUP;
        props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

        props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
                                           roce_address_table_size);
        props->max_mtu = IB_MTU_4096;
        props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len = 1;
        props->state = IB_PORT_DOWN;
        props->phys_state = 3;

        mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                return;

        if (mlx5_lag_is_active(dev->mdev)) {
                rcu_read_lock();
                upper = netdev_master_upper_dev_get_rcu(ndev);
                if (upper) {
                        dev_put(ndev);
                        ndev = upper;
                        dev_hold(ndev);
                }
                rcu_read_unlock();
        }

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
}

static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
                                     const struct ib_gid_attr *attr,
                                     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
        char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                               source_l3_address);
        void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                           source_mac_47_32);

        if (!gid)
                return;

        ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);

        if (is_vlan_dev(attr->ndev)) {
                MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
                MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
        }

        switch (attr->gid_type) {
        case IB_GID_TYPE_IB:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
                break;
        default:
                WARN_ON(true);
        }

        if (attr->gid_type != IB_GID_TYPE_IB) {
                if (ipv6_addr_v4mapped((void *)gid))
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV4);
                else
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV6);
        }

        if ((attr->gid_type == IB_GID_TYPE_IB) ||
            !ipv6_addr_v4mapped((void *)gid))
                memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
        else
                memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}
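
/*
 * Program one entry of the HCA RoCE address table through the
 * SET_ROCE_ADDRESS command; a NULL gid clears the entry. Valid only on
 * ports whose link layer is Ethernet.
 */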
static int set_roce_addr(struct ib_device *device, u8 port_num,
                         unsigned int index,
                         const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
        u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
        void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

        if (ll != IB_LINK_LAYER_ETHERNET)
                return -EINVAL;

        ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

        MLX5_SET(set_roce_address_in, in, roce_address_index, index);
        MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, const union ib_gid *gid,
                           const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index)
{
        struct ib_gid_attr attr;
        union ib_gid gid;

        if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
                return 0;

        if (!attr.ndev)
                return 0;

        dev_put(attr.ndev);

        if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
                           int index, enum ib_gid_type *gid_type)
{
        struct ib_gid_attr attr;
        union ib_gid gid;
        int ret;

        ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
        if (ret)
                return ret;

        if (!attr.ndev)
                return -ENODEV;

        dev_put(attr.ndev);

        *gid_type = attr.gid_type;

        return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
                return !MLX5_CAP_GEN(dev->mdev, ib_virt);
        return 0;
}

enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}
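
/*
 * Advertise IB_ATOMIC_HCA only if the device supports both 8-byte
 * compare-and-swap and fetch-and-add and can respond in host endianness;
 * otherwise report IB_ATOMIC_NONE.
 */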
static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

        /* Check if HW supports the standard 8-byte atomic operations and is
         * capable of responding in host endianness.
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                             pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}
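
/*
 * ib_device query_device entry point: fills struct ib_device_attr from
 * firmware capability bits and, when the caller provides extended udata,
 * appends optional mlx5-specific capability blocks (TSO, RSS, CQE
 * compression, packet pacing, ...) guarded by field_avail() checks
 * against the user buffer length.
 */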
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_sq_desc;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
        struct mlx5_ib_query_device_resp resp = {};
        size_t resp_len;
        u64 max_tso;

        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;
        else
                resp.response_length = resp_len;

        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
                if (MLX5_CAP_ETH(mdev, csum_cap)) {
                        /* Legacy bit to support old userspace libraries */
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
                }

                if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
                        props->raw_packet_caps |=
                                IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

                if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
                        max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
                        if (max_tso) {
                                resp.tso_caps.max_tso = 1 << max_tso;
                                resp.tso_caps.supported_qpts |=
                                        1 << IB_QPT_RAW_PACKET;
                                resp.response_length += sizeof(resp.tso_caps);
                        }
                }

                if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
                        resp.rss_caps.rx_hash_function =
                                                MLX5_RX_HASH_FUNC_TOEPLITZ;
                        resp.rss_caps.rx_hash_fields_mask =
                                                MLX5_RX_HASH_SRC_IPV4 |
                                                MLX5_RX_HASH_DST_IPV4 |
                                                MLX5_RX_HASH_SRC_IPV6 |
                                                MLX5_RX_HASH_DST_IPV6 |
                                                MLX5_RX_HASH_SRC_PORT_TCP |
                                                MLX5_RX_HASH_DST_PORT_TCP |
                                                MLX5_RX_HASH_SRC_PORT_UDP |
                                                MLX5_RX_HASH_DST_PORT_UDP;
                        resp.response_length += sizeof(resp.rss_caps);
                }
        } else {
                if (field_avail(typeof(resp), tso_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.tso_caps);
                if (field_avail(typeof(resp), rss_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.rss_caps);
        }

        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
                /* Legacy bit to support old userspace libraries */
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->vendor_part_id = mdev->pdev->device;
        props->hw_ver = mdev->pdev->revision;

        props->max_mr_size = ~0ull;
        props->page_size_cap = ~(min_page_size - 1);
        props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                    sizeof(struct mlx5_wqe_data_seg);
        max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
        max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
                     sizeof(struct mlx5_wqe_raddr_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd = MLX5_MAX_SGE_RD;
        props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps(dev, props);
        props->masked_atomic_cap = IB_ATOMIC_NONE;
        props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->max_ah = INT_MAX;
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
#endif

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET) {
                props->rss_caps.max_rwq_indirection_tables =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
                props->rss_caps.max_rwq_indirection_table_size =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
                props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                props->max_wq_type_rq =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }

        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
                resp.cqe_comp_caps.max_num =
                        MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
                        MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
                resp.cqe_comp_caps.supported_format =
                        MLX5_IB_CQE_RES_FORMAT_HASH |
                        MLX5_IB_CQE_RES_FORMAT_CSUM;
                resp.response_length += sizeof(resp.cqe_comp_caps);
        }

        if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
                if (MLX5_CAP_QOS(mdev, packet_pacing) &&
                    MLX5_CAP_GEN(mdev, qos)) {
                        resp.packet_pacing_caps.qp_rate_limit_max =
                                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
                        resp.packet_pacing_caps.qp_rate_limit_min =
                                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
                        resp.packet_pacing_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                }
                resp.response_length += sizeof(resp.packet_pacing_caps);
        }

        if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
                        uhw->outlen)) {
                resp.mlx5_ib_support_multi_pkt_send_wqes =
                        MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
                resp.response_length +=
                        sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
        }

        if (field_avail(typeof(resp), reserved, uhw->outlen))
                resp.response_length += sizeof(resp.reserved);

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
                if (err)
                        return err;
        }

        return 0;
}
enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err = 0;

        if (active_width & MLX5_IB_WIDTH_1X) {
                *ib_width = IB_WIDTH_1X;
        } else if (active_width & MLX5_IB_WIDTH_2X) {
                mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
                            (int)active_width);
                err = -EINVAL;
        } else if (active_width & MLX5_IB_WIDTH_4X) {
                *ib_width = IB_WIDTH_4X;
        } else if (active_width & MLX5_IB_WIDTH_8X) {
                *ib_width = IB_WIDTH_8X;
        } else if (active_width & MLX5_IB_WIDTH_12X) {
                *ib_width = IB_WIDTH_12X;
        } else {
                mlx5_ib_dbg(dev, "Invalid active_width %d\n",
                            (int)active_width);
                err = -EINVAL;
        }

        return err;
}
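
/*
 * Convert an MTU in bytes to the IB-spec MTU encoding
 * (256 -> IB_MTU_256 == 1, ..., 4096 -> IB_MTU_4096 == 5).
 */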
static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}
enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0            = 1,
        MLX5_VL_HW_0_1          = 2,
        MLX5_VL_HW_0_2          = 3,
        MLX5_VL_HW_0_3          = 4,
        MLX5_VL_HW_0_4          = 5,
        MLX5_VL_HW_0_5          = 6,
        MLX5_VL_HW_0_6          = 7,
        MLX5_VL_HW_0_7          = 8,
        MLX5_VL_HW_0_14         = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
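
/*
 * Fill ib_port_attr for a native IB port from the firmware HCA vport
 * context plus the port registers: link width, proto oper, max/oper MTU
 * and the VL capability.
 */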
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        u16 max_mtu;
        u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        /* props being zeroed by the caller, avoid zeroing it here */

        err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
        if (err)
                goto out;

        props->lid              = rep->lid;
        props->lmc              = rep->lmc;
        props->sm_lid           = rep->sm_lid;
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
        props->port_cap_flags   = rep->cap_mask1;
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr    = rep->pkey_violation_counter;
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;
        props->grh_required     = rep->grh_required;

        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;

        err = translate_active_width(ibdev, ib_link_width_oper,
                                     &props->active_width);
        if (err)
                goto out;

        err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
        if (err)
                goto out;

        mlx5_query_port_max_mtu(mdev, &max_mtu, port);
        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

        mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

        err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, vl_hw_cap,
                                   &props->max_vl_num);
out:
        kfree(rep);
        return err;
}
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                mlx5_query_port_roce(ibdev, port, props);
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
                                                 pkey);
        default:
                return -EINVAL;
        }
}
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_reg_node_desc in;
        struct mlx5_reg_node_desc out;
        int err;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;

        memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

        return err;
}
static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
                                u32 value)
{
        struct mlx5_hca_vport_context ctx = {};
        int err;

        err = mlx5_query_hca_vport_context(dev->mdev, 0,
                                           port_num, 0, &ctx);
        if (err)
                return err;

        if (~ctx.cap_mask1_perm & mask) {
                mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
                             mask, ctx.cap_mask1_perm);
                return -EINVAL;
        }

        ctx.cap_mask1 = value;
        ctx.cap_mask1_perm = mask;
        err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
                                                 port_num, 0, &ctx);

        return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_port_attr attr;
        u32 tmp;
        int err;
        u32 change_mask;
        u32 value;
        bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
                      IB_LINK_LAYER_INFINIBAND);

        if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
                change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
                value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
                return set_port_caps_atomic(dev, port, change_mask, value);
        }

        mutex_lock(&dev->cap_mask_mutex);

        err = ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
        mutex_unlock(&dev->cap_mask_mutex);
        return err;
}
static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
        mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
                    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
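
/*
 * Validate the user's blue-flame register request and round it up so it
 * fills whole "system pages" of UARs; the rounded count is written back
 * into req and the number of required system pages is returned through
 * num_sys_pages.
 */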
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
                             struct mlx5_ib_alloc_ucontext_req_v2 *req,
                             u32 *num_sys_pages)
{
        int uars_per_sys_page;
        int bfregs_per_sys_page;
        int ref_bfregs = req->total_num_bfregs;

        if (req->total_num_bfregs == 0)
                return -EINVAL;

        BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
        BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

        if (req->total_num_bfregs > MLX5_MAX_BFREGS)
                return -ENOMEM;

        uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
        bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
        req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
        *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;

        if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
                return -EINVAL;

        mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
                    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
                    lib_uar_4k ? "yes" : "no", ref_bfregs,
                    req->total_num_bfregs, *num_sys_pages);

        return 0;
}
static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
        struct mlx5_bfreg_info *bfregi;
        int err;
        int i;

        bfregi = &context->bfregi;
        for (i = 0; i < bfregi->num_sys_pages; i++) {
                err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
                if (err)
                        goto error;

                mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
        }

        return 0;

error:
        for (--i; i >= 0; i--)
                if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
                        mlx5_ib_warn(dev, "failed to free uar %d\n", i);

        return err;
}

static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
        struct mlx5_bfreg_info *bfregi;
        int err;
        int i;

        bfregi = &context->bfregi;
        for (i = 0; i < bfregi->num_sys_pages; i++) {
                err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
                if (err) {
                        mlx5_ib_warn(dev, "failed to free uar %d\n", i);
                        return err;
                }
        }

        return 0;
}
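
/*
 * Create a user context: parse the v0/v2 allocation request, size and
 * allocate the bfreg/UAR pool, optionally allocate a transport domain,
 * and report device parameters (CQE version, UAR layout, core clock
 * offset, ...) back to userspace, trimmed to the response length the
 * caller can accept.
 */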
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_alloc_ucontext_req_v2 req = {};
        struct mlx5_ib_alloc_ucontext_resp resp = {};
        struct mlx5_ib_ucontext *context;
        struct mlx5_bfreg_info *bfregi;
        int ver;
        int err;
        size_t reqlen;
        size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
                                     max_cqe_version);
        bool lib_uar_4k;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
                return ERR_PTR(-EINVAL);

        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
        else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
        if (err)
                return ERR_PTR(err);

        if (req.flags)
                return ERR_PTR(-EINVAL);

        if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);

        req.total_num_bfregs = ALIGN(req.total_num_bfregs,
                                     MLX5_NON_FP_BFREGS_PER_UAR);
        if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
                return ERR_PTR(-EINVAL);

        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
                resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = cache_line_size();
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
        resp.cqe_version = min_t(__u8,
                                 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
                                 req.max_cqe_version);
        resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                                MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
        resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                                        MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
        bfregi = &context->bfregi;

        /* updates req->total_num_bfregs */
        err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
        if (err)
                goto out_ctx;

        mutex_init(&bfregi->lock);
        bfregi->lib_uar_4k = lib_uar_4k;
        bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
                                GFP_KERNEL);
        if (!bfregi->count) {
                err = -ENOMEM;
                goto out_ctx;
        }

        bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
                                    sizeof(*bfregi->sys_pages),
                                    GFP_KERNEL);
        if (!bfregi->sys_pages) {
                err = -ENOMEM;
                goto out_count;
        }

        err = allocate_uars(dev, context);
        if (err)
                goto out_sys_pages;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

        context->upd_xlt_page = __get_free_page(GFP_KERNEL);
        if (!context->upd_xlt_page) {
                err = -ENOMEM;
                goto out_uars;
        }
        mutex_init(&context->upd_xlt_page_mutex);

        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
                err = mlx5_core_alloc_transport_domain(dev->mdev,
                                                       &context->tdn);
                if (err)
                        goto out_page;
        }

        INIT_LIST_HEAD(&context->vma_private_list);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        resp.tot_bfregs = req.total_num_bfregs;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);

        if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
                resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
                                      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
                resp.response_length += sizeof(resp.cmds_supp_uhw);
        }

        if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
                if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
                        mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
                        resp.eth_min_inline++;
                }
                resp.response_length += sizeof(resp.eth_min_inline);
        }

        /*
         * We don't want to expose information from the PCI bar that is located
         * after 4096 bytes, so if the arch only supports larger pages, let's
         * pretend we don't support reading the HCA's core clock. This is also
         * forced by mmap function.
         */
        if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                if (PAGE_SIZE <= 4096) {
                        resp.comp_mask |=
                                MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset =
                                offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
                }
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
                                        sizeof(resp.reserved2);
        }

        if (field_avail(typeof(resp), log_uar_size, udata->outlen))
                resp.response_length += sizeof(resp.log_uar_size);

        if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
                resp.response_length += sizeof(resp.num_uars_per_page);

        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
                goto out_td;

        bfregi->ver = ver;
        bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
        context->cqe_version = resp.cqe_version;
        context->lib_caps = req.lib_caps;
        print_lib_caps(dev, context->lib_caps);

        return &context->ibucontext;

out_td:
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

out_page:
        free_page(context->upd_xlt_page);

out_uars:
        deallocate_uars(dev, context);

out_sys_pages:
        kfree(bfregi->sys_pages);

out_count:
        kfree(bfregi->count);

out_ctx:
        kfree(context);

        return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	bfregi = &context->bfregi;
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);

	free_page(context->upd_xlt_page);
	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
	kfree(context);

	return 0;
}
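/*
 * Translate an allocated UAR index (bfregi->sys_pages[idx]) into the pfn of
 * the BAR page that contains it. When the uar_4k capability is set, several
 * 4K UARs share one firmware page, hence the division by fw_uars_per_page.
 */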
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
				 struct mlx5_bfreg_info *bfregi,
				 int idx)
{
	int fw_uars_per_page;

	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;

	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
			bfregi->sys_pages[idx] / fw_uars_per_page;
}
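/*
 * mmap offsets encode a command in the bits of vm_pgoff above
 * MLX5_IB_MMAP_CMD_SHIFT and a command-specific argument, e.g. a UAR
 * page index, in the bits below it.
 */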
static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA. This
	 * is done through either mremap flow or split_vma (usually due to
	 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
	 * as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original VMA size is exactly a single page, and therefore all
	 * "splitting" operation will not happen to it.
	 */
	area->vm_ops = NULL;
}

static void mlx5_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
	 * However need a sync with accessing the vma as part of
	 * mlx5_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.
	 * The exiting case is handled explicitly as part of
	 * mlx5_ib_disassociate_ucontext.
	 */
	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;

	/* setting the vma context pointer to null in the mlx5_ib driver's
	 * private data, to protect a race condition in
	 * mlx5_ib_disassociate_ucontext().
	 */
	mlx5_ib_vma_priv_data->vma = NULL;
	list_del(&mlx5_ib_vma_priv_data->list);
	kfree(mlx5_ib_vma_priv_data);
}

static const struct vm_operations_struct mlx5_ib_vm_ops = {
	.open = mlx5_ib_vma_open,
	.close = mlx5_ib_vma_close
};
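/*
 * Every VMA handed out to userspace is recorded on the ucontext's
 * vma_private_list so that mlx5_ib_disassociate_ucontext() can later find
 * it and zap its PTEs if the device goes away while the mapping is live.
 */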
static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
				struct mlx5_ib_ucontext *ctx)
{
	struct mlx5_ib_vma_private_data *vma_prv;
	struct list_head *vma_head = &ctx->vma_private_list;

	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
	if (!vma_prv)
		return -ENOMEM;

	vma_prv->vma = vma;
	vma->vm_private_data = vma_prv;
	vma->vm_ops = &mlx5_ib_vm_ops;

	list_add(&vma_prv->list, vma_head);

	return 0;
}
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int ret;
	struct vm_area_struct *vma;
	struct mlx5_ib_vma_private_data *vma_private, *n;
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process ||
			    owning_process->state == TASK_DEAD) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the
				 * task struct.
				 */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx5_ib_vma_close.
	 */
	down_write(&owning_mm->mmap_sem);
	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
				 list) {
		vma = vma_private->vma;
		ret = zap_vma_ptes(vma, vma->vm_start,
				   PAGE_SIZE);
		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
		/* context going to be destroyed, should
		 * not access ops any more.
		 */
		vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		vma->vm_ops = NULL;
		list_del(&vma_private->list);
		kfree(vma_private);
	}
	up_write(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	default:
		return NULL;
	}
}
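/*
 * Map a single UAR/blue-flame page into userspace. The caching mode follows
 * the command: write-combining where the platform supports it, non-cached
 * otherwise or when explicitly requested via MLX5_IB_MMAP_NC_PAGE.
 */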
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn, pa;
	pgprot_t prot;
	int uars_per_page;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
	idx = get_index(vma->vm_pgoff);
	if (idx % uars_per_page ||
	    idx * uars_per_page >= bfregi->num_sys_pages) {
		mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
		return -EINVAL;
	}

	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
		if (!pat_enabled())
			return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
		return -EPERM;
#endif
	/* fall through */
	case MLX5_IB_MMAP_REGULAR_PAGE:
		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
		prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case MLX5_IB_MMAP_NC_PAGE:
		prot = pgprot_noncached(vma->vm_page_prot);
		break;
	default:
		return -EINVAL;
	}

	pfn = uar_index2pfn(dev, bfregi, idx);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	vma->vm_page_prot = prot;
	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
				 PAGE_SIZE, vma->vm_page_prot);
	if (err) {
		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
		return -EAGAIN;
	}

	pa = pfn << PAGE_SHIFT;
	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
		    vma->vm_start, &pa);

	return mlx5_ib_set_vma_data(vma, context);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}
enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;

	return match_criteria_enable;
}

static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}

static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

#define IPV4_VERSION 4
#define IPV6_VERSION 6
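/*
 * Translate one ib_flow_spec into the device's match_criteria/match_value
 * pair: match_c holds the mask (which bits participate in the match) and
 * match_v the expected value for those bits.
 */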
static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
			   u32 *match_v, const union ib_flow_spec *ib_spec,
			   u32 *tag_id, bool *is_drop)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *headers_c;
	void *headers_v;
	int match_ipv;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, IPV6_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IPV6);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		set_proto(headers_c, headers_v,
			  ib_spec->ipv6.mask.next_hdr,
			  ib_spec->ipv6.val.next_hdr);

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);
		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_TCP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_UDP);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		*tag_id = ib_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
					 LAST_DROP_FIELD))
			return -EOPNOTSUPP;
		*is_drop = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
	struct ib_flow_spec_eth *eth_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->size < sizeof(struct ib_flow_attr) +
	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
	    eth_spec->size != sizeof(*eth_spec))
		return false;

	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
	       is_multicast_ether_addr(eth_spec->val.dst_mac);
}
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
	bool ipv4_spec_valid, ipv6_spec_valid;
	unsigned int ip_spec_type = 0;
	bool has_ethertype = false;
	unsigned int spec_index;
	bool mask_valid = true;
	u16 eth_type = 0;
	bool type_valid;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
		    ib_spec->eth.mask.ether_type) {
			mask_valid = (ib_spec->eth.mask.ether_type ==
				      htons(0xffff));
			has_ethertype = true;
			eth_type = ntohs(ib_spec->eth.val.ether_type);
		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
			ip_spec_type = ib_spec->type;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}

	type_valid = (!has_ethertype) || (!ip_spec_type);
	if (!type_valid && mask_valid) {
		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
			     (((eth_type == ETH_P_MPLS_UC) ||
			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
	}

	return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
			  const struct ib_flow_attr *flow_attr)
{
	return is_valid_ethertype(mdev, flow_attr, false) &&
	       is_valid_ethertype(mdev, flow_attr, true);
}
static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;

	mutex_lock(&dev->flow_db.lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mutex_unlock(&dev->flow_db.lock);

	kfree(handler);

	return 0;
}
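/*
 * Each user-visible priority maps to a pair of core priorities: 2n for a
 * "don't trap" rule and 2n + 1 for a regular rule, so the don't-trap
 * variant always takes precedence within the same user priority.
 */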
static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 6
#define MLX5_FS_MAX_ENTRIES	 BIT(16)
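/*
 * Pick (and lazily create) the flow table backing the requested attribute
 * type: the bypass namespace for NORMAL rules, the leftovers namespace for
 * the default catch-all rules, and dedicated RX/TX sniffer tables.
 */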
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	int priority;
	int err = 0;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_is_multicast_only(flow_attr) &&
		    !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_BYPASS);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		prio = &dev->flow_db.prios[priority];
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-ENOTSUPP);

		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db.sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	if (num_entries > max_table_size)
		return ERR_PTR(-ENOMEM);

	ft = prio->flow_table;
	if (!ft) {
		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
							 num_entries,
							 num_groups,
							 0, 0);

		if (!IS_ERR(ft)) {
			prio->refcount = 0;
			prio->flow_table = ft;
		} else {
			err = PTR_ERR(ft);
		}
	}

	return err ? ERR_PTR(err) : prio;
}
static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_destination *rule_dst = dst;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	bool is_drop = false;
	int err = 0;
	int dest_num = 1;

	if (!is_valid_attr(dev->mdev, flow_attr))
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(dev->mdev, spec->match_criteria,
				      spec->match_value,
				      ib_flow, &flow_tag, &is_drop);
		if (err < 0)
			goto free;

		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
	if (is_drop) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
		rule_dst = NULL;
		dest_num = 0;
	} else {
		flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
				  MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
	}

	if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
			     flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}
	flow_act.flow_tag = flow_tag;
	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    rule_dst, dest_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;

	ft_prio->flow_table = ft;
free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}
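/*
 * A "don't trap" flow is built from two linked rules sharing one handler
 * list: the first is created with a NULL destination (forward to the next
 * priority) and the second with the real destination, so matching traffic
 * is passed on rather than trapped exclusively by this rule.
 */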
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_dst = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
	if (!IS_ERR(handler)) {
		handler_dst = create_flow_rule(dev, ft_prio,
					       flow_attr, dst);
		if (IS_ERR(handler_dst)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_dst;
		} else {
			list_add(&handler_dst->list, &handler->list);
		}
	}

	return handler;
}
enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};
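/*
 * Leftovers rules catch traffic that no other rule claimed. The static
 * specs below match only on the multicast bit of the destination MAC:
 * mask 0x1 with value 0x1 selects multicast leftovers, mask 0x1 with
 * value 0 selects unicast leftovers.
 */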
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}
static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							 struct mlx5_ib_flow_prio *ft_rx,
							 struct mlx5_ib_flow_prio *ft_tx,
							 struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr = {
		.num_of_specs = 0,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	int err;

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOMEM);

	if (domain != IB_FLOW_DOMAIN_USER ||
	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
		return ERR_PTR(-EINVAL);

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&dev->flow_db.lock);

	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	if (mqp->flags & MLX5_IB_QP_RSS)
		dst->tir_num = mqp->rss_qp.tirn;
	else
		dst->tir_num = mqp->raw_packet_qp.rq.tirn;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
			handler = create_dont_trap_rule(dev, ft_prio,
							flow_attr, dst);
		} else {
			handler = create_flow_rule(dev, ft_prio, flow_attr,
						   dst);
		}
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
						dst);
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
	} else {
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);
	kfree(handler);
	return ERR_PTR(err);
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	mutex_lock(&ports->devr->mutex);
	mlx5_ib_gsi_pkey_change(ports->gsi);
	mutex_unlock(&ports->devr->mutex);
}
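/*
 * On a firmware/internal error the hardware will not generate any more
 * completions, so walk every QP on this ibdev and manually invoke the
 * completion handler of each CQ that may still have work queued, letting
 * consumers drain their queues.
 */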
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over the qp list residing on this ibdev; sync with create/destroy qp. */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/* At this point all in-flight post-send requests have been flushed by
	 * the lock/unlock cycles above; now invoke the completion handler of
	 * every CQ that was collected.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	bool fatal = false;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		fatal = true;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		port = (u8)param;

		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			return;

		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	default:
		return;
	}
	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	/* A fatal error carries no port (port stays 0), so only range-check
	 * the port for per-port events; otherwise IB_EVENT_DEVICE_FATAL would
	 * be dropped here and the device never marked inactive.
	 */
	if (event != MLX5_DEV_EVENT_SYS_ERROR &&
	    (port < 1 || port > ibdev->num_ports)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
}
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		dev->mdev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
								   &vport_ctx);
				if (err) {
					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
						    port, err);
					return err;
				}
				dev->mdev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->mdev->port_caps[port - 1].has_smi = true;
			}
		}
	}
	return 0;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}
static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = set_has_smi_cap(dev);
	if (err)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		memset(pprops, 0, sizeof(*pprops));
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
			dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
			pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};
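/*
 * The UMR (user memory registration) resources are a driver-internal PD,
 * CQ and QP used to post memory-key manipulation work requests. The QP is
 * driven through INIT -> RTR -> RTS by hand below, and the semaphore bounds
 * the number of UMR WRs in flight to MAX_UMR_WR.
 */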
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_free_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);
	mutex_init(&devr->mutex);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject       = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context   = NULL;
	devr->s1->srq_type      = IB_SRQT_BASIC;
  2614. devr->s1->ext.xrc.cq = devr->c0;
  2615. atomic_inc(&devr->p0->usecnt);
  2616. atomic_set(&devr->s0->usecnt, 0);
  2617. for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
  2618. INIT_WORK(&devr->ports[port].pkey_change_work,
  2619. pkey_change_handler);
  2620. devr->ports[port].devr = devr;
  2621. }
  2622. return 0;
  2623. error5:
  2624. mlx5_ib_destroy_srq(devr->s0);
  2625. error4:
  2626. mlx5_ib_dealloc_xrcd(devr->x1);
  2627. error3:
  2628. mlx5_ib_dealloc_xrcd(devr->x0);
  2629. error2:
  2630. mlx5_ib_destroy_cq(devr->c0);
  2631. error1:
  2632. mlx5_ib_dealloc_pd(devr->p0);
  2633. error0:
  2634. return ret;
  2635. }
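
/*
 * Release everything create_dev_resources() set up: both SRQs, both
 * XRC domains, the CQ and the PD, then make sure no P_Key change work
 * items are left running.
 */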
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
        struct mlx5_ib_dev *dev =
                container_of(devr, struct mlx5_ib_dev, devr);
        int port;

        mlx5_ib_destroy_srq(devr->s1);
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
        mlx5_ib_destroy_cq(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0);

        /* Make sure no change P_Key work items are still executing */
        for (port = 0; port < dev->num_ports; ++port)
                cancel_work_sync(&devr->ports[port].pkey_change_work);
}
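
/*
 * Map the device's RoCE capabilities to RDMA core port flags. An IB
 * link layer short-circuits to plain IB; otherwise raw packet support
 * is always reported, and the RoCE v1/v2 bits are added only when both
 * IPv4 and IPv6 L3 types plus the matching RoCE version are supported.
 */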
static u32 get_core_cap_flags(struct ib_device *ibdev)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
        u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
        u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
        u32 ret = 0;

        if (ll == IB_LINK_LAYER_INFINIBAND)
                return RDMA_CORE_PORT_IBA_IB;

        ret = RDMA_CORE_PORT_RAW_PACKET;

        if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
                return ret;

        if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
                return ret;

        if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
                ret |= RDMA_CORE_PORT_IBA_ROCE;

        if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
                ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        return ret;
}
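
/*
 * Fill in the immutable port data (P_Key/GID table sizes, core
 * capability flags, max MAD size) that the RDMA core caches per port.
 */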
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
        int err;

        /*
         * Assign the capability flags before the query as well; the
         * query path may consult the immutable data while it is still
         * being populated.
         */
        immutable->core_cap_flags = get_core_cap_flags(ibdev);

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = get_core_cap_flags(ibdev);

        if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
                immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}
static void get_dev_fw_str(struct ib_device *ibdev, char *str,
                           size_t str_len)
{
        struct mlx5_ib_dev *dev =
                container_of(ibdev, struct mlx5_ib_dev, ib_dev);

        snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
                 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
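
/*
 * When LAG is active, create the flow table used to demultiplex
 * traffic between the bonded ports. A missing namespace or inactive
 * LAG is not an error; the setup is simply skipped.
 */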
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_flow_namespace *ns =
                mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_LAG);
        struct mlx5_flow_table *ft;
        int err;

        if (!ns || !mlx5_lag_is_active(mdev))
                return 0;

        err = mlx5_cmd_create_vport_lag(mdev);
        if (err)
                return err;

        ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_destroy_vport_lag;
        }

        dev->flow_db.lag_demux_ft = ft;
        return 0;

err_destroy_vport_lag:
        mlx5_cmd_destroy_vport_lag(mdev);
        return err;
}

static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
        struct mlx5_core_dev *mdev = dev->mdev;

        if (dev->flow_db.lag_demux_ft) {
                mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
                dev->flow_db.lag_demux_ft = NULL;

                mlx5_cmd_destroy_vport_lag(mdev);
        }
}
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
{
        int err;

        dev->roce.nb.notifier_call = mlx5_netdev_event;
        err = register_netdevice_notifier(&dev->roce.nb);
        if (err) {
                dev->roce.nb.notifier_call = NULL;
                return err;
        }

        return 0;
}

static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
        if (dev->roce.nb.notifier_call) {
                unregister_netdevice_notifier(&dev->roce.nb);
                dev->roce.nb.notifier_call = NULL;
        }
}
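
/*
 * Bring up the Ethernet side: register the netdev notifier, enable
 * RoCE when the capability is present, and initialize LAG demux
 * steering. Each step unwinds the previous ones on failure.
 */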
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
        int err;

        err = mlx5_add_netdev_notifier(dev);
        if (err)
                return err;

        if (MLX5_CAP_GEN(dev->mdev, roce)) {
                err = mlx5_nic_vport_enable_roce(dev->mdev);
                if (err)
                        goto err_unregister_netdevice_notifier;
        }

        err = mlx5_eth_lag_init(dev);
        if (err)
                goto err_disable_roce;

        return 0;

err_disable_roce:
        if (MLX5_CAP_GEN(dev->mdev, roce))
                mlx5_nic_vport_disable_roce(dev->mdev);

err_unregister_netdevice_notifier:
        mlx5_remove_netdev_notifier(dev);
        return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
        mlx5_eth_lag_cleanup(dev);
        if (MLX5_CAP_GEN(dev->mdev, roce))
                mlx5_nic_vport_disable_roce(dev->mdev);
}
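
/*
 * HW counter plumbing. Each table entry pairs a stat name with its
 * byte offset inside the firmware query output, so the query helpers
 * below can copy values out generically. Congestion counters are
 * 64-bit values, hence the offset of the _high half in
 * INIT_CONG_COUNTER.
 */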
struct mlx5_ib_counter {
        const char *name;
        size_t offset;
};

#define INIT_Q_COUNTER(_name) \
        { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}

static const struct mlx5_ib_counter basic_q_cnts[] = {
        INIT_Q_COUNTER(rx_write_requests),
        INIT_Q_COUNTER(rx_read_requests),
        INIT_Q_COUNTER(rx_atomic_requests),
        INIT_Q_COUNTER(out_of_buffer),
};

static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
        INIT_Q_COUNTER(out_of_sequence),
};

static const struct mlx5_ib_counter retrans_q_cnts[] = {
        INIT_Q_COUNTER(duplicate_request),
        INIT_Q_COUNTER(rnr_nak_retry_err),
        INIT_Q_COUNTER(packet_seq_err),
        INIT_Q_COUNTER(implied_nak_seq_err),
        INIT_Q_COUNTER(local_ack_timeout_err),
};

#define INIT_CONG_COUNTER(_name) \
        { .name = #_name, .offset = \
          MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}

static const struct mlx5_ib_counter cong_cnts[] = {
        INIT_CONG_COUNTER(rp_cnp_ignored),
        INIT_CONG_COUNTER(rp_cnp_handled),
        INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
        INIT_CONG_COUNTER(np_cnp_sent),
};
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_ports; i++) {
                mlx5_core_dealloc_q_counter(dev->mdev,
                                            dev->port[i].cnts.set_id);
                kfree(dev->port[i].cnts.names);
                kfree(dev->port[i].cnts.offsets);
        }
}
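
/*
 * Size the per-port name/offset arrays according to which optional
 * counter sets (out-of-sequence, retransmission, congestion) the
 * firmware exposes.
 */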
static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_counters *cnts)
{
        u32 num_counters;

        num_counters = ARRAY_SIZE(basic_q_cnts);

        if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
                num_counters += ARRAY_SIZE(out_of_seq_q_cnts);

        if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
                num_counters += ARRAY_SIZE(retrans_q_cnts);

        cnts->num_q_counters = num_counters;

        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
                cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
                num_counters += ARRAY_SIZE(cong_cnts);
        }

        cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
        if (!cnts->names)
                return -ENOMEM;

        cnts->offsets = kcalloc(num_counters,
                                sizeof(*cnts->offsets), GFP_KERNEL);
        if (!cnts->offsets)
                goto err_names;

        return 0;

err_names:
        kfree(cnts->names);
        return -ENOMEM;
}
static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
                                  const char **names,
                                  size_t *offsets)
{
        int i;
        int j = 0;

        for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
                names[j] = basic_q_cnts[i].name;
                offsets[j] = basic_q_cnts[i].offset;
        }

        if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
                for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
                        names[j] = out_of_seq_q_cnts[i].name;
                        offsets[j] = out_of_seq_q_cnts[i].offset;
                }
        }

        if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
                for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
                        names[j] = retrans_q_cnts[i].name;
                        offsets[j] = retrans_q_cnts[i].offset;
                }
        }

        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
                for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
                        names[j] = cong_cnts[i].name;
                        offsets[j] = cong_cnts[i].offset;
                }
        }
}
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
        int i;
        int ret;

        for (i = 0; i < dev->num_ports; i++) {
                struct mlx5_ib_port *port = &dev->port[i];

                ret = mlx5_core_alloc_q_counter(dev->mdev,
                                                &port->cnts.set_id);
                if (ret) {
                        mlx5_ib_warn(dev,
                                     "couldn't allocate queue counter for port %d, err %d\n",
                                     i + 1, ret);
                        goto dealloc_counters;
                }

                ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
                if (ret) {
                        /* The q counter for this port was already allocated */
                        mlx5_core_dealloc_q_counter(dev->mdev,
                                                    port->cnts.set_id);
                        goto dealloc_counters;
                }

                mlx5_ib_fill_counters(dev, port->cnts.names,
                                      port->cnts.offsets);
        }

        return 0;

dealloc_counters:
        while (--i >= 0) {
                mlx5_core_dealloc_q_counter(dev->mdev,
                                            dev->port[i].cnts.set_id);
                kfree(dev->port[i].cnts.names);
                kfree(dev->port[i].cnts.offsets);
        }

        return ret;
}
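
/*
 * rdma_hw_stats allocation hook; exposes the queue counters plus the
 * congestion counters when the device reported them.
 */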
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
                                                    u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_port *port;

        /* We support only per port stats */
        if (port_num == 0)
                return NULL;

        port = &dev->port[port_num - 1];

        return rdma_alloc_hw_stats_struct(port->cnts.names,
                                          port->cnts.num_q_counters +
                                          port->cnts.num_cong_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
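
/*
 * Query helpers for the two counter families: queue counters are
 * 32-bit values read via QUERY_Q_COUNTER; congestion counters are
 * 64-bit values read via the congestion statistics command and placed
 * after the queue counters in the stats array.
 */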
static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_port *port,
                                    struct rdma_hw_stats *stats)
{
        int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
        void *out;
        __be32 val;
        int ret, i;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_core_query_q_counter(dev->mdev,
                                        port->cnts.set_id, 0,
                                        out, outlen);
        if (ret)
                goto free;

        for (i = 0; i < port->cnts.num_q_counters; i++) {
                val = *(__be32 *)(out + port->cnts.offsets[i]);
                stats->value[i] = (u64)be32_to_cpu(val);
        }

free:
        kvfree(out);
        return ret;
}
static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
                                       struct mlx5_ib_port *port,
                                       struct rdma_hw_stats *stats)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
        void *out;
        int ret, i;
        int offset = port->cnts.num_q_counters;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
        if (ret)
                goto free;

        for (i = 0; i < port->cnts.num_cong_counters; i++) {
                stats->value[i + offset] =
                        be64_to_cpup((__be64 *)(out +
                                     port->cnts.offsets[i + offset]));
        }

free:
        kvfree(out);
        return ret;
}
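
/*
 * Main .get_hw_stats entry point: refresh the queue counters, then the
 * congestion counters when allowed, and return how many values were
 * filled in.
 */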
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                                struct rdma_hw_stats *stats,
                                u8 port_num, int index)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_port *port = &dev->port[port_num - 1];
        int ret, num_counters;

        if (!stats)
                return -EINVAL;

        ret = mlx5_ib_query_q_counters(dev, port, stats);
        if (ret)
                return ret;

        num_counters = port->cnts.num_q_counters;

        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
                ret = mlx5_ib_query_cong_counters(dev, port, stats);
                if (ret)
                        return ret;
                num_counters += port->cnts.num_cong_counters;
        }

        return num_counters;
}
static struct net_device *
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
                          u8 port_num,
                          enum rdma_netdev_t type,
                          const char *name,
                          unsigned char name_assign_type,
                          void (*setup)(struct net_device *))
{
        if (type != RDMA_NETDEV_IPOIB)
                return ERR_PTR(-EOPNOTSUPP);

        return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
                                      name, setup);
}

static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
{
        mlx5_rdma_netdev_free(netdev);
}
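
/*
 * Probe-time entry point: allocate and populate the ib_device, wire up
 * the verbs ops and capability-dependent features, bring up the
 * Ethernet/RoCE side when the link layer requires it, create the
 * device resources, and finally register with the RDMA core. The error
 * ladder at the bottom unwinds in strict reverse order.
 */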
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
        struct mlx5_ib_dev *dev;
        enum rdma_link_layer ll;
        int port_type_cap;
        const char *name;
        int err;
        int i;

        port_type_cap = MLX5_CAP_GEN(mdev, port_type);
        ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

        printk_once(KERN_INFO "%s", mlx5_version);

        dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev)
                return NULL;

        dev->mdev = mdev;
        dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
                            GFP_KERNEL);
        if (!dev->port)
                goto err_dealloc;

        rwlock_init(&dev->roce.netdev_lock);
        err = get_port_caps(dev);
        if (err)
                goto err_free_port;

        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);

        if (!mlx5_lag_is_active(mdev))
                name = "mlx5_%d";
        else
                name = "mlx5_bond_%d";

        strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner = THIS_MODULE;
        dev->ib_dev.node_type = RDMA_NODE_IB_CA;
        dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
        dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt = dev->num_ports;
        dev->ib_dev.num_comp_vectors =
                dev->mdev->priv.eq_table.num_comp_vectors;
        dev->ib_dev.dev.parent = &mdev->pdev->dev;

        dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
                (1ull << IB_USER_VERBS_CMD_REG_MR) |
                (1ull << IB_USER_VERBS_CMD_REREG_MR) |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
        dev->ib_dev.uverbs_ex_cmd_mask =
                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);

        dev->ib_dev.query_device = mlx5_ib_query_device;
        dev->ib_dev.query_port = mlx5_ib_query_port;
        dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
        if (ll == IB_LINK_LAYER_ETHERNET)
                dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
        dev->ib_dev.query_gid = mlx5_ib_query_gid;
        dev->ib_dev.add_gid = mlx5_ib_add_gid;
        dev->ib_dev.del_gid = mlx5_ib_del_gid;
        dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
        dev->ib_dev.modify_device = mlx5_ib_modify_device;
        dev->ib_dev.modify_port = mlx5_ib_modify_port;
        dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
        dev->ib_dev.mmap = mlx5_ib_mmap;
        dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
        dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
        dev->ib_dev.create_ah = mlx5_ib_create_ah;
        dev->ib_dev.query_ah = mlx5_ib_query_ah;
        dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
        dev->ib_dev.create_srq = mlx5_ib_create_srq;
        dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
        dev->ib_dev.query_srq = mlx5_ib_query_srq;
        dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
        dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
        dev->ib_dev.create_qp = mlx5_ib_create_qp;
        dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
        dev->ib_dev.query_qp = mlx5_ib_query_qp;
        dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
        dev->ib_dev.post_send = mlx5_ib_post_send;
        dev->ib_dev.post_recv = mlx5_ib_post_recv;
        dev->ib_dev.create_cq = mlx5_ib_create_cq;
        dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
        dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
        dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
        dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
        dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
        dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
        dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
        dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
        dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
        dev->ib_dev.process_mad = mlx5_ib_process_mad;
        dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
        dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable = mlx5_port_immutable;
        dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
        dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
        dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
        if (mlx5_core_is_pf(mdev)) {
                dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
                dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
                dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
                dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
        }

        dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

        mlx5_ib_internal_fill_odp_caps(dev);

        if (MLX5_CAP_GEN(mdev, imaicl)) {
                dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
                dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
                        (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
        }

        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
                dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
                dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
        }

        if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
        }

        if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
            IB_LINK_LAYER_ETHERNET) {
                dev->ib_dev.create_flow = mlx5_ib_create_flow;
                dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
                dev->ib_dev.create_wq = mlx5_ib_create_wq;
                dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
                dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
                dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
                dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
                dev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
        }
        err = init_node_data(dev);
        if (err)
                goto err_free_port;

        mutex_init(&dev->flow_db.lock);
        mutex_init(&dev->cap_mask_mutex);
        INIT_LIST_HEAD(&dev->qp_list);
        spin_lock_init(&dev->reset_flow_resource_lock);

        if (ll == IB_LINK_LAYER_ETHERNET) {
                err = mlx5_enable_eth(dev);
                if (err)
                        goto err_free_port;
        }

        err = create_dev_resources(&dev->devr);
        if (err)
                goto err_disable_eth;

        err = mlx5_ib_odp_init_one(dev);
        if (err)
                goto err_rsrc;

        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
                err = mlx5_ib_alloc_counters(dev);
                if (err)
                        goto err_odp;
        }

        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
        if (!dev->mdev->priv.uar)
                goto err_cnt;

        err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
        if (err)
                goto err_uar_page;

        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
        if (err)
                goto err_bfreg;

        err = ib_register_device(&dev->ib_dev, NULL);
        if (err)
                goto err_fp_bfreg;

        err = create_umr_res(dev);
        if (err)
                goto err_dev;

        for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
                err = device_create_file(&dev->ib_dev.dev,
                                         mlx5_class_attributes[i]);
                if (err)
                        goto err_umrc;
        }

        dev->ib_active = true;

        return dev;

err_umrc:
        destroy_umrc_res(dev);

err_dev:
        ib_unregister_device(&dev->ib_dev);

err_fp_bfreg:
        mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);

err_bfreg:
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);

err_uar_page:
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

err_cnt:
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
                mlx5_ib_dealloc_counters(dev);

err_odp:
        mlx5_ib_odp_remove_one(dev);

err_rsrc:
        destroy_dev_resources(&dev->devr);

err_disable_eth:
        if (ll == IB_LINK_LAYER_ETHERNET) {
                mlx5_disable_eth(dev);
                mlx5_remove_netdev_notifier(dev);
        }

err_free_port:
        kfree(dev->port);

err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);

        return NULL;
}
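
/*
 * Mirror of mlx5_ib_add(): unregister from the RDMA core first, then
 * release resources largely in reverse order of creation.
 */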
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
        struct mlx5_ib_dev *dev = context;
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

        mlx5_remove_netdev_notifier(dev);
        ib_unregister_device(&dev->ib_dev);
        mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
        mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
                mlx5_ib_dealloc_counters(dev);
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_eth(dev);
        kfree(dev->port);
        ib_dealloc_device(&dev->ib_dev);
}
static struct mlx5_interface mlx5_ib_interface = {
        .add = mlx5_ib_add,
        .remove = mlx5_ib_remove,
        .event = mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        .pfault = mlx5_ib_pfault,
#endif
        .protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
        int err;

        mlx5_ib_odp_init();

        err = mlx5_register_interface(&mlx5_ib_interface);

        return err;
}

static void __exit mlx5_ib_cleanup(void)
{
        mlx5_unregister_interface(&mlx5_ib_interface);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);