main.c

  1. /*
  2. * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/highmem.h>
  33. #include <linux/module.h>
  34. #include <linux/init.h>
  35. #include <linux/errno.h>
  36. #include <linux/pci.h>
  37. #include <linux/dma-mapping.h>
  38. #include <linux/slab.h>
  39. #if defined(CONFIG_X86)
  40. #include <asm/pat.h>
  41. #endif
  42. #include <linux/sched.h>
  43. #include <linux/delay.h>
  44. #include <rdma/ib_user_verbs.h>
  45. #include <rdma/ib_addr.h>
  46. #include <rdma/ib_cache.h>
  47. #include <linux/mlx5/port.h>
  48. #include <linux/mlx5/vport.h>
  49. #include <linux/list.h>
  50. #include <rdma/ib_smi.h>
  51. #include <rdma/ib_umem.h>
  52. #include <linux/in.h>
  53. #include <linux/etherdevice.h>
  54. #include <linux/mlx5/fs.h>
  55. #include "mlx5_ib.h"
  56. #define DRIVER_NAME "mlx5_ib"
  57. #define DRIVER_VERSION "2.2-1"
  58. #define DRIVER_RELDATE "Feb 2014"
  59. MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
  60. MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
  61. MODULE_LICENSE("Dual BSD/GPL");
  62. MODULE_VERSION(DRIVER_VERSION);
  63. static char mlx5_version[] =
  64. DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
  65. DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
  66. enum {
  67. MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
  68. };
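/* Map the firmware port_type capability to the RDMA link layer reported to
 * the IB core: IB ports report InfiniBand, Ethernet ports report Ethernet
 * (used for RoCE), anything else is unspecified.
 */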
  69. static enum rdma_link_layer
  70. mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
  71. {
  72. switch (port_type_cap) {
  73. case MLX5_CAP_PORT_TYPE_IB:
  74. return IB_LINK_LAYER_INFINIBAND;
  75. case MLX5_CAP_PORT_TYPE_ETH:
  76. return IB_LINK_LAYER_ETHERNET;
  77. default:
  78. return IB_LINK_LAYER_UNSPECIFIED;
  79. }
  80. }
  81. static enum rdma_link_layer
  82. mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
  83. {
  84. struct mlx5_ib_dev *dev = to_mdev(device);
  85. int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
  86. return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
  87. }
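/* Netdev notifier callback used for RoCE ports. On register/unregister it
 * caches (or clears) the net_device belonging to this HCA; on up/down it
 * dispatches IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR, following the bond
 * master when LAG is active.
 */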
  88. static int mlx5_netdev_event(struct notifier_block *this,
  89. unsigned long event, void *ptr)
  90. {
  91. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  92. struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
  93. roce.nb);
  94. switch (event) {
  95. case NETDEV_REGISTER:
  96. case NETDEV_UNREGISTER:
  97. write_lock(&ibdev->roce.netdev_lock);
  98. if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
  99. ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
  100. NULL : ndev;
  101. write_unlock(&ibdev->roce.netdev_lock);
  102. break;
  103. case NETDEV_UP:
  104. case NETDEV_DOWN: {
  105. struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
  106. struct net_device *upper = NULL;
  107. if (lag_ndev) {
  108. upper = netdev_master_upper_dev_get(lag_ndev);
  109. dev_put(lag_ndev);
  110. }
  111. if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
  112. && ibdev->ib_active) {
  113. struct ib_event ibev = { };
  114. ibev.device = &ibdev->ib_dev;
  115. ibev.event = (event == NETDEV_UP) ?
  116. IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
  117. ibev.element.port_num = 1;
  118. ib_dispatch_event(&ibev);
  119. }
  120. break;
  121. }
  122. default:
  123. break;
  124. }
  125. return NOTIFY_DONE;
  126. }
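/* Return the net_device backing the RoCE port, preferring the LAG netdev
 * when bonding is active. A reference is held on the returned device, so
 * the caller is expected to dev_put() it.
 */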
  127. static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
  128. u8 port_num)
  129. {
  130. struct mlx5_ib_dev *ibdev = to_mdev(device);
  131. struct net_device *ndev;
  132. ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
  133. if (ndev)
  134. return ndev;
  135. /* Ensure ndev does not disappear before we invoke dev_hold()
  136. */
  137. read_lock(&ibdev->roce.netdev_lock);
  138. ndev = ibdev->roce.netdev;
  139. if (ndev)
  140. dev_hold(ndev);
  141. read_unlock(&ibdev->roce.netdev_lock);
  142. return ndev;
  143. }
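/* Fill ib_port_attr for an Ethernet (RoCE) port. Port state and MTU are
 * derived from the underlying net_device (or its bond master when LAG is
 * active); active width and speed are currently hard-coded, as noted by
 * the TODO markers below.
 */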
  144. static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
  145. struct ib_port_attr *props)
  146. {
  147. struct mlx5_ib_dev *dev = to_mdev(device);
  148. struct net_device *ndev, *upper;
  149. enum ib_mtu ndev_ib_mtu;
  150. u16 qkey_viol_cntr;
  151. /* props is zeroed by the caller; avoid zeroing it here */
  152. props->port_cap_flags |= IB_PORT_CM_SUP;
  153. props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
  154. props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
  155. roce_address_table_size);
  156. props->max_mtu = IB_MTU_4096;
  157. props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
  158. props->pkey_tbl_len = 1;
  159. props->state = IB_PORT_DOWN;
  160. props->phys_state = 3;
  161. mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
  162. props->qkey_viol_cntr = qkey_viol_cntr;
  163. ndev = mlx5_ib_get_netdev(device, port_num);
  164. if (!ndev)
  165. return 0;
  166. if (mlx5_lag_is_active(dev->mdev)) {
  167. rcu_read_lock();
  168. upper = netdev_master_upper_dev_get_rcu(ndev);
  169. if (upper) {
  170. dev_put(ndev);
  171. ndev = upper;
  172. dev_hold(ndev);
  173. }
  174. rcu_read_unlock();
  175. }
  176. if (netif_running(ndev) && netif_carrier_ok(ndev)) {
  177. props->state = IB_PORT_ACTIVE;
  178. props->phys_state = 5;
  179. }
  180. ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
  181. dev_put(ndev);
  182. props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
  183. props->active_width = IB_WIDTH_4X; /* TODO */
  184. props->active_speed = IB_SPEED_QDR; /* TODO */
  185. return 0;
  186. }
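/* Pack a GID and its attributes (source MAC, optional VLAN, RoCE version
 * and L3 type) into the firmware roce_addr layout. A NULL gid leaves the
 * entry zeroed.
 */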
  187. static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
  188. const struct ib_gid_attr *attr,
  189. void *mlx5_addr)
  190. {
  191. #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
  192. char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
  193. source_l3_address);
  194. void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
  195. source_mac_47_32);
  196. if (!gid)
  197. return;
  198. ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
  199. if (is_vlan_dev(attr->ndev)) {
  200. MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
  201. MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
  202. }
  203. switch (attr->gid_type) {
  204. case IB_GID_TYPE_IB:
  205. MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
  206. break;
  207. case IB_GID_TYPE_ROCE_UDP_ENCAP:
  208. MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
  209. break;
  210. default:
  211. WARN_ON(true);
  212. }
  213. if (attr->gid_type != IB_GID_TYPE_IB) {
  214. if (ipv6_addr_v4mapped((void *)gid))
  215. MLX5_SET_RA(mlx5_addr, roce_l3_type,
  216. MLX5_ROCE_L3_TYPE_IPV4);
  217. else
  218. MLX5_SET_RA(mlx5_addr, roce_l3_type,
  219. MLX5_ROCE_L3_TYPE_IPV6);
  220. }
  221. if ((attr->gid_type == IB_GID_TYPE_IB) ||
  222. !ipv6_addr_v4mapped((void *)gid))
  223. memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
  224. else
  225. memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
  226. }
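/* Program one GID table entry into firmware with SET_ROCE_ADDRESS. Only
 * valid on the Ethernet link layer; passing a NULL gid (see
 * mlx5_ib_del_gid()) writes a zeroed entry, i.e. deletes it.
 */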
  227. static int set_roce_addr(struct ib_device *device, u8 port_num,
  228. unsigned int index,
  229. const union ib_gid *gid,
  230. const struct ib_gid_attr *attr)
  231. {
  232. struct mlx5_ib_dev *dev = to_mdev(device);
  233. u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
  234. u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
  235. void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
  236. enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
  237. if (ll != IB_LINK_LAYER_ETHERNET)
  238. return -EINVAL;
  239. ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
  240. MLX5_SET(set_roce_address_in, in, roce_address_index, index);
  241. MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
  242. return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  243. }
  244. static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
  245. unsigned int index, const union ib_gid *gid,
  246. const struct ib_gid_attr *attr,
  247. __always_unused void **context)
  248. {
  249. return set_roce_addr(device, port_num, index, gid, attr);
  250. }
  251. static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
  252. unsigned int index, __always_unused void **context)
  253. {
  254. return set_roce_addr(device, port_num, index, NULL, NULL);
  255. }
  256. __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
  257. int index)
  258. {
  259. struct ib_gid_attr attr;
  260. union ib_gid gid;
  261. if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
  262. return 0;
  263. if (!attr.ndev)
  264. return 0;
  265. dev_put(attr.ndev);
  266. if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
  267. return 0;
  268. return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
  269. }
  270. int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
  271. int index, enum ib_gid_type *gid_type)
  272. {
  273. struct ib_gid_attr attr;
  274. union ib_gid gid;
  275. int ret;
  276. ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
  277. if (ret)
  278. return ret;
  279. if (!attr.ndev)
  280. return -ENODEV;
  281. dev_put(attr.ndev);
  282. *gid_type = attr.gid_type;
  283. return 0;
  284. }
  285. static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
  286. {
  287. if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
  288. return !MLX5_CAP_GEN(dev->mdev, ib_virt);
  289. return 0;
  290. }
  291. enum {
  292. MLX5_VPORT_ACCESS_METHOD_MAD,
  293. MLX5_VPORT_ACCESS_METHOD_HCA,
  294. MLX5_VPORT_ACCESS_METHOD_NIC,
  295. };
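/* Choose how vport/port attributes are queried: through MADs when the MAD
 * interface must be used, through NIC vport commands on Ethernet ports,
 * and through HCA vport commands otherwise.
 */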
  296. static int mlx5_get_vport_access_method(struct ib_device *ibdev)
  297. {
  298. if (mlx5_use_mad_ifc(to_mdev(ibdev)))
  299. return MLX5_VPORT_ACCESS_METHOD_MAD;
  300. if (mlx5_ib_port_link_layer(ibdev, 1) ==
  301. IB_LINK_LAYER_ETHERNET)
  302. return MLX5_VPORT_ACCESS_METHOD_NIC;
  303. return MLX5_VPORT_ACCESS_METHOD_HCA;
  304. }
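/* Advertise IB_ATOMIC_HCA only when the HCA supports 8-byte compare-swap
 * and fetch-add and can respond in host endianness; otherwise report
 * IB_ATOMIC_NONE.
 */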
  305. static void get_atomic_caps(struct mlx5_ib_dev *dev,
  306. struct ib_device_attr *props)
  307. {
  308. u8 tmp;
  309. u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
  310. u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
  311. u8 atomic_req_8B_endianness_mode =
  312. MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
  313. /* Check whether the HW supports 8-byte standard atomic operations and
  314. * is capable of responding in host endianness
  315. */
  316. tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
  317. if (((atomic_operations & tmp) == tmp) &&
  318. (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
  319. (atomic_req_8B_endianness_mode)) {
  320. props->atomic_cap = IB_ATOMIC_HCA;
  321. } else {
  322. props->atomic_cap = IB_ATOMIC_NONE;
  323. }
  324. }
  325. static int mlx5_query_system_image_guid(struct ib_device *ibdev,
  326. __be64 *sys_image_guid)
  327. {
  328. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  329. struct mlx5_core_dev *mdev = dev->mdev;
  330. u64 tmp;
  331. int err;
  332. switch (mlx5_get_vport_access_method(ibdev)) {
  333. case MLX5_VPORT_ACCESS_METHOD_MAD:
  334. return mlx5_query_mad_ifc_system_image_guid(ibdev,
  335. sys_image_guid);
  336. case MLX5_VPORT_ACCESS_METHOD_HCA:
  337. err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
  338. break;
  339. case MLX5_VPORT_ACCESS_METHOD_NIC:
  340. err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
  341. break;
  342. default:
  343. return -EINVAL;
  344. }
  345. if (!err)
  346. *sys_image_guid = cpu_to_be64(tmp);
  347. return err;
  348. }
  349. static int mlx5_query_max_pkeys(struct ib_device *ibdev,
  350. u16 *max_pkeys)
  351. {
  352. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  353. struct mlx5_core_dev *mdev = dev->mdev;
  354. switch (mlx5_get_vport_access_method(ibdev)) {
  355. case MLX5_VPORT_ACCESS_METHOD_MAD:
  356. return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
  357. case MLX5_VPORT_ACCESS_METHOD_HCA:
  358. case MLX5_VPORT_ACCESS_METHOD_NIC:
  359. *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
  360. pkey_table_size));
  361. return 0;
  362. default:
  363. return -EINVAL;
  364. }
  365. }
  366. static int mlx5_query_vendor_id(struct ib_device *ibdev,
  367. u32 *vendor_id)
  368. {
  369. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  370. switch (mlx5_get_vport_access_method(ibdev)) {
  371. case MLX5_VPORT_ACCESS_METHOD_MAD:
  372. return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
  373. case MLX5_VPORT_ACCESS_METHOD_HCA:
  374. case MLX5_VPORT_ACCESS_METHOD_NIC:
  375. return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
  376. default:
  377. return -EINVAL;
  378. }
  379. }
  380. static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
  381. __be64 *node_guid)
  382. {
  383. u64 tmp;
  384. int err;
  385. switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
  386. case MLX5_VPORT_ACCESS_METHOD_MAD:
  387. return mlx5_query_mad_ifc_node_guid(dev, node_guid);
  388. case MLX5_VPORT_ACCESS_METHOD_HCA:
  389. err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
  390. break;
  391. case MLX5_VPORT_ACCESS_METHOD_NIC:
  392. err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
  393. break;
  394. default:
  395. return -EINVAL;
  396. }
  397. if (!err)
  398. *node_guid = cpu_to_be64(tmp);
  399. return err;
  400. }
  401. struct mlx5_reg_node_desc {
  402. u8 desc[IB_DEVICE_NODE_DESC_MAX];
  403. };
  404. static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
  405. {
  406. struct mlx5_reg_node_desc in;
  407. if (mlx5_use_mad_ifc(dev))
  408. return mlx5_query_mad_ifc_node_desc(dev, node_desc);
  409. memset(&in, 0, sizeof(in));
  410. return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
  411. sizeof(struct mlx5_reg_node_desc),
  412. MLX5_REG_NODE_DESC, 0, 0);
  413. }
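/* query_device verb. Fills ib_device_attr from the HCA capability pages
 * and, when user space supplied a large enough response buffer (uhw),
 * appends extended capabilities such as TSO, RSS, CQE compression and
 * packet pacing to the vendor-specific response.
 */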
  414. static int mlx5_ib_query_device(struct ib_device *ibdev,
  415. struct ib_device_attr *props,
  416. struct ib_udata *uhw)
  417. {
  418. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  419. struct mlx5_core_dev *mdev = dev->mdev;
  420. int err = -ENOMEM;
  421. int max_sq_desc;
  422. int max_rq_sg;
  423. int max_sq_sg;
  424. u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
  425. struct mlx5_ib_query_device_resp resp = {};
  426. size_t resp_len;
  427. u64 max_tso;
  428. resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
  429. if (uhw->outlen && uhw->outlen < resp_len)
  430. return -EINVAL;
  431. else
  432. resp.response_length = resp_len;
  433. if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
  434. return -EINVAL;
  435. memset(props, 0, sizeof(*props));
  436. err = mlx5_query_system_image_guid(ibdev,
  437. &props->sys_image_guid);
  438. if (err)
  439. return err;
  440. err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
  441. if (err)
  442. return err;
  443. err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
  444. if (err)
  445. return err;
  446. props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
  447. (fw_rev_min(dev->mdev) << 16) |
  448. fw_rev_sub(dev->mdev);
  449. props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
  450. IB_DEVICE_PORT_ACTIVE_EVENT |
  451. IB_DEVICE_SYS_IMAGE_GUID |
  452. IB_DEVICE_RC_RNR_NAK_GEN;
  453. if (MLX5_CAP_GEN(mdev, pkv))
  454. props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
  455. if (MLX5_CAP_GEN(mdev, qkv))
  456. props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
  457. if (MLX5_CAP_GEN(mdev, apm))
  458. props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
  459. if (MLX5_CAP_GEN(mdev, xrc))
  460. props->device_cap_flags |= IB_DEVICE_XRC;
  461. if (MLX5_CAP_GEN(mdev, imaicl)) {
  462. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
  463. IB_DEVICE_MEM_WINDOW_TYPE_2B;
  464. props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
  465. /* We support 'Gappy' memory registration too */
  466. props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
  467. }
  468. props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
  469. if (MLX5_CAP_GEN(mdev, sho)) {
  470. props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
  471. /* At this stage no support for signature handover */
  472. props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
  473. IB_PROT_T10DIF_TYPE_2 |
  474. IB_PROT_T10DIF_TYPE_3;
  475. props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
  476. IB_GUARD_T10DIF_CSUM;
  477. }
  478. if (MLX5_CAP_GEN(mdev, block_lb_mc))
  479. props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
  480. if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
  481. if (MLX5_CAP_ETH(mdev, csum_cap)) {
  482. /* Legacy bit to support old userspace libraries */
  483. props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
  484. props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
  485. }
  486. if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
  487. props->raw_packet_caps |=
  488. IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
  489. if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
  490. max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
  491. if (max_tso) {
  492. resp.tso_caps.max_tso = 1 << max_tso;
  493. resp.tso_caps.supported_qpts |=
  494. 1 << IB_QPT_RAW_PACKET;
  495. resp.response_length += sizeof(resp.tso_caps);
  496. }
  497. }
  498. if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
  499. resp.rss_caps.rx_hash_function =
  500. MLX5_RX_HASH_FUNC_TOEPLITZ;
  501. resp.rss_caps.rx_hash_fields_mask =
  502. MLX5_RX_HASH_SRC_IPV4 |
  503. MLX5_RX_HASH_DST_IPV4 |
  504. MLX5_RX_HASH_SRC_IPV6 |
  505. MLX5_RX_HASH_DST_IPV6 |
  506. MLX5_RX_HASH_SRC_PORT_TCP |
  507. MLX5_RX_HASH_DST_PORT_TCP |
  508. MLX5_RX_HASH_SRC_PORT_UDP |
  509. MLX5_RX_HASH_DST_PORT_UDP;
  510. resp.response_length += sizeof(resp.rss_caps);
  511. }
  512. } else {
  513. if (field_avail(typeof(resp), tso_caps, uhw->outlen))
  514. resp.response_length += sizeof(resp.tso_caps);
  515. if (field_avail(typeof(resp), rss_caps, uhw->outlen))
  516. resp.response_length += sizeof(resp.rss_caps);
  517. }
  518. if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
  519. props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
  520. props->device_cap_flags |= IB_DEVICE_UD_TSO;
  521. }
  522. if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
  523. MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
  524. /* Legacy bit to support old userspace libraries */
  525. props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
  526. props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
  527. }
  528. if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
  529. props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
  530. props->vendor_part_id = mdev->pdev->device;
  531. props->hw_ver = mdev->pdev->revision;
  532. props->max_mr_size = ~0ull;
  533. props->page_size_cap = ~(min_page_size - 1);
  534. props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
  535. props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
  536. max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
  537. sizeof(struct mlx5_wqe_data_seg);
  538. max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
  539. max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
  540. sizeof(struct mlx5_wqe_raddr_seg)) /
  541. sizeof(struct mlx5_wqe_data_seg);
  542. props->max_sge = min(max_rq_sg, max_sq_sg);
  543. props->max_sge_rd = MLX5_MAX_SGE_RD;
  544. props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
  545. props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
  546. props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
  547. props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
  548. props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
  549. props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
  550. props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
  551. props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
  552. props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
  553. props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
  554. props->max_srq_sge = max_rq_sg - 1;
  555. props->max_fast_reg_page_list_len =
  556. 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
  557. get_atomic_caps(dev, props);
  558. props->masked_atomic_cap = IB_ATOMIC_NONE;
  559. props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
  560. props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
  561. props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
  562. props->max_mcast_grp;
  563. props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
  564. props->max_ah = INT_MAX;
  565. props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
  566. props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
  567. #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
  568. if (MLX5_CAP_GEN(mdev, pg))
  569. props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
  570. props->odp_caps = dev->odp_caps;
  571. #endif
  572. if (MLX5_CAP_GEN(mdev, cd))
  573. props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
  574. if (!mlx5_core_is_pf(mdev))
  575. props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
  576. if (mlx5_ib_port_link_layer(ibdev, 1) ==
  577. IB_LINK_LAYER_ETHERNET) {
  578. props->rss_caps.max_rwq_indirection_tables =
  579. 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
  580. props->rss_caps.max_rwq_indirection_table_size =
  581. 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
  582. props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
  583. props->max_wq_type_rq =
  584. 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
  585. }
  586. if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
  587. resp.cqe_comp_caps.max_num =
  588. MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
  589. MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
  590. resp.cqe_comp_caps.supported_format =
  591. MLX5_IB_CQE_RES_FORMAT_HASH |
  592. MLX5_IB_CQE_RES_FORMAT_CSUM;
  593. resp.response_length += sizeof(resp.cqe_comp_caps);
  594. }
  595. if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
  596. if (MLX5_CAP_QOS(mdev, packet_pacing) &&
  597. MLX5_CAP_GEN(mdev, qos)) {
  598. resp.packet_pacing_caps.qp_rate_limit_max =
  599. MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
  600. resp.packet_pacing_caps.qp_rate_limit_min =
  601. MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
  602. resp.packet_pacing_caps.supported_qpts |=
  603. 1 << IB_QPT_RAW_PACKET;
  604. }
  605. resp.response_length += sizeof(resp.packet_pacing_caps);
  606. }
  607. if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
  608. uhw->outlen)) {
  609. resp.mlx5_ib_support_multi_pkt_send_wqes =
  610. MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
  611. resp.response_length +=
  612. sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
  613. }
  614. if (field_avail(typeof(resp), reserved, uhw->outlen))
  615. resp.response_length += sizeof(resp.reserved);
  616. if (uhw->outlen) {
  617. err = ib_copy_to_udata(uhw, &resp, resp.response_length);
  618. if (err)
  619. return err;
  620. }
  621. return 0;
  622. }
  623. enum mlx5_ib_width {
  624. MLX5_IB_WIDTH_1X = 1 << 0,
  625. MLX5_IB_WIDTH_2X = 1 << 1,
  626. MLX5_IB_WIDTH_4X = 1 << 2,
  627. MLX5_IB_WIDTH_8X = 1 << 3,
  628. MLX5_IB_WIDTH_12X = 1 << 4
  629. };
  630. static int translate_active_width(struct ib_device *ibdev, u8 active_width,
  631. u8 *ib_width)
  632. {
  633. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  634. int err = 0;
  635. if (active_width & MLX5_IB_WIDTH_1X) {
  636. *ib_width = IB_WIDTH_1X;
  637. } else if (active_width & MLX5_IB_WIDTH_2X) {
  638. mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
  639. (int)active_width);
  640. err = -EINVAL;
  641. } else if (active_width & MLX5_IB_WIDTH_4X) {
  642. *ib_width = IB_WIDTH_4X;
  643. } else if (active_width & MLX5_IB_WIDTH_8X) {
  644. *ib_width = IB_WIDTH_8X;
  645. } else if (active_width & MLX5_IB_WIDTH_12X) {
  646. *ib_width = IB_WIDTH_12X;
  647. } else {
  648. mlx5_ib_dbg(dev, "Invalid active_width %d\n",
  649. (int)active_width);
  650. err = -EINVAL;
  651. }
  652. return err;
  653. }
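/* Convert an MTU in bytes to the IB-encoded MTU value (256 -> 1 ... 4096 -> 5);
 * returns -1 for unsupported sizes.
 */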
  654. static int mlx5_mtu_to_ib_mtu(int mtu)
  655. {
  656. switch (mtu) {
  657. case 256: return 1;
  658. case 512: return 2;
  659. case 1024: return 3;
  660. case 2048: return 4;
  661. case 4096: return 5;
  662. default:
  663. pr_warn("invalid mtu\n");
  664. return -1;
  665. }
  666. }
  667. enum ib_max_vl_num {
  668. __IB_MAX_VL_0 = 1,
  669. __IB_MAX_VL_0_1 = 2,
  670. __IB_MAX_VL_0_3 = 3,
  671. __IB_MAX_VL_0_7 = 4,
  672. __IB_MAX_VL_0_14 = 5,
  673. };
  674. enum mlx5_vl_hw_cap {
  675. MLX5_VL_HW_0 = 1,
  676. MLX5_VL_HW_0_1 = 2,
  677. MLX5_VL_HW_0_2 = 3,
  678. MLX5_VL_HW_0_3 = 4,
  679. MLX5_VL_HW_0_4 = 5,
  680. MLX5_VL_HW_0_5 = 6,
  681. MLX5_VL_HW_0_6 = 7,
  682. MLX5_VL_HW_0_7 = 8,
  683. MLX5_VL_HW_0_14 = 15
  684. };
  685. static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
  686. u8 *max_vl_num)
  687. {
  688. switch (vl_hw_cap) {
  689. case MLX5_VL_HW_0:
  690. *max_vl_num = __IB_MAX_VL_0;
  691. break;
  692. case MLX5_VL_HW_0_1:
  693. *max_vl_num = __IB_MAX_VL_0_1;
  694. break;
  695. case MLX5_VL_HW_0_3:
  696. *max_vl_num = __IB_MAX_VL_0_3;
  697. break;
  698. case MLX5_VL_HW_0_7:
  699. *max_vl_num = __IB_MAX_VL_0_7;
  700. break;
  701. case MLX5_VL_HW_0_14:
  702. *max_vl_num = __IB_MAX_VL_0_14;
  703. break;
  704. default:
  705. return -EINVAL;
  706. }
  707. return 0;
  708. }
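/* Query port attributes for an IB port through the HCA vport context, plus
 * dedicated port queries for link width, protocol/speed, MTU and VL
 * capabilities.
 */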
  709. static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
  710. struct ib_port_attr *props)
  711. {
  712. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  713. struct mlx5_core_dev *mdev = dev->mdev;
  714. struct mlx5_hca_vport_context *rep;
  715. u16 max_mtu;
  716. u16 oper_mtu;
  717. int err;
  718. u8 ib_link_width_oper;
  719. u8 vl_hw_cap;
  720. rep = kzalloc(sizeof(*rep), GFP_KERNEL);
  721. if (!rep) {
  722. err = -ENOMEM;
  723. goto out;
  724. }
  725. /* props is zeroed by the caller; avoid zeroing it here */
  726. err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
  727. if (err)
  728. goto out;
  729. props->lid = rep->lid;
  730. props->lmc = rep->lmc;
  731. props->sm_lid = rep->sm_lid;
  732. props->sm_sl = rep->sm_sl;
  733. props->state = rep->vport_state;
  734. props->phys_state = rep->port_physical_state;
  735. props->port_cap_flags = rep->cap_mask1;
  736. props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
  737. props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
  738. props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
  739. props->bad_pkey_cntr = rep->pkey_violation_counter;
  740. props->qkey_viol_cntr = rep->qkey_violation_counter;
  741. props->subnet_timeout = rep->subnet_timeout;
  742. props->init_type_reply = rep->init_type_reply;
  743. props->grh_required = rep->grh_required;
  744. err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
  745. if (err)
  746. goto out;
  747. err = translate_active_width(ibdev, ib_link_width_oper,
  748. &props->active_width);
  749. if (err)
  750. goto out;
  751. err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
  752. if (err)
  753. goto out;
  754. mlx5_query_port_max_mtu(mdev, &max_mtu, port);
  755. props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
  756. mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
  757. props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
  758. err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
  759. if (err)
  760. goto out;
  761. err = translate_max_vl_num(ibdev, vl_hw_cap,
  762. &props->max_vl_num);
  763. out:
  764. kfree(rep);
  765. return err;
  766. }
  767. int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
  768. struct ib_port_attr *props)
  769. {
  770. switch (mlx5_get_vport_access_method(ibdev)) {
  771. case MLX5_VPORT_ACCESS_METHOD_MAD:
  772. return mlx5_query_mad_ifc_port(ibdev, port, props);
  773. case MLX5_VPORT_ACCESS_METHOD_HCA:
  774. return mlx5_query_hca_port(ibdev, port, props);
  775. case MLX5_VPORT_ACCESS_METHOD_NIC:
  776. return mlx5_query_port_roce(ibdev, port, props);
  777. default:
  778. return -EINVAL;
  779. }
  780. }
  781. static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  782. union ib_gid *gid)
  783. {
  784. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  785. struct mlx5_core_dev *mdev = dev->mdev;
  786. switch (mlx5_get_vport_access_method(ibdev)) {
  787. case MLX5_VPORT_ACCESS_METHOD_MAD:
  788. return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
  789. case MLX5_VPORT_ACCESS_METHOD_HCA:
  790. return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
  791. default:
  792. return -EINVAL;
  793. }
  794. }
  795. static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
  796. u16 *pkey)
  797. {
  798. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  799. struct mlx5_core_dev *mdev = dev->mdev;
  800. switch (mlx5_get_vport_access_method(ibdev)) {
  801. case MLX5_VPORT_ACCESS_METHOD_MAD:
  802. return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
  803. case MLX5_VPORT_ACCESS_METHOD_HCA:
  804. case MLX5_VPORT_ACCESS_METHOD_NIC:
  805. return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
  806. pkey);
  807. default:
  808. return -EINVAL;
  809. }
  810. }
  811. static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
  812. struct ib_device_modify *props)
  813. {
  814. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  815. struct mlx5_reg_node_desc in;
  816. struct mlx5_reg_node_desc out;
  817. int err;
  818. if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
  819. return -EOPNOTSUPP;
  820. if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
  821. return 0;
  822. /*
  823. * If possible, pass node desc to FW, so it can generate
  824. * a 144 trap. If cmd fails, just ignore.
  825. */
  826. memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
  827. err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
  828. sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
  829. if (err)
  830. return err;
  831. memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
  832. return err;
  833. }
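/* Read-modify-write of the port capability mask through the HCA vport
 * context. Bits that the firmware does not permit changing
 * (cap_mask1_perm) are rejected with -EINVAL.
 */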
  834. static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
  835. u32 value)
  836. {
  837. struct mlx5_hca_vport_context ctx = {};
  838. int err;
  839. err = mlx5_query_hca_vport_context(dev->mdev, 0,
  840. port_num, 0, &ctx);
  841. if (err)
  842. return err;
  843. if (~ctx.cap_mask1_perm & mask) {
  844. mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
  845. mask, ctx.cap_mask1_perm);
  846. return -EINVAL;
  847. }
  848. ctx.cap_mask1 = value;
  849. ctx.cap_mask1_perm = mask;
  850. err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
  851. port_num, 0, &ctx);
  852. return err;
  853. }
  854. static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
  855. struct ib_port_modify *props)
  856. {
  857. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  858. struct ib_port_attr attr;
  859. u32 tmp;
  860. int err;
  861. u32 change_mask;
  862. u32 value;
  863. bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
  864. IB_LINK_LAYER_INFINIBAND);
  865. if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
  866. change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
  867. value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
  868. return set_port_caps_atomic(dev, port, change_mask, value);
  869. }
  870. mutex_lock(&dev->cap_mask_mutex);
  871. err = ib_query_port(ibdev, port, &attr);
  872. if (err)
  873. goto out;
  874. tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
  875. ~props->clr_port_cap_mask;
  876. err = mlx5_set_port_caps(dev->mdev, port, tmp);
  877. out:
  878. mutex_unlock(&dev->cap_mask_mutex);
  879. return err;
  880. }
  881. static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
  882. {
  883. mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
  884. caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
  885. }
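/* Validate the requested number of blue-flame registers and round it up to
 * a whole number of UAR system pages; *num_sys_pages returns how many such
 * pages must be allocated for this context.
 */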
  886. static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
  887. struct mlx5_ib_alloc_ucontext_req_v2 *req,
  888. u32 *num_sys_pages)
  889. {
  890. int uars_per_sys_page;
  891. int bfregs_per_sys_page;
  892. int ref_bfregs = req->total_num_bfregs;
  893. if (req->total_num_bfregs == 0)
  894. return -EINVAL;
  895. BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
  896. BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
  897. if (req->total_num_bfregs > MLX5_MAX_BFREGS)
  898. return -ENOMEM;
  899. uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
  900. bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
  901. req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
  902. *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
  903. if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
  904. return -EINVAL;
  905. mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
  906. MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
  907. lib_uar_4k ? "yes" : "no", ref_bfregs,
  908. req->total_num_bfregs, *num_sys_pages);
  909. return 0;
  910. }
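/* Allocate one firmware UAR per system page for this context, unwinding
 * the already allocated UARs on failure.
 */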
  911. static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  912. {
  913. struct mlx5_bfreg_info *bfregi;
  914. int err;
  915. int i;
  916. bfregi = &context->bfregi;
  917. for (i = 0; i < bfregi->num_sys_pages; i++) {
  918. err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
  919. if (err)
  920. goto error;
  921. mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
  922. }
  923. return 0;
  924. error:
  925. for (--i; i >= 0; i--)
  926. if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
  927. mlx5_ib_warn(dev, "failed to free uar %d\n", i);
  928. return err;
  929. }
  930. static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  931. {
  932. struct mlx5_bfreg_info *bfregi;
  933. int err;
  934. int i;
  935. bfregi = &context->bfregi;
  936. for (i = 0; i < bfregi->num_sys_pages; i++) {
  937. err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
  938. if (err) {
  939. mlx5_ib_warn(dev, "failed to free uar %d\n", i);
  940. return err;
  941. }
  942. }
  943. return 0;
  944. }
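/* alloc_ucontext verb. Parses the v0/v2 request, sizes and allocates the
 * bfreg bookkeeping and UAR pages, optionally allocates a transport
 * domain, and returns the negotiated capabilities (CQE version, UAR
 * layout, core clock offset, ...) to user space.
 */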
  945. static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
  946. struct ib_udata *udata)
  947. {
  948. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  949. struct mlx5_ib_alloc_ucontext_req_v2 req = {};
  950. struct mlx5_ib_alloc_ucontext_resp resp = {};
  951. struct mlx5_ib_ucontext *context;
  952. struct mlx5_bfreg_info *bfregi;
  953. int ver;
  954. int err;
  955. size_t reqlen;
  956. size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
  957. max_cqe_version);
  958. bool lib_uar_4k;
  959. if (!dev->ib_active)
  960. return ERR_PTR(-EAGAIN);
  961. if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
  962. return ERR_PTR(-EINVAL);
  963. reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
  964. if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
  965. ver = 0;
  966. else if (reqlen >= min_req_v2)
  967. ver = 2;
  968. else
  969. return ERR_PTR(-EINVAL);
  970. err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
  971. if (err)
  972. return ERR_PTR(err);
  973. if (req.flags)
  974. return ERR_PTR(-EINVAL);
  975. if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
  976. return ERR_PTR(-EOPNOTSUPP);
  977. req.total_num_bfregs = ALIGN(req.total_num_bfregs,
  978. MLX5_NON_FP_BFREGS_PER_UAR);
  979. if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
  980. return ERR_PTR(-EINVAL);
  981. resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
  982. if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
  983. resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
  984. resp.cache_line_size = cache_line_size();
  985. resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
  986. resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
  987. resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
  988. resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
  989. resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
  990. resp.cqe_version = min_t(__u8,
  991. (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
  992. req.max_cqe_version);
  993. resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
  994. MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
  995. resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
  996. MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
  997. resp.response_length = min(offsetof(typeof(resp), response_length) +
  998. sizeof(resp.response_length), udata->outlen);
  999. context = kzalloc(sizeof(*context), GFP_KERNEL);
  1000. if (!context)
  1001. return ERR_PTR(-ENOMEM);
  1002. lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
  1003. bfregi = &context->bfregi;
  1004. /* updates req->total_num_bfregs */
  1005. err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
  1006. if (err)
  1007. goto out_ctx;
  1008. mutex_init(&bfregi->lock);
  1009. bfregi->lib_uar_4k = lib_uar_4k;
  1010. bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
  1011. GFP_KERNEL);
  1012. if (!bfregi->count) {
  1013. err = -ENOMEM;
  1014. goto out_ctx;
  1015. }
  1016. bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
  1017. sizeof(*bfregi->sys_pages),
  1018. GFP_KERNEL);
  1019. if (!bfregi->sys_pages) {
  1020. err = -ENOMEM;
  1021. goto out_count;
  1022. }
  1023. err = allocate_uars(dev, context);
  1024. if (err)
  1025. goto out_sys_pages;
  1026. #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
  1027. context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
  1028. #endif
  1029. context->upd_xlt_page = __get_free_page(GFP_KERNEL);
  1030. if (!context->upd_xlt_page) {
  1031. err = -ENOMEM;
  1032. goto out_uars;
  1033. }
  1034. mutex_init(&context->upd_xlt_page_mutex);
  1035. if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
  1036. err = mlx5_core_alloc_transport_domain(dev->mdev,
  1037. &context->tdn);
  1038. if (err)
  1039. goto out_page;
  1040. }
  1041. INIT_LIST_HEAD(&context->vma_private_list);
  1042. INIT_LIST_HEAD(&context->db_page_list);
  1043. mutex_init(&context->db_page_mutex);
  1044. resp.tot_bfregs = req.total_num_bfregs;
  1045. resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
  1046. if (field_avail(typeof(resp), cqe_version, udata->outlen))
  1047. resp.response_length += sizeof(resp.cqe_version);
  1048. if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
  1049. resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
  1050. MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
  1051. resp.response_length += sizeof(resp.cmds_supp_uhw);
  1052. }
  1053. /*
  1054. * We don't want to expose information from the PCI bar that is located
  1055. * after 4096 bytes, so if the arch only supports larger pages, let's
  1056. * pretend we don't support reading the HCA's core clock. This is also
  1057. * enforced by the mmap function.
  1058. */
  1059. if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
  1060. if (PAGE_SIZE <= 4096) {
  1061. resp.comp_mask |=
  1062. MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
  1063. resp.hca_core_clock_offset =
  1064. offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
  1065. }
  1066. resp.response_length += sizeof(resp.hca_core_clock_offset) +
  1067. sizeof(resp.reserved2);
  1068. }
  1069. if (field_avail(typeof(resp), log_uar_size, udata->outlen))
  1070. resp.response_length += sizeof(resp.log_uar_size);
  1071. if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
  1072. resp.response_length += sizeof(resp.num_uars_per_page);
  1073. err = ib_copy_to_udata(udata, &resp, resp.response_length);
  1074. if (err)
  1075. goto out_td;
  1076. bfregi->ver = ver;
  1077. bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
  1078. context->cqe_version = resp.cqe_version;
  1079. context->lib_caps = req.lib_caps;
  1080. print_lib_caps(dev, context->lib_caps);
  1081. return &context->ibucontext;
  1082. out_td:
  1083. if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
  1084. mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
  1085. out_page:
  1086. free_page(context->upd_xlt_page);
  1087. out_uars:
  1088. deallocate_uars(dev, context);
  1089. out_sys_pages:
  1090. kfree(bfregi->sys_pages);
  1091. out_count:
  1092. kfree(bfregi->count);
  1093. out_ctx:
  1094. kfree(context);
  1095. return ERR_PTR(err);
  1096. }
  1097. static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
  1098. {
  1099. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1100. struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
  1101. struct mlx5_bfreg_info *bfregi;
  1102. bfregi = &context->bfregi;
  1103. if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
  1104. mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
  1105. free_page(context->upd_xlt_page);
  1106. deallocate_uars(dev, context);
  1107. kfree(bfregi->sys_pages);
  1108. kfree(bfregi->count);
  1109. kfree(context);
  1110. return 0;
  1111. }
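/* Translate a UAR index into a PFN within BAR 0, accounting for how many
 * firmware UARs share one kernel page when 4K UARs are in use.
 */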
  1112. static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
  1113. struct mlx5_bfreg_info *bfregi,
  1114. int idx)
  1115. {
  1116. int fw_uars_per_page;
  1117. fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
  1118. return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
  1119. bfregi->sys_pages[idx] / fw_uars_per_page;
  1120. }
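/* mmap offset encoding helpers: an offset is
 * (cmd << MLX5_IB_MMAP_CMD_SHIFT) | index, so get_command() extracts the
 * command and get_arg()/get_index() extract the low bits.
 */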
  1121. static int get_command(unsigned long offset)
  1122. {
  1123. return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
  1124. }
  1125. static int get_arg(unsigned long offset)
  1126. {
  1127. return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
  1128. }
  1129. static int get_index(unsigned long offset)
  1130. {
  1131. return get_arg(offset);
  1132. }
  1133. static void mlx5_ib_vma_open(struct vm_area_struct *area)
  1134. {
  1135. /* vma_open is called when a new VMA is created on top of our VMA. This
  1136. * is done through either mremap flow or split_vma (usually due to
  1137. * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
  1138. * as this VMA is strongly hardware related. Therefore we set the
  1139. * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
  1140. * calling us again and trying to do incorrect actions. We assume that
1141. the original VMA size is exactly a single page, so no
1142. "splitting" operations will happen to it.
  1143. */
  1144. area->vm_ops = NULL;
  1145. }
  1146. static void mlx5_ib_vma_close(struct vm_area_struct *area)
  1147. {
  1148. struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
  1149. /* It's guaranteed that all VMAs opened on a FD are closed before the
  1150. * file itself is closed, therefore no sync is needed with the regular
1151. closing flow (e.g. mlx5_ib_dealloc_ucontext).
1152. However, a sync is needed when the vma is accessed as part of
1153. mlx5_ib_disassociate_ucontext.
1154. The close operation is usually called under mm->mmap_sem except when
1155. the process is exiting.
  1156. * The exiting case is handled explicitly as part of
  1157. * mlx5_ib_disassociate_ucontext.
  1158. */
  1159. mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1160. /* Set the vma context pointer to NULL in the mlx5_ib driver's
1161. * private data to protect against a race with
1162. * mlx5_ib_disassociate_ucontext().
  1163. */
  1164. mlx5_ib_vma_priv_data->vma = NULL;
  1165. list_del(&mlx5_ib_vma_priv_data->list);
  1166. kfree(mlx5_ib_vma_priv_data);
  1167. }
  1168. static const struct vm_operations_struct mlx5_ib_vm_ops = {
  1169. .open = mlx5_ib_vma_open,
  1170. .close = mlx5_ib_vma_close
  1171. };
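/*
 * Record a freshly created mapping in the ucontext's vma_private_list so
 * that mlx5_ib_disassociate_ucontext() can find and zap it later if the
 * device goes away while the mapping is still alive.
 */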
  1172. static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
  1173. struct mlx5_ib_ucontext *ctx)
  1174. {
  1175. struct mlx5_ib_vma_private_data *vma_prv;
  1176. struct list_head *vma_head = &ctx->vma_private_list;
  1177. vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
  1178. if (!vma_prv)
  1179. return -ENOMEM;
  1180. vma_prv->vma = vma;
  1181. vma->vm_private_data = vma_prv;
  1182. vma->vm_ops = &mlx5_ib_vm_ops;
  1183. list_add(&vma_prv->list, vma_head);
  1184. return 0;
  1185. }
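/*
 * Called when the device is being detached while user space may still
 * hold mappings: zap the PTEs of every VMA recorded for this context so
 * that further accesses fault instead of touching dead hardware. If the
 * owning process has no mm any more, just wait for it to terminate.
 */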
  1186. static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
  1187. {
  1188. int ret;
  1189. struct vm_area_struct *vma;
  1190. struct mlx5_ib_vma_private_data *vma_private, *n;
  1191. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1192. struct task_struct *owning_process = NULL;
  1193. struct mm_struct *owning_mm = NULL;
  1194. owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
  1195. if (!owning_process)
  1196. return;
  1197. owning_mm = get_task_mm(owning_process);
  1198. if (!owning_mm) {
  1199. pr_info("no mm, disassociate ucontext is pending task termination\n");
  1200. while (1) {
  1201. put_task_struct(owning_process);
  1202. usleep_range(1000, 2000);
  1203. owning_process = get_pid_task(ibcontext->tgid,
  1204. PIDTYPE_PID);
  1205. if (!owning_process ||
  1206. owning_process->state == TASK_DEAD) {
  1207. pr_info("disassociate ucontext done, task was terminated\n");
1208. /* If the task was found dead, its task struct
1209. * still needs to be released.
  1210. */
  1211. if (owning_process)
  1212. put_task_struct(owning_process);
  1213. return;
  1214. }
  1215. }
  1216. }
1217. /* Need to protect against a race with the vma being closed in
1218. * mlx5_ib_vma_close().
  1219. */
  1220. down_read(&owning_mm->mmap_sem);
  1221. list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
  1222. list) {
  1223. vma = vma_private->vma;
  1224. ret = zap_vma_ptes(vma, vma->vm_start,
  1225. PAGE_SIZE);
  1226. WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
1227. /* The context is going to be destroyed; the
1228. * vm_ops must not be accessed any more.
  1229. */
  1230. vma->vm_ops = NULL;
  1231. list_del(&vma_private->list);
  1232. kfree(vma_private);
  1233. }
  1234. up_read(&owning_mm->mmap_sem);
  1235. mmput(owning_mm);
  1236. put_task_struct(owning_process);
  1237. }
  1238. static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
  1239. {
  1240. switch (cmd) {
  1241. case MLX5_IB_MMAP_WC_PAGE:
  1242. return "WC";
  1243. case MLX5_IB_MMAP_REGULAR_PAGE:
  1244. return "best effort WC";
  1245. case MLX5_IB_MMAP_NC_PAGE:
  1246. return "NC";
  1247. default:
  1248. return NULL;
  1249. }
  1250. }
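/*
 * Map a single UAR/blue-flame page to user space. The mapping must be
 * exactly one page long and the requested index must correspond to a
 * system page actually allocated for this context; write-combining is
 * used where the architecture supports it, non-cached for NC pages.
 */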
  1251. static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
  1252. struct vm_area_struct *vma,
  1253. struct mlx5_ib_ucontext *context)
  1254. {
  1255. struct mlx5_bfreg_info *bfregi = &context->bfregi;
  1256. int err;
  1257. unsigned long idx;
  1258. phys_addr_t pfn, pa;
  1259. pgprot_t prot;
  1260. int uars_per_page;
  1261. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  1262. return -EINVAL;
  1263. uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
  1264. idx = get_index(vma->vm_pgoff);
  1265. if (idx % uars_per_page ||
  1266. idx * uars_per_page >= bfregi->num_sys_pages) {
  1267. mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
  1268. return -EINVAL;
  1269. }
  1270. switch (cmd) {
  1271. case MLX5_IB_MMAP_WC_PAGE:
  1272. /* Some architectures don't support WC memory */
  1273. #if defined(CONFIG_X86)
  1274. if (!pat_enabled())
  1275. return -EPERM;
  1276. #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
  1277. return -EPERM;
  1278. #endif
  1279. /* fall through */
  1280. case MLX5_IB_MMAP_REGULAR_PAGE:
  1281. /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
  1282. prot = pgprot_writecombine(vma->vm_page_prot);
  1283. break;
  1284. case MLX5_IB_MMAP_NC_PAGE:
  1285. prot = pgprot_noncached(vma->vm_page_prot);
  1286. break;
  1287. default:
  1288. return -EINVAL;
  1289. }
  1290. pfn = uar_index2pfn(dev, bfregi, idx);
  1291. mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
  1292. vma->vm_page_prot = prot;
  1293. err = io_remap_pfn_range(vma, vma->vm_start, pfn,
  1294. PAGE_SIZE, vma->vm_page_prot);
  1295. if (err) {
  1296. mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
  1297. err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
  1298. return -EAGAIN;
  1299. }
  1300. pa = pfn << PAGE_SHIFT;
  1301. mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
  1302. vma->vm_start, &pa);
  1303. return mlx5_ib_set_vma_data(vma, context);
  1304. }
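/*
 * Top-level mmap dispatcher: decode the command from the page offset and
 * hand UAR commands to uar_mmap(). MLX5_IB_MMAP_CORE_CLOCK maps the
 * internal timer register read-only, and only when PAGE_SIZE is 4K, in
 * line with the restriction applied in the ucontext allocation above.
 */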
  1305. static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
  1306. {
  1307. struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
  1308. struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
  1309. unsigned long command;
  1310. phys_addr_t pfn;
  1311. command = get_command(vma->vm_pgoff);
  1312. switch (command) {
  1313. case MLX5_IB_MMAP_WC_PAGE:
  1314. case MLX5_IB_MMAP_NC_PAGE:
  1315. case MLX5_IB_MMAP_REGULAR_PAGE:
  1316. return uar_mmap(dev, command, vma, context);
  1317. case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
  1318. return -ENOSYS;
  1319. case MLX5_IB_MMAP_CORE_CLOCK:
  1320. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  1321. return -EINVAL;
  1322. if (vma->vm_flags & VM_WRITE)
  1323. return -EPERM;
  1324. /* Don't expose to user-space information it shouldn't have */
  1325. if (PAGE_SIZE > 4096)
  1326. return -EOPNOTSUPP;
  1327. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  1328. pfn = (dev->mdev->iseg_base +
  1329. offsetof(struct mlx5_init_seg, internal_timer_h)) >>
  1330. PAGE_SHIFT;
  1331. if (io_remap_pfn_range(vma, vma->vm_start, pfn,
  1332. PAGE_SIZE, vma->vm_page_prot))
  1333. return -EAGAIN;
  1334. mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
  1335. vma->vm_start,
  1336. (unsigned long long)pfn << PAGE_SHIFT);
  1337. break;
  1338. default:
  1339. return -EINVAL;
  1340. }
  1341. return 0;
  1342. }
  1343. static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
  1344. struct ib_ucontext *context,
  1345. struct ib_udata *udata)
  1346. {
  1347. struct mlx5_ib_alloc_pd_resp resp;
  1348. struct mlx5_ib_pd *pd;
  1349. int err;
  1350. pd = kmalloc(sizeof(*pd), GFP_KERNEL);
  1351. if (!pd)
  1352. return ERR_PTR(-ENOMEM);
  1353. err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
  1354. if (err) {
  1355. kfree(pd);
  1356. return ERR_PTR(err);
  1357. }
  1358. if (context) {
  1359. resp.pdn = pd->pdn;
  1360. if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
  1361. mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
  1362. kfree(pd);
  1363. return ERR_PTR(-EFAULT);
  1364. }
  1365. }
  1366. return &pd->ibpd;
  1367. }
  1368. static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
  1369. {
  1370. struct mlx5_ib_dev *mdev = to_mdev(pd->device);
  1371. struct mlx5_ib_pd *mpd = to_mpd(pd);
  1372. mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
  1373. kfree(mpd);
  1374. return 0;
  1375. }
  1376. enum {
  1377. MATCH_CRITERIA_ENABLE_OUTER_BIT,
  1378. MATCH_CRITERIA_ENABLE_MISC_BIT,
  1379. MATCH_CRITERIA_ENABLE_INNER_BIT
  1380. };
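/*
 * HEADER_IS_ZERO is true when the given header block of the match
 * criteria contains only zeros; get_match_criteria_enable() uses it to
 * set the outer/misc/inner enable bits only for blocks carrying a mask.
 */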
  1381. #define HEADER_IS_ZERO(match_criteria, headers) \
  1382. !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
  1383. 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
  1384. static u8 get_match_criteria_enable(u32 *match_criteria)
  1385. {
  1386. u8 match_criteria_enable;
  1387. match_criteria_enable =
  1388. (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
  1389. MATCH_CRITERIA_ENABLE_OUTER_BIT;
  1390. match_criteria_enable |=
  1391. (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
  1392. MATCH_CRITERIA_ENABLE_MISC_BIT;
  1393. match_criteria_enable |=
  1394. (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
  1395. MATCH_CRITERIA_ENABLE_INNER_BIT;
  1396. return match_criteria_enable;
  1397. }
  1398. static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
  1399. {
  1400. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
  1401. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
  1402. }
  1403. static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
  1404. bool inner)
  1405. {
  1406. if (inner) {
  1407. MLX5_SET(fte_match_set_misc,
  1408. misc_c, inner_ipv6_flow_label, mask);
  1409. MLX5_SET(fte_match_set_misc,
  1410. misc_v, inner_ipv6_flow_label, val);
  1411. } else {
  1412. MLX5_SET(fte_match_set_misc,
  1413. misc_c, outer_ipv6_flow_label, mask);
  1414. MLX5_SET(fte_match_set_misc,
  1415. misc_v, outer_ipv6_flow_label, val);
  1416. }
  1417. }
  1418. static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
  1419. {
  1420. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
  1421. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
  1422. MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
  1423. MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
  1424. }
  1425. #define LAST_ETH_FIELD vlan_tag
  1426. #define LAST_IB_FIELD sl
  1427. #define LAST_IPV4_FIELD tos
  1428. #define LAST_IPV6_FIELD traffic_class
  1429. #define LAST_TCP_UDP_FIELD src_port
  1430. #define LAST_TUNNEL_FIELD tunnel_id
  1431. #define LAST_FLOW_TAG_FIELD tag_id
1432. /* Evaluates true when any field beyond the last supported field (@field) is set */
  1433. #define FIELDS_NOT_SUPPORTED(filter, field)\
  1434. memchr_inv((void *)&filter.field +\
  1435. sizeof(filter.field), 0,\
  1436. sizeof(filter) -\
  1437. offsetof(typeof(filter), field) -\
  1438. sizeof(filter.field))
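/*
 * Translate a single ib_flow_spec into the mlx5 match criteria (mask)
 * and match value buffers. Specs that set fields beyond the last field
 * known to this driver are rejected with -EOPNOTSUPP.
 */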
  1439. static int parse_flow_attr(u32 *match_c, u32 *match_v,
  1440. const union ib_flow_spec *ib_spec, u32 *tag_id)
  1441. {
  1442. void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
  1443. misc_parameters);
  1444. void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
  1445. misc_parameters);
  1446. void *headers_c;
  1447. void *headers_v;
  1448. if (ib_spec->type & IB_FLOW_SPEC_INNER) {
  1449. headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
  1450. inner_headers);
  1451. headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
  1452. inner_headers);
  1453. } else {
  1454. headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
  1455. outer_headers);
  1456. headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
  1457. outer_headers);
  1458. }
  1459. switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
  1460. case IB_FLOW_SPEC_ETH:
  1461. if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
  1462. return -EOPNOTSUPP;
  1463. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1464. dmac_47_16),
  1465. ib_spec->eth.mask.dst_mac);
  1466. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1467. dmac_47_16),
  1468. ib_spec->eth.val.dst_mac);
  1469. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1470. smac_47_16),
  1471. ib_spec->eth.mask.src_mac);
  1472. ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1473. smac_47_16),
  1474. ib_spec->eth.val.src_mac);
  1475. if (ib_spec->eth.mask.vlan_tag) {
  1476. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1477. vlan_tag, 1);
  1478. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1479. vlan_tag, 1);
  1480. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1481. first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
  1482. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1483. first_vid, ntohs(ib_spec->eth.val.vlan_tag));
  1484. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1485. first_cfi,
  1486. ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
  1487. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1488. first_cfi,
  1489. ntohs(ib_spec->eth.val.vlan_tag) >> 12);
  1490. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1491. first_prio,
  1492. ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
  1493. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1494. first_prio,
  1495. ntohs(ib_spec->eth.val.vlan_tag) >> 13);
  1496. }
  1497. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1498. ethertype, ntohs(ib_spec->eth.mask.ether_type));
  1499. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1500. ethertype, ntohs(ib_spec->eth.val.ether_type));
  1501. break;
  1502. case IB_FLOW_SPEC_IPV4:
  1503. if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
  1504. return -EOPNOTSUPP;
  1505. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1506. ethertype, 0xffff);
  1507. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1508. ethertype, ETH_P_IP);
  1509. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1510. src_ipv4_src_ipv6.ipv4_layout.ipv4),
  1511. &ib_spec->ipv4.mask.src_ip,
  1512. sizeof(ib_spec->ipv4.mask.src_ip));
  1513. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1514. src_ipv4_src_ipv6.ipv4_layout.ipv4),
  1515. &ib_spec->ipv4.val.src_ip,
  1516. sizeof(ib_spec->ipv4.val.src_ip));
  1517. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1518. dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
  1519. &ib_spec->ipv4.mask.dst_ip,
  1520. sizeof(ib_spec->ipv4.mask.dst_ip));
  1521. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1522. dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
  1523. &ib_spec->ipv4.val.dst_ip,
  1524. sizeof(ib_spec->ipv4.val.dst_ip));
  1525. set_tos(headers_c, headers_v,
  1526. ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
  1527. set_proto(headers_c, headers_v,
  1528. ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
  1529. break;
  1530. case IB_FLOW_SPEC_IPV6:
  1531. if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
  1532. return -EOPNOTSUPP;
  1533. MLX5_SET(fte_match_set_lyr_2_4, headers_c,
  1534. ethertype, 0xffff);
  1535. MLX5_SET(fte_match_set_lyr_2_4, headers_v,
  1536. ethertype, ETH_P_IPV6);
  1537. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1538. src_ipv4_src_ipv6.ipv6_layout.ipv6),
  1539. &ib_spec->ipv6.mask.src_ip,
  1540. sizeof(ib_spec->ipv6.mask.src_ip));
  1541. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1542. src_ipv4_src_ipv6.ipv6_layout.ipv6),
  1543. &ib_spec->ipv6.val.src_ip,
  1544. sizeof(ib_spec->ipv6.val.src_ip));
  1545. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
  1546. dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
  1547. &ib_spec->ipv6.mask.dst_ip,
  1548. sizeof(ib_spec->ipv6.mask.dst_ip));
  1549. memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
  1550. dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
  1551. &ib_spec->ipv6.val.dst_ip,
  1552. sizeof(ib_spec->ipv6.val.dst_ip));
  1553. set_tos(headers_c, headers_v,
  1554. ib_spec->ipv6.mask.traffic_class,
  1555. ib_spec->ipv6.val.traffic_class);
  1556. set_proto(headers_c, headers_v,
  1557. ib_spec->ipv6.mask.next_hdr,
  1558. ib_spec->ipv6.val.next_hdr);
  1559. set_flow_label(misc_params_c, misc_params_v,
  1560. ntohl(ib_spec->ipv6.mask.flow_label),
  1561. ntohl(ib_spec->ipv6.val.flow_label),
  1562. ib_spec->type & IB_FLOW_SPEC_INNER);
  1563. break;
  1564. case IB_FLOW_SPEC_TCP:
  1565. if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
  1566. LAST_TCP_UDP_FIELD))
  1567. return -EOPNOTSUPP;
  1568. MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
  1569. 0xff);
  1570. MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
  1571. IPPROTO_TCP);
  1572. MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
  1573. ntohs(ib_spec->tcp_udp.mask.src_port));
  1574. MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
  1575. ntohs(ib_spec->tcp_udp.val.src_port));
  1576. MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
  1577. ntohs(ib_spec->tcp_udp.mask.dst_port));
  1578. MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
  1579. ntohs(ib_spec->tcp_udp.val.dst_port));
  1580. break;
  1581. case IB_FLOW_SPEC_UDP:
  1582. if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
  1583. LAST_TCP_UDP_FIELD))
  1584. return -EOPNOTSUPP;
  1585. MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
  1586. 0xff);
  1587. MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
  1588. IPPROTO_UDP);
  1589. MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
  1590. ntohs(ib_spec->tcp_udp.mask.src_port));
  1591. MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
  1592. ntohs(ib_spec->tcp_udp.val.src_port));
  1593. MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
  1594. ntohs(ib_spec->tcp_udp.mask.dst_port));
  1595. MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
  1596. ntohs(ib_spec->tcp_udp.val.dst_port));
  1597. break;
  1598. case IB_FLOW_SPEC_VXLAN_TUNNEL:
  1599. if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
  1600. LAST_TUNNEL_FIELD))
  1601. return -EOPNOTSUPP;
  1602. MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
  1603. ntohl(ib_spec->tunnel.mask.tunnel_id));
  1604. MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
  1605. ntohl(ib_spec->tunnel.val.tunnel_id));
  1606. break;
  1607. case IB_FLOW_SPEC_ACTION_TAG:
  1608. if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
  1609. LAST_FLOW_TAG_FIELD))
  1610. return -EOPNOTSUPP;
  1611. if (ib_spec->flow_tag.tag_id >= BIT(24))
  1612. return -EINVAL;
  1613. *tag_id = ib_spec->flow_tag.tag_id;
  1614. break;
  1615. default:
  1616. return -EINVAL;
  1617. }
  1618. return 0;
  1619. }
  1620. /* If a flow could catch both multicast and unicast packets,
  1621. * it won't fall into the multicast flow steering table and this rule
  1622. * could steal other multicast packets.
  1623. */
  1624. static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
  1625. {
  1626. struct ib_flow_spec_eth *eth_spec;
  1627. if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
  1628. ib_attr->size < sizeof(struct ib_flow_attr) +
  1629. sizeof(struct ib_flow_spec_eth) ||
  1630. ib_attr->num_of_specs < 1)
  1631. return false;
  1632. eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
  1633. if (eth_spec->type != IB_FLOW_SPEC_ETH ||
  1634. eth_spec->size != sizeof(*eth_spec))
  1635. return false;
  1636. return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
  1637. is_multicast_ether_addr(eth_spec->val.dst_mac);
  1638. }
  1639. static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
  1640. {
  1641. union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
  1642. bool has_ipv4_spec = false;
  1643. bool eth_type_ipv4 = true;
  1644. unsigned int spec_index;
  1645. /* Validate that ethertype is correct */
  1646. for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
  1647. if (ib_spec->type == IB_FLOW_SPEC_ETH &&
  1648. ib_spec->eth.mask.ether_type) {
  1649. if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
  1650. ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
  1651. eth_type_ipv4 = false;
  1652. } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
  1653. has_ipv4_spec = true;
  1654. }
  1655. ib_spec = (void *)ib_spec + ib_spec->size;
  1656. }
  1657. return !has_ipv4_spec || eth_type_ipv4;
  1658. }
  1659. static void put_flow_table(struct mlx5_ib_dev *dev,
  1660. struct mlx5_ib_flow_prio *prio, bool ft_added)
  1661. {
  1662. prio->refcount -= !!ft_added;
  1663. if (!prio->refcount) {
  1664. mlx5_destroy_flow_table(prio->flow_table);
  1665. prio->flow_table = NULL;
  1666. }
  1667. }
  1668. static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
  1669. {
  1670. struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
  1671. struct mlx5_ib_flow_handler *handler = container_of(flow_id,
  1672. struct mlx5_ib_flow_handler,
  1673. ibflow);
  1674. struct mlx5_ib_flow_handler *iter, *tmp;
  1675. mutex_lock(&dev->flow_db.lock);
  1676. list_for_each_entry_safe(iter, tmp, &handler->list, list) {
  1677. mlx5_del_flow_rules(iter->rule);
  1678. put_flow_table(dev, iter->prio, true);
  1679. list_del(&iter->list);
  1680. kfree(iter);
  1681. }
  1682. mlx5_del_flow_rules(handler->rule);
  1683. put_flow_table(dev, handler->prio, true);
  1684. mutex_unlock(&dev->flow_db.lock);
  1685. kfree(handler);
  1686. return 0;
  1687. }
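/*
 * Each IB flow priority maps to two consecutive core priorities: an even
 * one used by don't-trap rules and an odd one used by regular rules.
 */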
  1688. static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
  1689. {
  1690. priority *= 2;
  1691. if (!dont_trap)
  1692. priority++;
  1693. return priority;
  1694. }
  1695. enum flow_table_type {
  1696. MLX5_IB_FT_RX,
  1697. MLX5_IB_FT_TX
  1698. };
  1699. #define MLX5_FS_MAX_TYPES 10
  1700. #define MLX5_FS_MAX_ENTRIES 32000UL
  1701. static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
  1702. struct ib_flow_attr *flow_attr,
  1703. enum flow_table_type ft_type)
  1704. {
  1705. bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
  1706. struct mlx5_flow_namespace *ns = NULL;
  1707. struct mlx5_ib_flow_prio *prio;
  1708. struct mlx5_flow_table *ft;
  1709. int num_entries;
  1710. int num_groups;
  1711. int priority;
  1712. int err = 0;
  1713. if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
  1714. if (flow_is_multicast_only(flow_attr) &&
  1715. !dont_trap)
  1716. priority = MLX5_IB_FLOW_MCAST_PRIO;
  1717. else
  1718. priority = ib_prio_to_core_prio(flow_attr->priority,
  1719. dont_trap);
  1720. ns = mlx5_get_flow_namespace(dev->mdev,
  1721. MLX5_FLOW_NAMESPACE_BYPASS);
  1722. num_entries = MLX5_FS_MAX_ENTRIES;
  1723. num_groups = MLX5_FS_MAX_TYPES;
  1724. prio = &dev->flow_db.prios[priority];
  1725. } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  1726. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
  1727. ns = mlx5_get_flow_namespace(dev->mdev,
  1728. MLX5_FLOW_NAMESPACE_LEFTOVERS);
  1729. build_leftovers_ft_param(&priority,
  1730. &num_entries,
  1731. &num_groups);
  1732. prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
  1733. } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  1734. if (!MLX5_CAP_FLOWTABLE(dev->mdev,
  1735. allow_sniffer_and_nic_rx_shared_tir))
  1736. return ERR_PTR(-ENOTSUPP);
  1737. ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
  1738. MLX5_FLOW_NAMESPACE_SNIFFER_RX :
  1739. MLX5_FLOW_NAMESPACE_SNIFFER_TX);
  1740. prio = &dev->flow_db.sniffer[ft_type];
  1741. priority = 0;
  1742. num_entries = 1;
  1743. num_groups = 1;
  1744. }
  1745. if (!ns)
  1746. return ERR_PTR(-ENOTSUPP);
  1747. ft = prio->flow_table;
  1748. if (!ft) {
  1749. ft = mlx5_create_auto_grouped_flow_table(ns, priority,
  1750. num_entries,
  1751. num_groups,
  1752. 0, 0);
  1753. if (!IS_ERR(ft)) {
  1754. prio->refcount = 0;
  1755. prio->flow_table = ft;
  1756. } else {
  1757. err = PTR_ERR(ft);
  1758. }
  1759. }
  1760. return err ? ERR_PTR(err) : prio;
  1761. }
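/*
 * Build an mlx5 flow rule from an IB flow attribute: parse every spec
 * into the match buffers, then install a rule that forwards either to
 * the supplied destination or to the next priority when no destination
 * is given. Non-default flow tags are not allowed on leftovers rules.
 */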
  1762. static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
  1763. struct mlx5_ib_flow_prio *ft_prio,
  1764. const struct ib_flow_attr *flow_attr,
  1765. struct mlx5_flow_destination *dst)
  1766. {
  1767. struct mlx5_flow_table *ft = ft_prio->flow_table;
  1768. struct mlx5_ib_flow_handler *handler;
  1769. struct mlx5_flow_act flow_act = {0};
  1770. struct mlx5_flow_spec *spec;
  1771. const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
  1772. unsigned int spec_index;
  1773. u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
  1774. int err = 0;
  1775. if (!is_valid_attr(flow_attr))
  1776. return ERR_PTR(-EINVAL);
  1777. spec = mlx5_vzalloc(sizeof(*spec));
  1778. handler = kzalloc(sizeof(*handler), GFP_KERNEL);
  1779. if (!handler || !spec) {
  1780. err = -ENOMEM;
  1781. goto free;
  1782. }
  1783. INIT_LIST_HEAD(&handler->list);
  1784. for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
  1785. err = parse_flow_attr(spec->match_criteria,
  1786. spec->match_value, ib_flow, &flow_tag);
  1787. if (err < 0)
  1788. goto free;
  1789. ib_flow += ((union ib_flow_spec *)ib_flow)->size;
  1790. }
  1791. spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
  1792. flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
  1793. MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
  1794. if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
  1795. (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  1796. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
  1797. mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
  1798. flow_tag, flow_attr->type);
  1799. err = -EINVAL;
  1800. goto free;
  1801. }
  1802. flow_act.flow_tag = flow_tag;
  1803. handler->rule = mlx5_add_flow_rules(ft, spec,
  1804. &flow_act,
  1805. dst, 1);
  1806. if (IS_ERR(handler->rule)) {
  1807. err = PTR_ERR(handler->rule);
  1808. goto free;
  1809. }
  1810. ft_prio->refcount++;
  1811. handler->prio = ft_prio;
  1812. ft_prio->flow_table = ft;
  1813. free:
  1814. if (err)
  1815. kfree(handler);
  1816. kvfree(spec);
  1817. return err ? ERR_PTR(err) : handler;
  1818. }
  1819. static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
  1820. struct mlx5_ib_flow_prio *ft_prio,
  1821. struct ib_flow_attr *flow_attr,
  1822. struct mlx5_flow_destination *dst)
  1823. {
  1824. struct mlx5_ib_flow_handler *handler_dst = NULL;
  1825. struct mlx5_ib_flow_handler *handler = NULL;
  1826. handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
  1827. if (!IS_ERR(handler)) {
  1828. handler_dst = create_flow_rule(dev, ft_prio,
  1829. flow_attr, dst);
  1830. if (IS_ERR(handler_dst)) {
  1831. mlx5_del_flow_rules(handler->rule);
  1832. ft_prio->refcount--;
  1833. kfree(handler);
  1834. handler = handler_dst;
  1835. } else {
  1836. list_add(&handler_dst->list, &handler->list);
  1837. }
  1838. }
  1839. return handler;
  1840. }
  1841. enum {
  1842. LEFTOVERS_MC,
  1843. LEFTOVERS_UC,
  1844. };
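/*
 * Leftovers rules catch packets that matched no other rule: the
 * multicast entry matches any destination MAC with the multicast bit
 * set, the unicast entry any MAC with that bit clear. For
 * IB_FLOW_ATTR_ALL_DEFAULT both rules are installed and chained onto
 * one handler list.
 */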
  1845. static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
  1846. struct mlx5_ib_flow_prio *ft_prio,
  1847. struct ib_flow_attr *flow_attr,
  1848. struct mlx5_flow_destination *dst)
  1849. {
  1850. struct mlx5_ib_flow_handler *handler_ucast = NULL;
  1851. struct mlx5_ib_flow_handler *handler = NULL;
  1852. static struct {
  1853. struct ib_flow_attr flow_attr;
  1854. struct ib_flow_spec_eth eth_flow;
  1855. } leftovers_specs[] = {
  1856. [LEFTOVERS_MC] = {
  1857. .flow_attr = {
  1858. .num_of_specs = 1,
  1859. .size = sizeof(leftovers_specs[0])
  1860. },
  1861. .eth_flow = {
  1862. .type = IB_FLOW_SPEC_ETH,
  1863. .size = sizeof(struct ib_flow_spec_eth),
  1864. .mask = {.dst_mac = {0x1} },
  1865. .val = {.dst_mac = {0x1} }
  1866. }
  1867. },
  1868. [LEFTOVERS_UC] = {
  1869. .flow_attr = {
  1870. .num_of_specs = 1,
  1871. .size = sizeof(leftovers_specs[0])
  1872. },
  1873. .eth_flow = {
  1874. .type = IB_FLOW_SPEC_ETH,
  1875. .size = sizeof(struct ib_flow_spec_eth),
  1876. .mask = {.dst_mac = {0x1} },
  1877. .val = {.dst_mac = {} }
  1878. }
  1879. }
  1880. };
  1881. handler = create_flow_rule(dev, ft_prio,
  1882. &leftovers_specs[LEFTOVERS_MC].flow_attr,
  1883. dst);
  1884. if (!IS_ERR(handler) &&
  1885. flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
  1886. handler_ucast = create_flow_rule(dev, ft_prio,
  1887. &leftovers_specs[LEFTOVERS_UC].flow_attr,
  1888. dst);
  1889. if (IS_ERR(handler_ucast)) {
  1890. mlx5_del_flow_rules(handler->rule);
  1891. ft_prio->refcount--;
  1892. kfree(handler);
  1893. handler = handler_ucast;
  1894. } else {
  1895. list_add(&handler_ucast->list, &handler->list);
  1896. }
  1897. }
  1898. return handler;
  1899. }
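/*
 * Sniffer rules mirror all traffic to the destination TIR: a catch-all
 * rule (no match criteria) is installed in both the sniffer RX and TX
 * tables and the TX handler is chained onto the RX handler.
 */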
  1900. static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
  1901. struct mlx5_ib_flow_prio *ft_rx,
  1902. struct mlx5_ib_flow_prio *ft_tx,
  1903. struct mlx5_flow_destination *dst)
  1904. {
  1905. struct mlx5_ib_flow_handler *handler_rx;
  1906. struct mlx5_ib_flow_handler *handler_tx;
  1907. int err;
  1908. static const struct ib_flow_attr flow_attr = {
  1909. .num_of_specs = 0,
  1910. .size = sizeof(flow_attr)
  1911. };
  1912. handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
  1913. if (IS_ERR(handler_rx)) {
  1914. err = PTR_ERR(handler_rx);
  1915. goto err;
  1916. }
  1917. handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
  1918. if (IS_ERR(handler_tx)) {
  1919. err = PTR_ERR(handler_tx);
  1920. goto err_tx;
  1921. }
  1922. list_add(&handler_tx->list, &handler_rx->list);
  1923. return handler_rx;
  1924. err_tx:
  1925. mlx5_del_flow_rules(handler_rx->rule);
  1926. ft_rx->refcount--;
  1927. kfree(handler_rx);
  1928. err:
  1929. return ERR_PTR(err);
  1930. }
  1931. static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
  1932. struct ib_flow_attr *flow_attr,
  1933. int domain)
  1934. {
  1935. struct mlx5_ib_dev *dev = to_mdev(qp->device);
  1936. struct mlx5_ib_qp *mqp = to_mqp(qp);
  1937. struct mlx5_ib_flow_handler *handler = NULL;
  1938. struct mlx5_flow_destination *dst = NULL;
  1939. struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
  1940. struct mlx5_ib_flow_prio *ft_prio;
  1941. int err;
  1942. if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
  1943. return ERR_PTR(-ENOSPC);
  1944. if (domain != IB_FLOW_DOMAIN_USER ||
  1945. flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
  1946. (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
  1947. return ERR_PTR(-EINVAL);
  1948. dst = kzalloc(sizeof(*dst), GFP_KERNEL);
  1949. if (!dst)
  1950. return ERR_PTR(-ENOMEM);
  1951. mutex_lock(&dev->flow_db.lock);
  1952. ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
  1953. if (IS_ERR(ft_prio)) {
  1954. err = PTR_ERR(ft_prio);
  1955. goto unlock;
  1956. }
  1957. if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  1958. ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
  1959. if (IS_ERR(ft_prio_tx)) {
  1960. err = PTR_ERR(ft_prio_tx);
  1961. ft_prio_tx = NULL;
  1962. goto destroy_ft;
  1963. }
  1964. }
  1965. dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  1966. if (mqp->flags & MLX5_IB_QP_RSS)
  1967. dst->tir_num = mqp->rss_qp.tirn;
  1968. else
  1969. dst->tir_num = mqp->raw_packet_qp.rq.tirn;
  1970. if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
  1971. if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
  1972. handler = create_dont_trap_rule(dev, ft_prio,
  1973. flow_attr, dst);
  1974. } else {
  1975. handler = create_flow_rule(dev, ft_prio, flow_attr,
  1976. dst);
  1977. }
  1978. } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
  1979. flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
  1980. handler = create_leftovers_rule(dev, ft_prio, flow_attr,
  1981. dst);
  1982. } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
  1983. handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
  1984. } else {
  1985. err = -EINVAL;
  1986. goto destroy_ft;
  1987. }
  1988. if (IS_ERR(handler)) {
  1989. err = PTR_ERR(handler);
  1990. handler = NULL;
  1991. goto destroy_ft;
  1992. }
  1993. mutex_unlock(&dev->flow_db.lock);
  1994. kfree(dst);
  1995. return &handler->ibflow;
  1996. destroy_ft:
  1997. put_flow_table(dev, ft_prio, false);
  1998. if (ft_prio_tx)
  1999. put_flow_table(dev, ft_prio_tx, false);
  2000. unlock:
  2001. mutex_unlock(&dev->flow_db.lock);
  2002. kfree(dst);
  2003. kfree(handler);
  2004. return ERR_PTR(err);
  2005. }
  2006. static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  2007. {
  2008. struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
  2009. int err;
  2010. err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
  2011. if (err)
  2012. mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
  2013. ibqp->qp_num, gid->raw);
  2014. return err;
  2015. }
  2016. static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  2017. {
  2018. struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
  2019. int err;
  2020. err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
  2021. if (err)
  2022. mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
  2023. ibqp->qp_num, gid->raw);
  2024. return err;
  2025. }
  2026. static int init_node_data(struct mlx5_ib_dev *dev)
  2027. {
  2028. int err;
  2029. err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
  2030. if (err)
  2031. return err;
  2032. dev->mdev->rev_id = dev->mdev->pdev->revision;
  2033. return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
  2034. }
  2035. static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
  2036. char *buf)
  2037. {
  2038. struct mlx5_ib_dev *dev =
  2039. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  2040. return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
  2041. }
  2042. static ssize_t show_reg_pages(struct device *device,
  2043. struct device_attribute *attr, char *buf)
  2044. {
  2045. struct mlx5_ib_dev *dev =
  2046. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  2047. return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
  2048. }
  2049. static ssize_t show_hca(struct device *device, struct device_attribute *attr,
  2050. char *buf)
  2051. {
  2052. struct mlx5_ib_dev *dev =
  2053. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  2054. return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
  2055. }
  2056. static ssize_t show_rev(struct device *device, struct device_attribute *attr,
  2057. char *buf)
  2058. {
  2059. struct mlx5_ib_dev *dev =
  2060. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  2061. return sprintf(buf, "%x\n", dev->mdev->rev_id);
  2062. }
  2063. static ssize_t show_board(struct device *device, struct device_attribute *attr,
  2064. char *buf)
  2065. {
  2066. struct mlx5_ib_dev *dev =
  2067. container_of(device, struct mlx5_ib_dev, ib_dev.dev);
  2068. return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
  2069. dev->mdev->board_id);
  2070. }
  2071. static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
  2072. static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
  2073. static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
  2074. static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
  2075. static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
  2076. static struct device_attribute *mlx5_class_attributes[] = {
  2077. &dev_attr_hw_rev,
  2078. &dev_attr_hca_type,
  2079. &dev_attr_board_id,
  2080. &dev_attr_fw_pages,
  2081. &dev_attr_reg_pages,
  2082. };
  2083. static void pkey_change_handler(struct work_struct *work)
  2084. {
  2085. struct mlx5_ib_port_resources *ports =
  2086. container_of(work, struct mlx5_ib_port_resources,
  2087. pkey_change_work);
  2088. mutex_lock(&ports->devr->mutex);
  2089. mlx5_ib_gsi_pkey_change(ports->gsi);
  2090. mutex_unlock(&ports->devr->mutex);
  2091. }
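/*
 * On a fatal device error, walk every QP on this ibdev and collect each
 * CQ whose queue still has outstanding work and that has a completion
 * handler; then invoke the completion callback of every collected CQ so
 * consumers can drain flush errors.
 */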
  2092. static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
  2093. {
  2094. struct mlx5_ib_qp *mqp;
  2095. struct mlx5_ib_cq *send_mcq, *recv_mcq;
  2096. struct mlx5_core_cq *mcq;
  2097. struct list_head cq_armed_list;
  2098. unsigned long flags_qp;
  2099. unsigned long flags_cq;
  2100. unsigned long flags;
  2101. INIT_LIST_HEAD(&cq_armed_list);
2102. /* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
  2103. spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
  2104. list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
  2105. spin_lock_irqsave(&mqp->sq.lock, flags_qp);
  2106. if (mqp->sq.tail != mqp->sq.head) {
  2107. send_mcq = to_mcq(mqp->ibqp.send_cq);
  2108. spin_lock_irqsave(&send_mcq->lock, flags_cq);
  2109. if (send_mcq->mcq.comp &&
  2110. mqp->ibqp.send_cq->comp_handler) {
  2111. if (!send_mcq->mcq.reset_notify_added) {
  2112. send_mcq->mcq.reset_notify_added = 1;
  2113. list_add_tail(&send_mcq->mcq.reset_notify,
  2114. &cq_armed_list);
  2115. }
  2116. }
  2117. spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
  2118. }
  2119. spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
  2120. spin_lock_irqsave(&mqp->rq.lock, flags_qp);
  2121. /* no handling is needed for SRQ */
  2122. if (!mqp->ibqp.srq) {
  2123. if (mqp->rq.tail != mqp->rq.head) {
  2124. recv_mcq = to_mcq(mqp->ibqp.recv_cq);
  2125. spin_lock_irqsave(&recv_mcq->lock, flags_cq);
  2126. if (recv_mcq->mcq.comp &&
  2127. mqp->ibqp.recv_cq->comp_handler) {
  2128. if (!recv_mcq->mcq.reset_notify_added) {
  2129. recv_mcq->mcq.reset_notify_added = 1;
  2130. list_add_tail(&recv_mcq->mcq.reset_notify,
  2131. &cq_armed_list);
  2132. }
  2133. }
  2134. spin_unlock_irqrestore(&recv_mcq->lock,
  2135. flags_cq);
  2136. }
  2137. }
  2138. spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
  2139. }
2140. /* At this point all in-flight post-sends have been flushed by the
2141. * lock/unlock of the above locks. Now arm all involved CQs.
  2142. */
  2143. list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
  2144. mcq->comp(mcq);
  2145. }
  2146. spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
  2147. }
  2148. static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
  2149. enum mlx5_dev_event event, unsigned long param)
  2150. {
  2151. struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
  2152. struct ib_event ibev;
  2153. bool fatal = false;
  2154. u8 port = 0;
  2155. switch (event) {
  2156. case MLX5_DEV_EVENT_SYS_ERROR:
  2157. ibev.event = IB_EVENT_DEVICE_FATAL;
  2158. mlx5_ib_handle_internal_error(ibdev);
  2159. fatal = true;
  2160. break;
  2161. case MLX5_DEV_EVENT_PORT_UP:
  2162. case MLX5_DEV_EVENT_PORT_DOWN:
  2163. case MLX5_DEV_EVENT_PORT_INITIALIZED:
  2164. port = (u8)param;
  2165. /* In RoCE, port up/down events are handled in
  2166. * mlx5_netdev_event().
  2167. */
  2168. if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
  2169. IB_LINK_LAYER_ETHERNET)
  2170. return;
  2171. ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
  2172. IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
  2173. break;
  2174. case MLX5_DEV_EVENT_LID_CHANGE:
  2175. ibev.event = IB_EVENT_LID_CHANGE;
  2176. port = (u8)param;
  2177. break;
  2178. case MLX5_DEV_EVENT_PKEY_CHANGE:
  2179. ibev.event = IB_EVENT_PKEY_CHANGE;
  2180. port = (u8)param;
  2181. schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
  2182. break;
  2183. case MLX5_DEV_EVENT_GUID_CHANGE:
  2184. ibev.event = IB_EVENT_GID_CHANGE;
  2185. port = (u8)param;
  2186. break;
  2187. case MLX5_DEV_EVENT_CLIENT_REREG:
  2188. ibev.event = IB_EVENT_CLIENT_REREGISTER;
  2189. port = (u8)param;
  2190. break;
  2191. default:
  2192. return;
  2193. }
  2194. ibev.device = &ibdev->ib_dev;
  2195. ibev.element.port_num = port;
  2196. if (port < 1 || port > ibdev->num_ports) {
  2197. mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
  2198. return;
  2199. }
  2200. if (ibdev->ib_active)
  2201. ib_dispatch_event(&ibev);
  2202. if (fatal)
  2203. ibdev->ib_active = false;
  2204. }
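/*
 * For IB ports, determine whether SMI is supported: with IB
 * virtualization the answer comes from the HCA vport context, otherwise
 * SMI support is assumed.
 */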
  2205. static int set_has_smi_cap(struct mlx5_ib_dev *dev)
  2206. {
  2207. struct mlx5_hca_vport_context vport_ctx;
  2208. int err;
  2209. int port;
  2210. for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
  2211. dev->mdev->port_caps[port - 1].has_smi = false;
  2212. if (MLX5_CAP_GEN(dev->mdev, port_type) ==
  2213. MLX5_CAP_PORT_TYPE_IB) {
  2214. if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
  2215. err = mlx5_query_hca_vport_context(dev->mdev, 0,
  2216. port, 0,
  2217. &vport_ctx);
  2218. if (err) {
  2219. mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
  2220. port, err);
  2221. return err;
  2222. }
  2223. dev->mdev->port_caps[port - 1].has_smi =
  2224. vport_ctx.has_smi;
  2225. } else {
  2226. dev->mdev->port_caps[port - 1].has_smi = true;
  2227. }
  2228. }
  2229. }
  2230. return 0;
  2231. }
  2232. static void get_ext_port_caps(struct mlx5_ib_dev *dev)
  2233. {
  2234. int port;
  2235. for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
  2236. mlx5_query_ext_port_caps(dev, port);
  2237. }
  2238. static int get_port_caps(struct mlx5_ib_dev *dev)
  2239. {
  2240. struct ib_device_attr *dprops = NULL;
  2241. struct ib_port_attr *pprops = NULL;
  2242. int err = -ENOMEM;
  2243. int port;
  2244. struct ib_udata uhw = {.inlen = 0, .outlen = 0};
  2245. pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
  2246. if (!pprops)
  2247. goto out;
  2248. dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
  2249. if (!dprops)
  2250. goto out;
  2251. err = set_has_smi_cap(dev);
  2252. if (err)
  2253. goto out;
  2254. err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
  2255. if (err) {
  2256. mlx5_ib_warn(dev, "query_device failed %d\n", err);
  2257. goto out;
  2258. }
  2259. for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
  2260. memset(pprops, 0, sizeof(*pprops));
  2261. err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
  2262. if (err) {
  2263. mlx5_ib_warn(dev, "query_port %d failed %d\n",
  2264. port, err);
  2265. break;
  2266. }
  2267. dev->mdev->port_caps[port - 1].pkey_table_len =
  2268. dprops->max_pkeys;
  2269. dev->mdev->port_caps[port - 1].gid_table_len =
  2270. pprops->gid_tbl_len;
  2271. mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
  2272. dprops->max_pkeys, pprops->gid_tbl_len);
  2273. }
  2274. out:
  2275. kfree(pprops);
  2276. kfree(dprops);
  2277. return err;
  2278. }
  2279. static void destroy_umrc_res(struct mlx5_ib_dev *dev)
  2280. {
  2281. int err;
  2282. err = mlx5_mr_cache_cleanup(dev);
  2283. if (err)
  2284. mlx5_ib_warn(dev, "mr cache cleanup failed\n");
  2285. mlx5_ib_destroy_qp(dev->umrc.qp);
  2286. ib_free_cq(dev->umrc.cq);
  2287. ib_dealloc_pd(dev->umrc.pd);
  2288. }
  2289. enum {
  2290. MAX_UMR_WR = 128,
  2291. };
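/*
 * Set up the resources backing UMR work requests, which the driver uses
 * for memory registration: a dedicated PD, a CQ and a
 * MLX5_IB_QPT_REG_UMR QP that is moved through INIT -> RTR -> RTS by
 * hand, plus the MR cache.
 */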
  2292. static int create_umr_res(struct mlx5_ib_dev *dev)
  2293. {
  2294. struct ib_qp_init_attr *init_attr = NULL;
  2295. struct ib_qp_attr *attr = NULL;
  2296. struct ib_pd *pd;
  2297. struct ib_cq *cq;
  2298. struct ib_qp *qp;
  2299. int ret;
  2300. attr = kzalloc(sizeof(*attr), GFP_KERNEL);
  2301. init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
  2302. if (!attr || !init_attr) {
  2303. ret = -ENOMEM;
  2304. goto error_0;
  2305. }
  2306. pd = ib_alloc_pd(&dev->ib_dev, 0);
  2307. if (IS_ERR(pd)) {
  2308. mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
  2309. ret = PTR_ERR(pd);
  2310. goto error_0;
  2311. }
  2312. cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
  2313. if (IS_ERR(cq)) {
  2314. mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
  2315. ret = PTR_ERR(cq);
  2316. goto error_2;
  2317. }
  2318. init_attr->send_cq = cq;
  2319. init_attr->recv_cq = cq;
  2320. init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
  2321. init_attr->cap.max_send_wr = MAX_UMR_WR;
  2322. init_attr->cap.max_send_sge = 1;
  2323. init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
  2324. init_attr->port_num = 1;
  2325. qp = mlx5_ib_create_qp(pd, init_attr, NULL);
  2326. if (IS_ERR(qp)) {
  2327. mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
  2328. ret = PTR_ERR(qp);
  2329. goto error_3;
  2330. }
  2331. qp->device = &dev->ib_dev;
  2332. qp->real_qp = qp;
  2333. qp->uobject = NULL;
  2334. qp->qp_type = MLX5_IB_QPT_REG_UMR;
  2335. attr->qp_state = IB_QPS_INIT;
  2336. attr->port_num = 1;
  2337. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
  2338. IB_QP_PORT, NULL);
  2339. if (ret) {
  2340. mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
  2341. goto error_4;
  2342. }
  2343. memset(attr, 0, sizeof(*attr));
  2344. attr->qp_state = IB_QPS_RTR;
  2345. attr->path_mtu = IB_MTU_256;
  2346. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
  2347. if (ret) {
  2348. mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
  2349. goto error_4;
  2350. }
  2351. memset(attr, 0, sizeof(*attr));
  2352. attr->qp_state = IB_QPS_RTS;
  2353. ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
  2354. if (ret) {
  2355. mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
  2356. goto error_4;
  2357. }
  2358. dev->umrc.qp = qp;
  2359. dev->umrc.cq = cq;
  2360. dev->umrc.pd = pd;
  2361. sema_init(&dev->umrc.sem, MAX_UMR_WR);
  2362. ret = mlx5_mr_cache_init(dev);
  2363. if (ret) {
  2364. mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
  2365. goto error_4;
  2366. }
  2367. kfree(attr);
  2368. kfree(init_attr);
  2369. return 0;
  2370. error_4:
  2371. mlx5_ib_destroy_qp(qp);
  2372. error_3:
  2373. ib_free_cq(cq);
  2374. error_2:
  2375. ib_dealloc_pd(pd);
  2376. error_0:
  2377. kfree(attr);
  2378. kfree(init_attr);
  2379. return ret;
  2380. }
  2381. static int create_dev_resources(struct mlx5_ib_resources *devr)
  2382. {
  2383. struct ib_srq_init_attr attr;
  2384. struct mlx5_ib_dev *dev;
  2385. struct ib_cq_init_attr cq_attr = {.cqe = 1};
  2386. int port;
  2387. int ret = 0;
  2388. dev = container_of(devr, struct mlx5_ib_dev, devr);
  2389. mutex_init(&devr->mutex);
  2390. devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
  2391. if (IS_ERR(devr->p0)) {
  2392. ret = PTR_ERR(devr->p0);
  2393. goto error0;
  2394. }
  2395. devr->p0->device = &dev->ib_dev;
  2396. devr->p0->uobject = NULL;
  2397. atomic_set(&devr->p0->usecnt, 0);
  2398. devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
  2399. if (IS_ERR(devr->c0)) {
  2400. ret = PTR_ERR(devr->c0);
  2401. goto error1;
  2402. }
  2403. devr->c0->device = &dev->ib_dev;
  2404. devr->c0->uobject = NULL;
  2405. devr->c0->comp_handler = NULL;
  2406. devr->c0->event_handler = NULL;
  2407. devr->c0->cq_context = NULL;
  2408. atomic_set(&devr->c0->usecnt, 0);
  2409. devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
  2410. if (IS_ERR(devr->x0)) {
  2411. ret = PTR_ERR(devr->x0);
  2412. goto error2;
  2413. }
  2414. devr->x0->device = &dev->ib_dev;
  2415. devr->x0->inode = NULL;
  2416. atomic_set(&devr->x0->usecnt, 0);
  2417. mutex_init(&devr->x0->tgt_qp_mutex);
  2418. INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
  2419. devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
  2420. if (IS_ERR(devr->x1)) {
  2421. ret = PTR_ERR(devr->x1);
  2422. goto error3;
  2423. }
  2424. devr->x1->device = &dev->ib_dev;
  2425. devr->x1->inode = NULL;
  2426. atomic_set(&devr->x1->usecnt, 0);
  2427. mutex_init(&devr->x1->tgt_qp_mutex);
  2428. INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
  2429. memset(&attr, 0, sizeof(attr));
  2430. attr.attr.max_sge = 1;
  2431. attr.attr.max_wr = 1;
  2432. attr.srq_type = IB_SRQT_XRC;
  2433. attr.ext.xrc.cq = devr->c0;
  2434. attr.ext.xrc.xrcd = devr->x0;
  2435. devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
  2436. if (IS_ERR(devr->s0)) {
  2437. ret = PTR_ERR(devr->s0);
  2438. goto error4;
  2439. }
  2440. devr->s0->device = &dev->ib_dev;
  2441. devr->s0->pd = devr->p0;
  2442. devr->s0->uobject = NULL;
  2443. devr->s0->event_handler = NULL;
  2444. devr->s0->srq_context = NULL;
  2445. devr->s0->srq_type = IB_SRQT_XRC;
  2446. devr->s0->ext.xrc.xrcd = devr->x0;
  2447. devr->s0->ext.xrc.cq = devr->c0;
  2448. atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
  2449. atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
  2450. atomic_inc(&devr->p0->usecnt);
  2451. atomic_set(&devr->s0->usecnt, 0);
  2452. memset(&attr, 0, sizeof(attr));
  2453. attr.attr.max_sge = 1;
  2454. attr.attr.max_wr = 1;
  2455. attr.srq_type = IB_SRQT_BASIC;
  2456. devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
  2457. if (IS_ERR(devr->s1)) {
  2458. ret = PTR_ERR(devr->s1);
  2459. goto error5;
  2460. }
  2461. devr->s1->device = &dev->ib_dev;
  2462. devr->s1->pd = devr->p0;
  2463. devr->s1->uobject = NULL;
  2464. devr->s1->event_handler = NULL;
  2465. devr->s1->srq_context = NULL;
  2466. devr->s1->srq_type = IB_SRQT_BASIC;
  2467. devr->s1->ext.xrc.cq = devr->c0;
  2468. atomic_inc(&devr->p0->usecnt);
  2469. atomic_set(&devr->s0->usecnt, 0);
  2470. for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
  2471. INIT_WORK(&devr->ports[port].pkey_change_work,
  2472. pkey_change_handler);
  2473. devr->ports[port].devr = devr;
  2474. }
  2475. return 0;
  2476. error5:
  2477. mlx5_ib_destroy_srq(devr->s0);
  2478. error4:
  2479. mlx5_ib_dealloc_xrcd(devr->x1);
  2480. error3:
  2481. mlx5_ib_dealloc_xrcd(devr->x0);
  2482. error2:
  2483. mlx5_ib_destroy_cq(devr->c0);
  2484. error1:
  2485. mlx5_ib_dealloc_pd(devr->p0);
  2486. error0:
  2487. return ret;
  2488. }
  2489. static void destroy_dev_resources(struct mlx5_ib_resources *devr)
  2490. {
  2491. struct mlx5_ib_dev *dev =
  2492. container_of(devr, struct mlx5_ib_dev, devr);
  2493. int port;
  2494. mlx5_ib_destroy_srq(devr->s1);
  2495. mlx5_ib_destroy_srq(devr->s0);
  2496. mlx5_ib_dealloc_xrcd(devr->x0);
  2497. mlx5_ib_dealloc_xrcd(devr->x1);
  2498. mlx5_ib_destroy_cq(devr->c0);
  2499. mlx5_ib_dealloc_pd(devr->p0);
2500. /* Make sure no P_Key change work items are still executing */
  2501. for (port = 0; port < dev->num_ports; ++port)
  2502. cancel_work_sync(&devr->ports[port].pkey_change_work);
  2503. }
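/*
 * Derive the rdma core port capability flags from the link layer and
 * the RoCE capabilities: IB ports report IBA_IB; Ethernet ports always
 * support raw packet QPs and additionally RoCE v1/v2 when both IPv4 and
 * IPv6 L3 types plus the matching RoCE versions are supported.
 */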
  2504. static u32 get_core_cap_flags(struct ib_device *ibdev)
  2505. {
  2506. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  2507. enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
  2508. u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
  2509. u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
  2510. u32 ret = 0;
  2511. if (ll == IB_LINK_LAYER_INFINIBAND)
  2512. return RDMA_CORE_PORT_IBA_IB;
  2513. ret = RDMA_CORE_PORT_RAW_PACKET;
  2514. if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
  2515. return ret;
  2516. if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
  2517. return ret;
  2518. if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
  2519. ret |= RDMA_CORE_PORT_IBA_ROCE;
  2520. if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
  2521. ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
  2522. return ret;
  2523. }
  2524. static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
  2525. struct ib_port_immutable *immutable)
  2526. {
  2527. struct ib_port_attr attr;
  2528. struct mlx5_ib_dev *dev = to_mdev(ibdev);
  2529. enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
  2530. int err;
  2531. immutable->core_cap_flags = get_core_cap_flags(ibdev);
  2532. err = ib_query_port(ibdev, port_num, &attr);
  2533. if (err)
  2534. return err;
  2535. immutable->pkey_tbl_len = attr.pkey_tbl_len;
  2536. immutable->gid_tbl_len = attr.gid_tbl_len;
  2537. immutable->core_cap_flags = get_core_cap_flags(ibdev);
  2538. if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
  2539. immutable->max_mad_size = IB_MGMT_MAD_SIZE;
  2540. return 0;
  2541. }
  2542. static void get_dev_fw_str(struct ib_device *ibdev, char *str,
  2543. size_t str_len)
  2544. {
  2545. struct mlx5_ib_dev *dev =
  2546. container_of(ibdev, struct mlx5_ib_dev, ib_dev);
  2547. snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
  2548. fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
  2549. }
  2550. static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
  2551. {
  2552. struct mlx5_core_dev *mdev = dev->mdev;
  2553. struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
  2554. MLX5_FLOW_NAMESPACE_LAG);
  2555. struct mlx5_flow_table *ft;
  2556. int err;
  2557. if (!ns || !mlx5_lag_is_active(mdev))
  2558. return 0;
  2559. err = mlx5_cmd_create_vport_lag(mdev);
  2560. if (err)
  2561. return err;
  2562. ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
  2563. if (IS_ERR(ft)) {
  2564. err = PTR_ERR(ft);
  2565. goto err_destroy_vport_lag;
  2566. }
  2567. dev->flow_db.lag_demux_ft = ft;
  2568. return 0;
  2569. err_destroy_vport_lag:
  2570. mlx5_cmd_destroy_vport_lag(mdev);
  2571. return err;
  2572. }
  2573. static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
  2574. {
  2575. struct mlx5_core_dev *mdev = dev->mdev;
  2576. if (dev->flow_db.lag_demux_ft) {
  2577. mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
  2578. dev->flow_db.lag_demux_ft = NULL;
  2579. mlx5_cmd_destroy_vport_lag(mdev);
  2580. }
  2581. }
  2582. static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
  2583. {
  2584. int err;
  2585. dev->roce.nb.notifier_call = mlx5_netdev_event;
  2586. err = register_netdevice_notifier(&dev->roce.nb);
  2587. if (err) {
  2588. dev->roce.nb.notifier_call = NULL;
  2589. return err;
  2590. }
  2591. return 0;
  2592. }
  2593. static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
  2594. {
  2595. if (dev->roce.nb.notifier_call) {
  2596. unregister_netdevice_notifier(&dev->roce.nb);
  2597. dev->roce.nb.notifier_call = NULL;
  2598. }
  2599. }
  2600. static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
  2601. {
  2602. int err;
  2603. err = mlx5_add_netdev_notifier(dev);
  2604. if (err)
  2605. return err;
  2606. if (MLX5_CAP_GEN(dev->mdev, roce)) {
  2607. err = mlx5_nic_vport_enable_roce(dev->mdev);
  2608. if (err)
  2609. goto err_unregister_netdevice_notifier;
  2610. }
  2611. err = mlx5_eth_lag_init(dev);
  2612. if (err)
  2613. goto err_disable_roce;
  2614. return 0;
  2615. err_disable_roce:
  2616. if (MLX5_CAP_GEN(dev->mdev, roce))
  2617. mlx5_nic_vport_disable_roce(dev->mdev);
  2618. err_unregister_netdevice_notifier:
  2619. mlx5_remove_netdev_notifier(dev);
  2620. return err;
  2621. }
  2622. static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
  2623. {
  2624. mlx5_eth_lag_cleanup(dev);
  2625. if (MLX5_CAP_GEN(dev->mdev, roce))
  2626. mlx5_nic_vport_disable_roce(dev->mdev);
  2627. }
  2628. struct mlx5_ib_q_counter {
  2629. const char *name;
  2630. size_t offset;
  2631. };
  2632. #define INIT_Q_COUNTER(_name) \
  2633. { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
  2634. static const struct mlx5_ib_q_counter basic_q_cnts[] = {
  2635. INIT_Q_COUNTER(rx_write_requests),
  2636. INIT_Q_COUNTER(rx_read_requests),
  2637. INIT_Q_COUNTER(rx_atomic_requests),
  2638. INIT_Q_COUNTER(out_of_buffer),
  2639. };
  2640. static const struct mlx5_ib_q_counter out_of_seq_q_cnts[] = {
  2641. INIT_Q_COUNTER(out_of_sequence),
  2642. };
  2643. static const struct mlx5_ib_q_counter retrans_q_cnts[] = {
  2644. INIT_Q_COUNTER(duplicate_request),
  2645. INIT_Q_COUNTER(rnr_nak_retry_err),
  2646. INIT_Q_COUNTER(packet_seq_err),
  2647. INIT_Q_COUNTER(implied_nak_seq_err),
  2648. INIT_Q_COUNTER(local_ack_timeout_err),
  2649. };
  2650. static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
  2651. {
  2652. unsigned int i;
  2653. for (i = 0; i < dev->num_ports; i++) {
  2654. mlx5_core_dealloc_q_counter(dev->mdev,
  2655. dev->port[i].q_cnts.set_id);
  2656. kfree(dev->port[i].q_cnts.names);
  2657. kfree(dev->port[i].q_cnts.offsets);
  2658. }
  2659. }
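/*
 * The exported counter set always contains the basic Q counters and,
 * depending on device capabilities, also the out-of-sequence and
 * retransmission counters; names and byte offsets into the
 * query_q_counter_out mailbox are kept in parallel per-port arrays.
 */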
static int __mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev,
				      const char ***names,
				      size_t **offsets,
				      u32 *num)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(basic_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
		num_counters += ARRAY_SIZE(retrans_q_cnts);

	*names = kcalloc(num_counters, sizeof(**names), GFP_KERNEL);
	if (!*names)
		return -ENOMEM;

	*offsets = kcalloc(num_counters, sizeof(**offsets), GFP_KERNEL);
	if (!*offsets)
		goto err_names;

	*num = num_counters;
	return 0;

err_names:
	kfree(*names);
	return -ENOMEM;
}
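
/* Populate the name/offset arrays in the same capability-dependent order
 * used when sizing them in __mlx5_ib_alloc_q_counters().
 */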
static void mlx5_ib_fill_q_counters(struct mlx5_ib_dev *dev,
				    const char **names,
				    size_t *offsets)
{
	int i;
	int j = 0;

	for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
		names[j] = basic_q_cnts[i].name;
		offsets[j] = basic_q_cnts[i].offset;
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
		for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
			names[j] = out_of_seq_q_cnts[i].name;
			offsets[j] = out_of_seq_q_cnts[i].offset;
		}
	}

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
			names[j] = retrans_q_cnts[i].name;
			offsets[j] = retrans_q_cnts[i].offset;
		}
	}
}
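
/* Allocate one Q counter set per port and build its counter description
 * arrays; on failure, counter sets allocated for earlier ports are released.
 */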
static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
{
	int i;
	int ret;

	for (i = 0; i < dev->num_ports; i++) {
		struct mlx5_ib_port *port = &dev->port[i];

		ret = mlx5_core_alloc_q_counter(dev->mdev,
						&port->q_cnts.set_id);
		if (ret) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, ret);
			goto dealloc_counters;
		}

		ret = __mlx5_ib_alloc_q_counters(dev,
						 &port->q_cnts.names,
						 &port->q_cnts.offsets,
						 &port->q_cnts.num_counters);
		if (ret)
			goto dealloc_counters;

		mlx5_ib_fill_q_counters(dev, port->q_cnts.names,
					port->q_cnts.offsets);
	}

	return 0;

dealloc_counters:
	while (--i >= 0)
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].q_cnts.set_id);

	return ret;
}
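
/* rdma_hw_stats allocation hook. Counters are exposed per port only, so the
 * device-wide case (port_num == 0) is rejected.
 */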
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_port *port = &dev->port[port_num - 1];

	/* We support only per port stats */
	if (port_num == 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(port->q_cnts.names,
					  port->q_cnts.num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
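
/* Query the port's Q counter set via the QUERY_Q_COUNTER command and copy
 * each big-endian 32-bit counter from its recorded offset into stats->value.
 */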
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port_num, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_port *port = &dev->port[port_num - 1];
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	__be32 val;
	int ret;
	int i;

	if (!stats)
		return -ENOSYS;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_q_counter(dev->mdev,
					port->q_cnts.set_id, 0,
					out, outlen);
	if (ret)
		goto free;

	for (i = 0; i < port->q_cnts.num_counters; i++) {
		val = *(__be32 *)(out + port->q_cnts.offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}

free:
	kvfree(out);
	return port->q_cnts.num_counters;
}
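
/* .add callback of the mlx5_core interface: allocate the IB device, fill in
 * the verbs ops and capability-dependent features, then bring up
 * Ethernet/RoCE, device resources, ODP, Q counters and UAR/bfregs, and
 * finally register the device, create the UMR resources and the sysfs
 * attributes. Any failure unwinds the steps already taken in reverse order.
 */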
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	const char *name;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		goto err_dealloc;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_free_port;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	if (!mlx5_lag_is_active(mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";

	strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);

	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.add_gid = mlx5_ib_add_gid;
	dev->ib_dev.del_gid = mlx5_ib_del_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
	}

	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow = mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}
	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_eth(dev);
		if (err)
			goto err_free_port;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_eth;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		err = mlx5_ib_alloc_q_counters(dev);
		if (err)
			goto err_odp;
	}

	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	if (!dev->mdev->priv.uar)
		goto err_q_cnt;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		goto err_uar_page;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		goto err_bfreg;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_fp_bfreg;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_fp_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);

err_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);

err_uar_page:
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

err_q_cnt:
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_eth:
	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_remove_netdev_notifier(dev);
	}

err_free_port:
	kfree(dev->port);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}
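
/* .remove callback: unwind everything set up by mlx5_ib_add() in reverse
 * order.
 */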
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	mlx5_remove_netdev_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_q_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_eth(dev);
	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}
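
/* Descriptor handed to mlx5_register_interface(): this module is the IB
 * protocol consumer of mlx5_core devices.
 */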
static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.event = mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault = mlx5_ib_pfault,
#endif
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};
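
/* Module entry points: set up the ODP infrastructure and register with
 * mlx5_core on load; unregister on unload.
 */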
static int __init mlx5_ib_init(void)
{
	int err;

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}
static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);