main.c

  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/module.h>
  34. #include <linux/init.h>
  35. #include <linux/slab.h>
  36. #include <linux/errno.h>
  37. #include <linux/netdevice.h>
  38. #include <linux/inetdevice.h>
  39. #include <linux/rtnetlink.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/ipv6.h>
  42. #include <net/addrconf.h>
  43. #include <rdma/ib_smi.h>
  44. #include <rdma/ib_user_verbs.h>
  45. #include <rdma/ib_addr.h>
  46. #include <linux/mlx4/driver.h>
  47. #include <linux/mlx4/cmd.h>
  48. #include <linux/mlx4/qp.h>
  49. #include "mlx4_ib.h"
  50. #include "user.h"
  51. #define DRV_NAME MLX4_IB_DRV_NAME
  52. #define DRV_VERSION "2.2-1"
  53. #define DRV_RELDATE "Feb 2014"
  54. #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
  55. #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
  56. #define MLX4_IB_CARD_REV_A0 0xA0
  57. MODULE_AUTHOR("Roland Dreier");
  58. MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
  59. MODULE_LICENSE("Dual BSD/GPL");
  60. MODULE_VERSION(DRV_VERSION);
  61. int mlx4_ib_sm_guid_assign = 0;
  62. module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
  63. MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
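  /*
   * Usage note (illustrative, not part of the original source): because the
   * parameter is exposed read-only (0444), SM alias-GUID assignment can be
   * requested at module load time, e.g. "modprobe mlx4_ib sm_guid_assign=1",
   * and the current value read back from
   * /sys/module/mlx4_ib/parameters/sm_guid_assign.
   */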
  64. static const char mlx4_ib_version[] =
  65. DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
  66. DRV_VERSION " (" DRV_RELDATE ")\n";
  67. struct update_gid_work {
  68. struct work_struct work;
  69. union ib_gid gids[128];
  70. struct mlx4_ib_dev *dev;
  71. int port;
  72. };
  73. static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
  74. static struct workqueue_struct *wq;
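  /*
   * Prepare a Subnet Management Packet (SMP) header for a SubnGet() query:
   * LID-routed management class, base/class version 1, method Get. Callers
   * fill in attr_id/attr_mod before handing the MAD to mlx4_MAD_IFC().
   */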
  75. static void init_query_mad(struct ib_smp *mad)
  76. {
  77. mad->base_version = 1;
  78. mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
  79. mad->class_version = 1;
  80. mad->method = IB_MGMT_METHOD_GET;
  81. }
  82. static union ib_gid zgid;
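  /*
   * Device-managed flow steering (DMFS) is reported as supported only when
   * every port type in use has the matching capability bit (FS_EN for
   * Ethernet ports, DMFS_IPOIB for IB ports) and, for IB ports, the device
   * is not running in multi-function (SR-IOV) mode.
   */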
  83. static int check_flow_steering_support(struct mlx4_dev *dev)
  84. {
  85. int eth_num_ports = 0;
  86. int ib_num_ports = 0;
  87. int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
  88. if (dmfs) {
  89. int i;
  90. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
  91. eth_num_ports++;
  92. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
  93. ib_num_ports++;
  94. dmfs &= (!ib_num_ports ||
  95. (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
  96. (!eth_num_ports ||
  97. (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
  98. if (ib_num_ports && mlx4_is_mfunc(dev)) {
  99. pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
  100. dmfs = 0;
  101. }
  102. }
  103. return dmfs;
  104. }
  105. static int num_ib_ports(struct mlx4_dev *dev)
  106. {
  107. int ib_ports = 0;
  108. int i;
  109. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
  110. ib_ports++;
  111. return ib_ports;
  112. }
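  /*
   * ib_device_attr is filled from two sources: a NODE_INFO MAD queried
   * through the firmware (vendor id, hardware revision, system image GUID)
   * and the cached mlx4 capability/quota structures for limits such as
   * max_qp, max_cqe and max_srq_wr.
   */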
  113. static int mlx4_ib_query_device(struct ib_device *ibdev,
  114. struct ib_device_attr *props)
  115. {
  116. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  117. struct ib_smp *in_mad = NULL;
  118. struct ib_smp *out_mad = NULL;
  119. int err = -ENOMEM;
  120. int have_ib_ports;
  121. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  122. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  123. if (!in_mad || !out_mad)
  124. goto out;
  125. init_query_mad(in_mad);
  126. in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
  127. err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
  128. 1, NULL, NULL, in_mad, out_mad);
  129. if (err)
  130. goto out;
  131. memset(props, 0, sizeof *props);
  132. have_ib_ports = num_ib_ports(dev->dev);
  133. props->fw_ver = dev->dev->caps.fw_ver;
  134. props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
  135. IB_DEVICE_PORT_ACTIVE_EVENT |
  136. IB_DEVICE_SYS_IMAGE_GUID |
  137. IB_DEVICE_RC_RNR_NAK_GEN |
  138. IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
  139. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
  140. props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
  141. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
  142. props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
  143. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
  144. props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
  145. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
  146. props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
  147. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
  148. props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
  149. if (dev->dev->caps.max_gso_sz &&
  150. (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
  151. (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
  152. props->device_cap_flags |= IB_DEVICE_UD_TSO;
  153. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
  154. props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
  155. if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
  156. (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
  157. (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
  158. props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
  159. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
  160. props->device_cap_flags |= IB_DEVICE_XRC;
  161. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
  162. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
  163. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
  164. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
  165. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
  166. else
  167. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
  168. if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
  169. props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
  170. }
  171. props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
  172. 0xffffff;
  173. props->vendor_part_id = dev->dev->persist->pdev->device;
  174. props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
  175. memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
  176. props->max_mr_size = ~0ull;
  177. props->page_size_cap = dev->dev->caps.page_size_cap;
  178. props->max_qp = dev->dev->quotas.qp;
  179. props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
  180. props->max_sge = min(dev->dev->caps.max_sq_sg,
  181. dev->dev->caps.max_rq_sg);
  182. props->max_cq = dev->dev->quotas.cq;
  183. props->max_cqe = dev->dev->caps.max_cqes;
  184. props->max_mr = dev->dev->quotas.mpt;
  185. props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
  186. props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
  187. props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
  188. props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
  189. props->max_srq = dev->dev->quotas.srq;
  190. props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
  191. props->max_srq_sge = dev->dev->caps.max_srq_sge;
  192. props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
  193. props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
  194. props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
  195. IB_ATOMIC_HCA : IB_ATOMIC_NONE;
  196. props->masked_atomic_cap = props->atomic_cap;
  197. props->max_pkeys = dev->dev->caps.pkey_table_len[1];
  198. props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
  199. props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
  200. props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
  201. props->max_mcast_grp;
  202. props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
  203. out:
  204. kfree(in_mad);
  205. kfree(out_mad);
  206. return err;
  207. }
  208. static enum rdma_link_layer
  209. mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
  210. {
  211. struct mlx4_dev *dev = to_mdev(device)->dev;
  212. return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
  213. IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
  214. }
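  /*
   * IB port query: read PORT_INFO via a MAD, then, when the extended speed
   * capability is advertised, decode FDR/EDR from byte 62 and detect FDR-10
   * through the Mellanox EXTENDED_PORT_INFO attribute. A port that is down
   * is forced to SDR to avoid a stale speed from firmware.
   */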
  215. static int ib_link_query_port(struct ib_device *ibdev, u8 port,
  216. struct ib_port_attr *props, int netw_view)
  217. {
  218. struct ib_smp *in_mad = NULL;
  219. struct ib_smp *out_mad = NULL;
  220. int ext_active_speed;
  221. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  222. int err = -ENOMEM;
  223. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  224. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  225. if (!in_mad || !out_mad)
  226. goto out;
  227. init_query_mad(in_mad);
  228. in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
  229. in_mad->attr_mod = cpu_to_be32(port);
  230. if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
  231. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  232. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
  233. in_mad, out_mad);
  234. if (err)
  235. goto out;
  236. props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
  237. props->lmc = out_mad->data[34] & 0x7;
  238. props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
  239. props->sm_sl = out_mad->data[36] & 0xf;
  240. props->state = out_mad->data[32] & 0xf;
  241. props->phys_state = out_mad->data[33] >> 4;
  242. props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
  243. if (netw_view)
  244. props->gid_tbl_len = out_mad->data[50];
  245. else
  246. props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
  247. props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
  248. props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
  249. props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
  250. props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
  251. props->active_width = out_mad->data[31] & 0xf;
  252. props->active_speed = out_mad->data[35] >> 4;
  253. props->max_mtu = out_mad->data[41] & 0xf;
  254. props->active_mtu = out_mad->data[36] >> 4;
  255. props->subnet_timeout = out_mad->data[51] & 0x1f;
  256. props->max_vl_num = out_mad->data[37] >> 4;
  257. props->init_type_reply = out_mad->data[41] >> 4;
  258. /* Check if extended speeds (EDR/FDR/...) are supported */
  259. if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
  260. ext_active_speed = out_mad->data[62] >> 4;
  261. switch (ext_active_speed) {
  262. case 1:
  263. props->active_speed = IB_SPEED_FDR;
  264. break;
  265. case 2:
  266. props->active_speed = IB_SPEED_EDR;
  267. break;
  268. }
  269. }
  270. /* If reported active speed is QDR, check if is FDR-10 */
  271. if (props->active_speed == IB_SPEED_QDR) {
  272. init_query_mad(in_mad);
  273. in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
  274. in_mad->attr_mod = cpu_to_be32(port);
  275. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
  276. NULL, NULL, in_mad, out_mad);
  277. if (err)
  278. goto out;
  279. /* Checking LinkSpeedActive for FDR-10 */
  280. if (out_mad->data[15] & 0x1)
  281. props->active_speed = IB_SPEED_FDR10;
  282. }
  283. /* Avoid wrong speed value returned by FW if the IB link is down. */
  284. if (props->state == IB_PORT_DOWN)
  285. props->active_speed = IB_SPEED_SDR;
  286. out:
  287. kfree(in_mad);
  288. kfree(out_mad);
  289. return err;
  290. }
  291. static u8 state_to_phys_state(enum ib_port_state state)
  292. {
  293. return state == IB_PORT_ACTIVE ? 5 : 3;
  294. }
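  /*
   * RoCE port query: the active width comes from the QUERY_PORT firmware
   * command, the speed is reported as QDR, and the port state and MTU are
   * derived from the paired net_device (or its bonding master when the
   * ports are bonded).
   */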
  295. static int eth_link_query_port(struct ib_device *ibdev, u8 port,
  296. struct ib_port_attr *props, int netw_view)
  297. {
  298. struct mlx4_ib_dev *mdev = to_mdev(ibdev);
  299. struct mlx4_ib_iboe *iboe = &mdev->iboe;
  300. struct net_device *ndev;
  301. enum ib_mtu tmp;
  302. struct mlx4_cmd_mailbox *mailbox;
  303. int err = 0;
  304. int is_bonded = mlx4_is_bonded(mdev->dev);
  305. mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
  306. if (IS_ERR(mailbox))
  307. return PTR_ERR(mailbox);
  308. err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
  309. MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
  310. MLX4_CMD_WRAPPED);
  311. if (err)
  312. goto out;
  313. props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
  314. IB_WIDTH_4X : IB_WIDTH_1X;
  315. props->active_speed = IB_SPEED_QDR;
  316. props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
  317. props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
  318. props->max_msg_sz = mdev->dev->caps.max_msg_sz;
  319. props->pkey_tbl_len = 1;
  320. props->max_mtu = IB_MTU_4096;
  321. props->max_vl_num = 2;
  322. props->state = IB_PORT_DOWN;
  323. props->phys_state = state_to_phys_state(props->state);
  324. props->active_mtu = IB_MTU_256;
  325. if (is_bonded)
  326. rtnl_lock(); /* required to get upper dev */
  327. spin_lock_bh(&iboe->lock);
  328. ndev = iboe->netdevs[port - 1];
  329. if (ndev && is_bonded)
  330. ndev = netdev_master_upper_dev_get(ndev);
  331. if (!ndev)
  332. goto out_unlock;
  333. tmp = iboe_get_mtu(ndev->mtu);
  334. props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
  335. props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
  336. IB_PORT_ACTIVE : IB_PORT_DOWN;
  337. props->phys_state = state_to_phys_state(props->state);
  338. out_unlock:
  339. spin_unlock_bh(&iboe->lock);
  340. if (is_bonded)
  341. rtnl_unlock();
  342. out:
  343. mlx4_free_cmd_mailbox(mdev->dev, mailbox);
  344. return err;
  345. }
  346. int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
  347. struct ib_port_attr *props, int netw_view)
  348. {
  349. int err;
  350. memset(props, 0, sizeof *props);
  351. err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
  352. ib_link_query_port(ibdev, port, props, netw_view) :
  353. eth_link_query_port(ibdev, port, props, netw_view);
  354. return err;
  355. }
  356. static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
  357. struct ib_port_attr *props)
  358. {
  359. /* returns host view */
  360. return __mlx4_ib_query_port(ibdev, port, props, 0);
  361. }
  362. int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  363. union ib_gid *gid, int netw_view)
  364. {
  365. struct ib_smp *in_mad = NULL;
  366. struct ib_smp *out_mad = NULL;
  367. int err = -ENOMEM;
  368. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  369. int clear = 0;
  370. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  371. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  372. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  373. if (!in_mad || !out_mad)
  374. goto out;
  375. init_query_mad(in_mad);
  376. in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
  377. in_mad->attr_mod = cpu_to_be32(port);
  378. if (mlx4_is_mfunc(dev->dev) && netw_view)
  379. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  380. err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
  381. if (err)
  382. goto out;
  383. memcpy(gid->raw, out_mad->data + 8, 8);
  384. if (mlx4_is_mfunc(dev->dev) && !netw_view) {
  385. if (index) {
  386. /* For any index > 0, return the null guid */
  387. err = 0;
  388. clear = 1;
  389. goto out;
  390. }
  391. }
  392. init_query_mad(in_mad);
  393. in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
  394. in_mad->attr_mod = cpu_to_be32(index / 8);
  395. err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
  396. NULL, NULL, in_mad, out_mad);
  397. if (err)
  398. goto out;
  399. memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
  400. out:
  401. if (clear)
  402. memset(gid->raw + 8, 0, 8);
  403. kfree(in_mad);
  404. kfree(out_mad);
  405. return err;
  406. }
  407. static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
  408. union ib_gid *gid)
  409. {
  410. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  411. *gid = dev->iboe.gid_table[port - 1][index];
  412. return 0;
  413. }
  414. static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  415. union ib_gid *gid)
  416. {
  417. if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
  418. return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
  419. else
  420. return iboe_query_gid(ibdev, port, index, gid);
  421. }
  422. int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
  423. u16 *pkey, int netw_view)
  424. {
  425. struct ib_smp *in_mad = NULL;
  426. struct ib_smp *out_mad = NULL;
  427. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  428. int err = -ENOMEM;
  429. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  430. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  431. if (!in_mad || !out_mad)
  432. goto out;
  433. init_query_mad(in_mad);
  434. in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
  435. in_mad->attr_mod = cpu_to_be32(index / 32);
  436. if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
  437. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  438. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
  439. in_mad, out_mad);
  440. if (err)
  441. goto out;
  442. *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
  443. out:
  444. kfree(in_mad);
  445. kfree(out_mad);
  446. return err;
  447. }
  448. static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
  449. {
  450. return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
  451. }
  452. static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
  453. struct ib_device_modify *props)
  454. {
  455. struct mlx4_cmd_mailbox *mailbox;
  456. unsigned long flags;
  457. if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
  458. return -EOPNOTSUPP;
  459. if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
  460. return 0;
  461. if (mlx4_is_slave(to_mdev(ibdev)->dev))
  462. return -EOPNOTSUPP;
  463. spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
  464. memcpy(ibdev->node_desc, props->node_desc, 64);
  465. spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
  466. /*
  467. * If possible, pass node desc to FW, so it can generate
  468. * a 144 trap. If cmd fails, just ignore.
  469. */
  470. mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
  471. if (IS_ERR(mailbox))
  472. return 0;
  473. memcpy(mailbox->buf, props->node_desc, 64);
  474. mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
  475. MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  476. mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
  477. return 0;
  478. }
  479. static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
  480. u32 cap_mask)
  481. {
  482. struct mlx4_cmd_mailbox *mailbox;
  483. int err;
  484. mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
  485. if (IS_ERR(mailbox))
  486. return PTR_ERR(mailbox);
  487. if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
  488. *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
  489. ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
  490. } else {
  491. ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
  492. ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
  493. }
  494. err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
  495. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  496. MLX4_CMD_WRAPPED);
  497. mlx4_free_cmd_mailbox(dev->dev, mailbox);
  498. return err;
  499. }
  500. static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
  501. struct ib_port_modify *props)
  502. {
  503. struct mlx4_ib_dev *mdev = to_mdev(ibdev);
  504. u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
  505. struct ib_port_attr attr;
  506. u32 cap_mask;
  507. int err;
  508. /* return OK if this is RoCE. CM calls ib_modify_port() regardless
  509. * of whether port link layer is ETH or IB. For ETH ports, qkey
  510. * violations and port capabilities are not meaningful.
  511. */
  512. if (is_eth)
  513. return 0;
  514. mutex_lock(&mdev->cap_mask_mutex);
  515. err = mlx4_ib_query_port(ibdev, port, &attr);
  516. if (err)
  517. goto out;
  518. cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
  519. ~props->clr_port_cap_mask;
  520. err = mlx4_ib_SET_PORT(mdev, port,
  521. !!(mask & IB_PORT_RESET_QKEY_CNTR),
  522. cap_mask);
  523. out:
  524. mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
  525. return err;
  526. }
  527. static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
  528. struct ib_udata *udata)
  529. {
  530. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  531. struct mlx4_ib_ucontext *context;
  532. struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
  533. struct mlx4_ib_alloc_ucontext_resp resp;
  534. int err;
  535. if (!dev->ib_active)
  536. return ERR_PTR(-EAGAIN);
  537. if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
  538. resp_v3.qp_tab_size = dev->dev->caps.num_qps;
  539. resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
  540. resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
  541. } else {
  542. resp.dev_caps = dev->dev->caps.userspace_caps;
  543. resp.qp_tab_size = dev->dev->caps.num_qps;
  544. resp.bf_reg_size = dev->dev->caps.bf_reg_size;
  545. resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
  546. resp.cqe_size = dev->dev->caps.cqe_size;
  547. }
  548. context = kmalloc(sizeof *context, GFP_KERNEL);
  549. if (!context)
  550. return ERR_PTR(-ENOMEM);
  551. err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
  552. if (err) {
  553. kfree(context);
  554. return ERR_PTR(err);
  555. }
  556. INIT_LIST_HEAD(&context->db_page_list);
  557. mutex_init(&context->db_page_mutex);
  558. if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
  559. err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
  560. else
  561. err = ib_copy_to_udata(udata, &resp, sizeof(resp));
  562. if (err) {
  563. mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
  564. kfree(context);
  565. return ERR_PTR(-EFAULT);
  566. }
  567. return &context->ibucontext;
  568. }
  569. static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
  570. {
  571. struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
  572. mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
  573. kfree(context);
  574. return 0;
  575. }
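  /*
   * Userspace mmap offsets: page 0 maps the context UAR (non-cached),
   * page 1 maps the matching BlueFlame register (write-combining) when
   * bf_reg_size is non-zero; any other offset, or a mapping that is not
   * exactly one page, is rejected.
   */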
  576. static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
  577. {
  578. struct mlx4_ib_dev *dev = to_mdev(context->device);
  579. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  580. return -EINVAL;
  581. if (vma->vm_pgoff == 0) {
  582. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  583. if (io_remap_pfn_range(vma, vma->vm_start,
  584. to_mucontext(context)->uar.pfn,
  585. PAGE_SIZE, vma->vm_page_prot))
  586. return -EAGAIN;
  587. } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
  588. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  589. if (io_remap_pfn_range(vma, vma->vm_start,
  590. to_mucontext(context)->uar.pfn +
  591. dev->dev->caps.num_uars,
  592. PAGE_SIZE, vma->vm_page_prot))
  593. return -EAGAIN;
  594. } else
  595. return -EINVAL;
  596. return 0;
  597. }
  598. static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
  599. struct ib_ucontext *context,
  600. struct ib_udata *udata)
  601. {
  602. struct mlx4_ib_pd *pd;
  603. int err;
  604. pd = kmalloc(sizeof *pd, GFP_KERNEL);
  605. if (!pd)
  606. return ERR_PTR(-ENOMEM);
  607. err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
  608. if (err) {
  609. kfree(pd);
  610. return ERR_PTR(err);
  611. }
  612. if (context)
  613. if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
  614. mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
  615. kfree(pd);
  616. return ERR_PTR(-EFAULT);
  617. }
  618. return &pd->ibpd;
  619. }
  620. static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
  621. {
  622. mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
  623. kfree(pd);
  624. return 0;
  625. }
  626. static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
  627. struct ib_ucontext *context,
  628. struct ib_udata *udata)
  629. {
  630. struct mlx4_ib_xrcd *xrcd;
  631. int err;
  632. if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
  633. return ERR_PTR(-ENOSYS);
  634. xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
  635. if (!xrcd)
  636. return ERR_PTR(-ENOMEM);
  637. err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
  638. if (err)
  639. goto err1;
  640. xrcd->pd = ib_alloc_pd(ibdev);
  641. if (IS_ERR(xrcd->pd)) {
  642. err = PTR_ERR(xrcd->pd);
  643. goto err2;
  644. }
  645. xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
  646. if (IS_ERR(xrcd->cq)) {
  647. err = PTR_ERR(xrcd->cq);
  648. goto err3;
  649. }
  650. return &xrcd->ibxrcd;
  651. err3:
  652. ib_dealloc_pd(xrcd->pd);
  653. err2:
  654. mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
  655. err1:
  656. kfree(xrcd);
  657. return ERR_PTR(err);
  658. }
  659. static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
  660. {
  661. ib_destroy_cq(to_mxrcd(xrcd)->cq);
  662. ib_dealloc_pd(to_mxrcd(xrcd)->pd);
  663. mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
  664. kfree(xrcd);
  665. return 0;
  666. }
  667. static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
  668. {
  669. struct mlx4_ib_qp *mqp = to_mqp(ibqp);
  670. struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
  671. struct mlx4_ib_gid_entry *ge;
  672. ge = kzalloc(sizeof *ge, GFP_KERNEL);
  673. if (!ge)
  674. return -ENOMEM;
  675. ge->gid = *gid;
  676. if (mlx4_ib_add_mc(mdev, mqp, gid)) {
  677. ge->port = mqp->port;
  678. ge->added = 1;
  679. }
  680. mutex_lock(&mqp->mutex);
  681. list_add_tail(&ge->list, &mqp->gid_list);
  682. mutex_unlock(&mqp->mutex);
  683. return 0;
  684. }
  685. int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
  686. union ib_gid *gid)
  687. {
  688. struct net_device *ndev;
  689. int ret = 0;
  690. if (!mqp->port)
  691. return 0;
  692. spin_lock_bh(&mdev->iboe.lock);
  693. ndev = mdev->iboe.netdevs[mqp->port - 1];
  694. if (ndev)
  695. dev_hold(ndev);
  696. spin_unlock_bh(&mdev->iboe.lock);
  697. if (ndev) {
  698. ret = 1;
  699. dev_put(ndev);
  700. }
  701. return ret;
  702. }
  703. struct mlx4_ib_steering {
  704. struct list_head list;
  705. struct mlx4_flow_reg_id reg_id;
  706. union ib_gid gid;
  707. };
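  /*
   * Translate one ib_flow_spec (ETH/IB/IPV4/TCP/UDP) into the matching mlx4
   * hardware steering rule segment and return its size in bytes, or -EINVAL
   * for unsupported spec types.
   */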
  708. static int parse_flow_attr(struct mlx4_dev *dev,
  709. u32 qp_num,
  710. union ib_flow_spec *ib_spec,
  711. struct _rule_hw *mlx4_spec)
  712. {
  713. enum mlx4_net_trans_rule_id type;
  714. switch (ib_spec->type) {
  715. case IB_FLOW_SPEC_ETH:
  716. type = MLX4_NET_TRANS_RULE_ID_ETH;
  717. memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
  718. ETH_ALEN);
  719. memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
  720. ETH_ALEN);
  721. mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
  722. mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
  723. break;
  724. case IB_FLOW_SPEC_IB:
  725. type = MLX4_NET_TRANS_RULE_ID_IB;
  726. mlx4_spec->ib.l3_qpn =
  727. cpu_to_be32(qp_num);
  728. mlx4_spec->ib.qpn_mask =
  729. cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
  730. break;
  731. case IB_FLOW_SPEC_IPV4:
  732. type = MLX4_NET_TRANS_RULE_ID_IPV4;
  733. mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
  734. mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
  735. mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
  736. mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
  737. break;
  738. case IB_FLOW_SPEC_TCP:
  739. case IB_FLOW_SPEC_UDP:
  740. type = ib_spec->type == IB_FLOW_SPEC_TCP ?
  741. MLX4_NET_TRANS_RULE_ID_TCP :
  742. MLX4_NET_TRANS_RULE_ID_UDP;
  743. mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
  744. mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
  745. mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
  746. mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
  747. break;
  748. default:
  749. return -EINVAL;
  750. }
  751. if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
  752. mlx4_hw_rule_sz(dev, type) < 0)
  753. return -EINVAL;
  754. mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
  755. mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
  756. return mlx4_hw_rule_sz(dev, type);
  757. }
  758. struct default_rules {
  759. __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
  760. __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
  761. __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
  762. __u8 link_layer;
  763. };
  764. static const struct default_rules default_table[] = {
  765. {
  766. .mandatory_fields = {IB_FLOW_SPEC_IPV4},
  767. .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
  768. .rules_create_list = {IB_FLOW_SPEC_IB},
  769. .link_layer = IB_LINK_LAYER_INFINIBAND
  770. }
  771. };
  772. static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
  773. struct ib_flow_attr *flow_attr)
  774. {
  775. int i, j, k;
  776. void *ib_flow;
  777. const struct default_rules *pdefault_rules = default_table;
  778. u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
  779. for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
  780. __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
  781. memset(&field_types, 0, sizeof(field_types));
  782. if (link_layer != pdefault_rules->link_layer)
  783. continue;
  784. ib_flow = flow_attr + 1;
  785. /* we assume the specs are sorted */
  786. for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
  787. j < flow_attr->num_of_specs; k++) {
  788. union ib_flow_spec *current_flow =
  789. (union ib_flow_spec *)ib_flow;
  790. /* same layer but different type */
  791. if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
  792. (pdefault_rules->mandatory_fields[k] &
  793. IB_FLOW_SPEC_LAYER_MASK)) &&
  794. (current_flow->type !=
  795. pdefault_rules->mandatory_fields[k]))
  796. goto out;
  797. /* same layer, try match next one */
  798. if (current_flow->type ==
  799. pdefault_rules->mandatory_fields[k]) {
  800. j++;
  801. ib_flow +=
  802. ((union ib_flow_spec *)ib_flow)->size;
  803. }
  804. }
  805. ib_flow = flow_attr + 1;
  806. for (j = 0; j < flow_attr->num_of_specs;
  807. j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
  808. for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
  809. /* same layer and same type */
  810. if (((union ib_flow_spec *)ib_flow)->type ==
  811. pdefault_rules->mandatory_not_fields[k])
  812. goto out;
  813. return i;
  814. }
  815. out:
  816. return -1;
  817. }
  818. static int __mlx4_ib_create_default_rules(
  819. struct mlx4_ib_dev *mdev,
  820. struct ib_qp *qp,
  821. const struct default_rules *pdefault_rules,
  822. struct _rule_hw *mlx4_spec) {
  823. int size = 0;
  824. int i;
  825. for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
  826. int ret;
  827. union ib_flow_spec ib_spec;
  828. switch (pdefault_rules->rules_create_list[i]) {
  829. case 0:
  830. /* no rule */
  831. continue;
  832. case IB_FLOW_SPEC_IB:
  833. ib_spec.type = IB_FLOW_SPEC_IB;
  834. ib_spec.size = sizeof(struct ib_flow_spec_ib);
  835. break;
  836. default:
  837. /* invalid rule */
  838. return -EINVAL;
  839. }
  840. /* We must put an empty rule here; the qpn is ignored */
  841. ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
  842. mlx4_spec);
  843. if (ret < 0) {
  844. pr_info("invalid parsing\n");
  845. return -EINVAL;
  846. }
  847. mlx4_spec = (void *)mlx4_spec + ret;
  848. size += ret;
  849. }
  850. return size;
  851. }
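  /*
   * Build the complete steering rule in a command mailbox: a control segment
   * (priority, type, port, QPN) followed by any default rules and the
   * caller-supplied specs, then attach it with MLX4_QP_FLOW_STEERING_ATTACH.
   * The returned 64-bit reg_id is used later to detach the rule.
   */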
  852. static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
  853. int domain,
  854. enum mlx4_net_trans_promisc_mode flow_type,
  855. u64 *reg_id)
  856. {
  857. int ret, i;
  858. int size = 0;
  859. void *ib_flow;
  860. struct mlx4_ib_dev *mdev = to_mdev(qp->device);
  861. struct mlx4_cmd_mailbox *mailbox;
  862. struct mlx4_net_trans_rule_hw_ctrl *ctrl;
  863. int default_flow;
  864. static const u16 __mlx4_domain[] = {
  865. [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
  866. [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
  867. [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
  868. [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
  869. };
  870. if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
  871. pr_err("Invalid priority value %d\n", flow_attr->priority);
  872. return -EINVAL;
  873. }
  874. if (domain >= IB_FLOW_DOMAIN_NUM) {
  875. pr_err("Invalid domain value %d\n", domain);
  876. return -EINVAL;
  877. }
  878. if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
  879. return -EINVAL;
  880. mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
  881. if (IS_ERR(mailbox))
  882. return PTR_ERR(mailbox);
  883. ctrl = mailbox->buf;
  884. ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
  885. flow_attr->priority);
  886. ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
  887. ctrl->port = flow_attr->port;
  888. ctrl->qpn = cpu_to_be32(qp->qp_num);
  889. ib_flow = flow_attr + 1;
  890. size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
  891. /* Add default flows */
  892. default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
  893. if (default_flow >= 0) {
  894. ret = __mlx4_ib_create_default_rules(
  895. mdev, qp, default_table + default_flow,
  896. mailbox->buf + size);
  897. if (ret < 0) {
  898. mlx4_free_cmd_mailbox(mdev->dev, mailbox);
  899. return -EINVAL;
  900. }
  901. size += ret;
  902. }
  903. for (i = 0; i < flow_attr->num_of_specs; i++) {
  904. ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
  905. mailbox->buf + size);
  906. if (ret < 0) {
  907. mlx4_free_cmd_mailbox(mdev->dev, mailbox);
  908. return -EINVAL;
  909. }
  910. ib_flow += ((union ib_flow_spec *) ib_flow)->size;
  911. size += ret;
  912. }
  913. ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
  914. MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
  915. MLX4_CMD_NATIVE);
  916. if (ret == -ENOMEM)
  917. pr_err("mcg table is full. Fail to register network rule.\n");
  918. else if (ret == -ENXIO)
  919. pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
  920. else if (ret)
  921. pr_err("Invalid argumant. Fail to register network rule.\n");
  922. mlx4_free_cmd_mailbox(mdev->dev, mailbox);
  923. return ret;
  924. }
  925. static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
  926. {
  927. int err;
  928. err = mlx4_cmd(dev, reg_id, 0, 0,
  929. MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
  930. MLX4_CMD_NATIVE);
  931. if (err)
  932. pr_err("Fail to detach network rule. registration id = 0x%llx\n",
  933. reg_id);
  934. return err;
  935. }
  936. static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
  937. u64 *reg_id)
  938. {
  939. void *ib_flow;
  940. union ib_flow_spec *ib_spec;
  941. struct mlx4_dev *dev = to_mdev(qp->device)->dev;
  942. int err = 0;
  943. if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
  944. dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
  945. return 0; /* do nothing */
  946. ib_flow = flow_attr + 1;
  947. ib_spec = (union ib_flow_spec *)ib_flow;
  948. if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
  949. return 0; /* do nothing */
  950. err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
  951. flow_attr->port, qp->qp_num,
  952. MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
  953. reg_id);
  954. return err;
  955. }
  956. static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
  957. struct ib_flow_attr *flow_attr,
  958. int domain)
  959. {
  960. int err = 0, i = 0, j = 0;
  961. struct mlx4_ib_flow *mflow;
  962. enum mlx4_net_trans_promisc_mode type[2];
  963. struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
  964. int is_bonded = mlx4_is_bonded(dev);
  965. memset(type, 0, sizeof(type));
  966. mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
  967. if (!mflow) {
  968. err = -ENOMEM;
  969. goto err_free;
  970. }
  971. switch (flow_attr->type) {
  972. case IB_FLOW_ATTR_NORMAL:
  973. type[0] = MLX4_FS_REGULAR;
  974. break;
  975. case IB_FLOW_ATTR_ALL_DEFAULT:
  976. type[0] = MLX4_FS_ALL_DEFAULT;
  977. break;
  978. case IB_FLOW_ATTR_MC_DEFAULT:
  979. type[0] = MLX4_FS_MC_DEFAULT;
  980. break;
  981. case IB_FLOW_ATTR_SNIFFER:
  982. type[0] = MLX4_FS_UC_SNIFFER;
  983. type[1] = MLX4_FS_MC_SNIFFER;
  984. break;
  985. default:
  986. err = -EINVAL;
  987. goto err_free;
  988. }
  989. while (i < ARRAY_SIZE(type) && type[i]) {
  990. err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
  991. &mflow->reg_id[i].id);
  992. if (err)
  993. goto err_create_flow;
  994. i++;
  995. if (is_bonded) {
  996. /* Application always sees one port so the mirror rule
  997. * must be on port #2
  998. */
  999. flow_attr->port = 2;
  1000. err = __mlx4_ib_create_flow(qp, flow_attr,
  1001. domain, type[j],
  1002. &mflow->reg_id[j].mirror);
  1003. flow_attr->port = 1;
  1004. if (err)
  1005. goto err_create_flow;
  1006. j++;
  1007. }
  1008. }
  1009. if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
  1010. err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
  1011. &mflow->reg_id[i].id);
  1012. if (err)
  1013. goto err_create_flow;
  1014. i++;
  1015. if (is_bonded) {
  1016. flow_attr->port = 2;
  1017. err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
  1018. &mflow->reg_id[j].mirror);
  1019. flow_attr->port = 1;
  1020. if (err)
  1021. goto err_create_flow;
  1022. j++;
  1023. }
  1024. /* function to create mirror rule */
  1025. }
  1026. return &mflow->ibflow;
  1027. err_create_flow:
  1028. while (i) {
  1029. (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
  1030. mflow->reg_id[i].id);
  1031. i--;
  1032. }
  1033. while (j) {
  1034. (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
  1035. mflow->reg_id[j].mirror);
  1036. j--;
  1037. }
  1038. err_free:
  1039. kfree(mflow);
  1040. return ERR_PTR(err);
  1041. }
  1042. static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
  1043. {
  1044. int err, ret = 0;
  1045. int i = 0;
  1046. struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
  1047. struct mlx4_ib_flow *mflow = to_mflow(flow_id);
  1048. while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
  1049. err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
  1050. if (err)
  1051. ret = err;
  1052. if (mflow->reg_id[i].mirror) {
  1053. err = __mlx4_ib_destroy_flow(mdev->dev,
  1054. mflow->reg_id[i].mirror);
  1055. if (err)
  1056. ret = err;
  1057. }
  1058. i++;
  1059. }
  1060. kfree(mflow);
  1061. return ret;
  1062. }
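  /*
   * Multicast attach: register the group with the firmware, add a mirror
   * registration on the other port when the device is bonded, and track the
   * GID (plus the steering reg_id under device-managed steering) so that
   * detach can undo everything.
   */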
  1063. static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  1064. {
  1065. int err;
  1066. struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
  1067. struct mlx4_dev *dev = mdev->dev;
  1068. struct mlx4_ib_qp *mqp = to_mqp(ibqp);
  1069. struct mlx4_ib_steering *ib_steering = NULL;
  1070. enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
  1071. struct mlx4_flow_reg_id reg_id;
  1072. if (mdev->dev->caps.steering_mode ==
  1073. MLX4_STEERING_MODE_DEVICE_MANAGED) {
  1074. ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
  1075. if (!ib_steering)
  1076. return -ENOMEM;
  1077. }
  1078. err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
  1079. !!(mqp->flags &
  1080. MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
  1081. prot, &reg_id.id);
  1082. if (err) {
  1083. pr_err("multicast attach op failed, err %d\n", err);
  1084. goto err_malloc;
  1085. }
  1086. reg_id.mirror = 0;
  1087. if (mlx4_is_bonded(dev)) {
  1088. err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
  1089. (mqp->port == 1) ? 2 : 1,
  1090. !!(mqp->flags &
  1091. MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
  1092. prot, &reg_id.mirror);
  1093. if (err)
  1094. goto err_add;
  1095. }
  1096. err = add_gid_entry(ibqp, gid);
  1097. if (err)
  1098. goto err_add;
  1099. if (ib_steering) {
  1100. memcpy(ib_steering->gid.raw, gid->raw, 16);
  1101. ib_steering->reg_id = reg_id;
  1102. mutex_lock(&mqp->mutex);
  1103. list_add(&ib_steering->list, &mqp->steering_rules);
  1104. mutex_unlock(&mqp->mutex);
  1105. }
  1106. return 0;
  1107. err_add:
  1108. mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
  1109. prot, reg_id.id);
  1110. if (reg_id.mirror)
  1111. mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
  1112. prot, reg_id.mirror);
  1113. err_malloc:
  1114. kfree(ib_steering);
  1115. return err;
  1116. }
  1117. static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
  1118. {
  1119. struct mlx4_ib_gid_entry *ge;
  1120. struct mlx4_ib_gid_entry *tmp;
  1121. struct mlx4_ib_gid_entry *ret = NULL;
  1122. list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
  1123. if (!memcmp(raw, ge->gid.raw, 16)) {
  1124. ret = ge;
  1125. break;
  1126. }
  1127. }
  1128. return ret;
  1129. }
  1130. static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
  1131. {
  1132. int err;
  1133. struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
  1134. struct mlx4_dev *dev = mdev->dev;
  1135. struct mlx4_ib_qp *mqp = to_mqp(ibqp);
  1136. struct net_device *ndev;
  1137. struct mlx4_ib_gid_entry *ge;
  1138. struct mlx4_flow_reg_id reg_id = {0, 0};
  1139. enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
  1140. if (mdev->dev->caps.steering_mode ==
  1141. MLX4_STEERING_MODE_DEVICE_MANAGED) {
  1142. struct mlx4_ib_steering *ib_steering;
  1143. mutex_lock(&mqp->mutex);
  1144. list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
  1145. if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
  1146. list_del(&ib_steering->list);
  1147. break;
  1148. }
  1149. }
  1150. mutex_unlock(&mqp->mutex);
  1151. if (&ib_steering->list == &mqp->steering_rules) {
  1152. pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
  1153. return -EINVAL;
  1154. }
  1155. reg_id = ib_steering->reg_id;
  1156. kfree(ib_steering);
  1157. }
  1158. err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
  1159. prot, reg_id.id);
  1160. if (err)
  1161. return err;
  1162. if (mlx4_is_bonded(dev)) {
  1163. err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
  1164. prot, reg_id.mirror);
  1165. if (err)
  1166. return err;
  1167. }
  1168. mutex_lock(&mqp->mutex);
  1169. ge = find_gid_entry(mqp, gid->raw);
  1170. if (ge) {
  1171. spin_lock_bh(&mdev->iboe.lock);
  1172. ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
  1173. if (ndev)
  1174. dev_hold(ndev);
  1175. spin_unlock_bh(&mdev->iboe.lock);
  1176. if (ndev)
  1177. dev_put(ndev);
  1178. list_del(&ge->list);
  1179. kfree(ge);
  1180. } else
  1181. pr_warn("could not find mgid entry\n");
  1182. mutex_unlock(&mqp->mutex);
  1183. return 0;
  1184. }
  1185. static int init_node_data(struct mlx4_ib_dev *dev)
  1186. {
  1187. struct ib_smp *in_mad = NULL;
  1188. struct ib_smp *out_mad = NULL;
  1189. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  1190. int err = -ENOMEM;
  1191. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  1192. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  1193. if (!in_mad || !out_mad)
  1194. goto out;
  1195. init_query_mad(in_mad);
  1196. in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
  1197. if (mlx4_is_master(dev->dev))
  1198. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  1199. err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
  1200. if (err)
  1201. goto out;
  1202. memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
  1203. in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
  1204. err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
  1205. if (err)
  1206. goto out;
  1207. dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
  1208. memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
  1209. out:
  1210. kfree(in_mad);
  1211. kfree(out_mad);
  1212. return err;
  1213. }

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
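
/*
 * Build a modified EUI-64 interface identifier from the netdev MAC
 * address, embedding the VLAN id when one below 0x1000 is supplied and
 * 0xfffe otherwise, then flip the universal/local bit.
 */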
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
				     struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
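
/*
 * Workqueue handler: push the cached GID table for a port to firmware via
 * SET_PORT and, on success, dispatch an IB_EVENT_GID_CHANGE to consumers.
 */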
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (!gw->dev->ib_active)
		return;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	if (err)
		pr_warn("set port command failed\n");
	else
		if ((gw->port == 1) || !is_bonded)
			mlx4_ib_dispatch_event(gw->dev,
					       is_bonded ? 1 : gw->port,
					       IB_EVENT_GID_CHANGE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

static void reset_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw =
			container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;

	if (!gw->dev->ib_active)
		return;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("reset gid table failed\n");
		goto free;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof(gw->gids));

	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
				    IB_LINK_LAYER_ETHERNET) {
		err = mlx4_cmd(dev, mailbox->dma,
			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
			       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_WRAPPED);
		if (err)
			pr_warn("set port %d command failed\n", gw->port);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
free:
	kfree(gw);
}
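
/*
 * Add (or clear) a GID in the software copy of the port GID table and
 * queue a firmware update only if the table actually changed.
 */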
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
			    union ib_gid *gid, int clear,
			    int default_gid)
{
	struct update_gid_work *work;
	int i;
	int need_update = 0;
	int free = -1;
	int found = -1;
	int max_gids;

	if (default_gid) {
		free = 0;
	} else {
		max_gids = dev->dev->caps.gid_table_len[port];
		for (i = 1; i < max_gids; ++i) {
			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
				    sizeof(*gid)))
				found = i;

			if (clear) {
				if (found >= 0) {
					need_update = 1;
					dev->iboe.gid_table[port - 1][found] =
						zgid;
					break;
				}
			} else {
				if (found >= 0)
					break;

				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i],
					    &zgid, sizeof(*gid)))
					free = i;
			}
		}
	}

	if (found == -1 && !clear && free >= 0) {
		dev->iboe.gid_table[port - 1][free] = *gid;
		need_update = 1;
	}

	if (!need_update)
		return 0;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
	INIT_WORK(&work->work, update_gids_task);
	work->port = port;
	work->dev = dev;
	queue_work(wq, &work->work);

	return 0;
}

static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}

static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
	struct update_gid_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
	memset(work->gids, 0, sizeof(work->gids));
	INIT_WORK(&work->work, reset_gids_task);
	work->dev = dev;
	work->port = port;
	queue_work(wq, &work->work);
	return 0;
}
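
/*
 * Common handler for inet/inet6 address notifier events: translate
 * NETDEV_UP/NETDEV_DOWN on a matching netdev into a GID add or remove.
 */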
static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
	struct mlx4_ib_iboe *iboe;
	int port = 0;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
				rdma_vlan_dev_real_dev(event_netdev) :
				event_netdev;
	union ib_gid default_gid;

	mlx4_make_default_gid(real_dev, &default_gid);

	if (!memcmp(gid, &default_gid, sizeof(*gid)))
		return 0;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		return 0;

	if ((real_dev != event_netdev) &&
	    (event == NETDEV_DOWN) &&
	    rdma_link_local_addr((struct in6_addr *)gid))
		return 0;

	iboe = &ibdev->iboe;
	spin_lock_bh(&iboe->lock);

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			update_gid_table(ibdev, port, gid,
					 event == NETDEV_DOWN, 0);

	spin_unlock_bh(&iboe->lock);
	return 0;
}

static u8 mlx4_ib_get_dev_port(struct net_device *dev,
			       struct mlx4_ib_dev *ibdev)
{
	u8 port = 0;
	struct mlx4_ib_iboe *iboe;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
				rdma_vlan_dev_real_dev(dev) : dev;

	iboe = &ibdev->iboe;

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			break;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return 0;
	else
		return port;
}

static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct in_ifaddr *ifa = ptr;
	union ib_gid gid;
	struct net_device *event_netdev = ifa->ifa_dev->dev;

	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);

	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
	return NOTIFY_DONE;
}

#if IS_ENABLED(CONFIG_IPV6)
static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
			       void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct inet6_ifaddr *ifa = ptr;
	union ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *event_netdev = ifa->idev->dev;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);

	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
	return NOTIFY_DONE;
}
#endif
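
/*
 * mlx4_ib_update_qps() below caches the new port MAC and, under SR-IOV,
 * re-registers the source MAC used by the proxy QP1 for that port.
 */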
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need to update QP1 or register a MAC when not in SR-IOV mode */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}

static void mlx4_ib_get_dev_addr(struct net_device *dev,
				 struct mlx4_ib_dev *ibdev, u8 port)
{
	struct in_device *in_dev;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev;
	union ib_gid *pgid;
	struct inet6_ifaddr *ifp;
	union ib_gid default_gid;
#endif
	union ib_gid gid;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return;

	/* IPv4 gids */
	in_dev = in_dev_get(dev);
	if (in_dev) {
		for_ifa(in_dev) {
			ipv6_addr_set_v4mapped(ifa->ifa_address,
					       (struct in6_addr *)&gid);
			update_gid_table(ibdev, port, &gid, 0, 0);
		}
		endfor_ifa(in_dev);
		in_dev_put(in_dev);
	}
#if IS_ENABLED(CONFIG_IPV6)
	mlx4_make_default_gid(dev, &default_gid);
	/* IPv6 gids */
	in6_dev = in6_dev_get(dev);
	if (in6_dev) {
		read_lock_bh(&in6_dev->lock);
		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
			pgid = (union ib_gid *)&ifp->addr;
			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
				continue;
			update_gid_table(ibdev, port, pgid, 0, 0);
		}
		read_unlock_bh(&in6_dev->lock);
		in6_dev_put(in6_dev);
	}
#endif
}

static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
				    struct net_device *dev, u8 port)
{
	union ib_gid gid;

	mlx4_make_default_gid(dev, &gid);
	update_gid_table(ibdev, port, &gid, 0, 1);
}

static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
	struct net_device *dev;
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	int i;
	int err = 0;

	for (i = 1; i <= ibdev->num_ports; ++i) {
		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = reset_gid_table(ibdev, i);
			if (err)
				goto out;
		}
	}

	read_lock(&dev_base_lock);
	spin_lock_bh(&iboe->lock);

	for_each_netdev(&init_net, dev) {
		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
		/* port will be non-zero only for ETH ports */
		if (port) {
			mlx4_ib_set_default_gid(ibdev, dev, port);
			mlx4_ib_get_dev_addr(dev, ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);
	read_unlock(&dev_base_lock);

out:
	return err;
}
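
/*
 * Re-sync the per-port netdev/bond-master pointers and GID tables after a
 * netdev notifier event, and remember which port needs a QP1 SMAC update.
 */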
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		enum ib_port_state port_state = IB_PORT_NOP;
		struct net_device *old_master = iboe->masters[port - 1];
		struct net_device *curr_netdev;
		struct net_device *curr_master;

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (iboe->netdevs[port - 1])
			mlx4_ib_set_default_gid(ibdev,
						iboe->netdevs[port - 1], port);
		curr_netdev = iboe->netdevs[port - 1];

		if (iboe->netdevs[port - 1] &&
		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
			iboe->masters[port - 1] = netdev_master_upper_dev_get(
				iboe->netdevs[port - 1]);
		} else {
			iboe->masters[port - 1] = NULL;
		}
		curr_master = iboe->masters[port - 1];

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (curr_netdev) {
			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
						IB_PORT_ACTIVE : IB_PORT_DOWN;
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
			if (curr_master) {
				/* if using bonding/team and a slave port is down, we
				 * don't want the bond IP based gids in the table since
				 * flows that select port by gid may get the down port.
				 */
				if (port_state == IB_PORT_DOWN &&
				    !mlx4_is_bonded(ibdev->dev)) {
					reset_gid_table(ibdev, port);
					mlx4_ib_set_default_gid(ibdev,
								curr_netdev,
								port);
				} else {
					/* gids from the upper dev (bond/team)
					 * should appear in the port's gid table
					 */
					mlx4_ib_get_dev_addr(curr_master,
							     ibdev, port);
				}
			}
			/* if bonding is used, it is possible that we add it to
			 * masters only after an IP address is assigned to the
			 * net bonding interface.
			 */
			if (curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
			}

			if (!curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
			}
		} else {
			reset_gid_table(ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}

static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}

static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port - 1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
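
/*
 * Carve dedicated completion EQs per IB port out of the device's comp_pool
 * and advertise the enlarged vector count to IB consumers.
 */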
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[80];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;

	/* Init eq table */
	added_eqs = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
				 i, j, dev->persist->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, NULL,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQ */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
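
/*
 * mlx4 core "add" callback: allocate the mlx4_ib device, set up its verbs
 * ops, counters, steering resources and notifiers, then register it.
 */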
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port = 0;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;

	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else {
			ibdev->counters[i] = -1;
		}
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports; ++i)
			ibdev->counters[i] = ibdev->counters[0];

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (!iboe->nb_inet.notifier_call) {
			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
			err = register_inetaddr_notifier(&iboe->nb_inet);
			if (err) {
				iboe->nb_inet.notifier_call = NULL;
				goto err_notif;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		if (!iboe->nb_inet6.notifier_call) {
			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
			err = register_inet6addr_notifier(&iboe->nb_inet6);
			if (err) {
				iboe->nb_inet6.notifier_call = NULL;
				goto err_notif;
			}
		}
#endif
		if (mlx4_ib_init_gid_table(ibdev))
			goto err_notif;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}

int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
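
/*
 * Queue per-port work items to create (do_init != 0) or tear down the
 * tunnel QPs used for paravirtualized MADs of the given slave.
 */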
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			for (i = 0; i < ports; i++) {
				if (dm[i])
					kfree(dm[i]);
			}
			goto out;
		}
	}
	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	kfree(dm);
	return;
}
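
/*
 * On a catastrophic device error, generate completion events for every CQ
 * that still has outstanding work so that consumers can drain and recover.
 */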
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over the qp list residing on that ibdev, sync with create/destroy qp */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}

static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
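
/*
 * Dispatch mlx4 core device events to the IB layer, translating them into
 * ib_event types and handling SR-IOV slave init/shutdown specially.
 */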
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
				    == IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);