/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 1;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct work;
	union ib_gid gids[128];
	struct mlx4_ib_dev *dev;
	int port;
};

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;
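
/*
 * Prime a Subnet Management Packet for a Get query: LID-routed
 * management class, base/class version 1.  Callers fill in attr_id
 * and attr_mod before handing the MAD to mlx4_MAD_IFC().
 */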
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;
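
/*
 * Device-managed flow steering (DMFS) is usable only if every port type
 * present on the device has the matching capability bit: FS_EN for
 * Ethernet ports and DMFS_IPOIB for IB ports.  It is also unsupported
 * for IB ports in a multifunction (SR-IOV) environment.
 */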
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}
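
/*
 * Fill in ib_device_attr from two sources: a NodeInfo MAD query against
 * the device's own SMA (vendor/part/hw IDs, system image GUID) and the
 * capability and quota fields already cached in mlx4_dev.
 */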
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int have_ib_ports;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
		if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
			props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
	}

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
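
/*
 * Query an IB (not RoCE) port via a PortInfo MAD and decode the fixed
 * byte offsets of the response into ib_port_attr.  A second MAD round
 * trip distinguishes extended speeds (FDR/EDR) and FDR-10 from the
 * base speed encoding.
 */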
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
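
/*
 * RoCE port attributes come from QUERY_PORT firmware output plus the
 * state of the paired Ethernet netdev: the port is ACTIVE only if the
 * netdev is running with carrier, and active_mtu tracks the netdev MTU.
 */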
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
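
/*
 * An IB GID is the 8-byte subnet prefix (from PortInfo) followed by an
 * 8-byte GUID (from GuidInfo, eight GUIDs per record).  In host view on
 * a multifunction device only index 0 is real; higher indexes return
 * the null GUID.
 */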
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&mdev->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
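
/*
 * Map doorbell pages into user space: page offset 0 is the context's
 * UAR page (mapped non-cached), offset 1 is its blue flame page
 * (mapped write-combining), which lives num_uars pages above the UAR
 * in the BAR.
 */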
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);
	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};
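
/*
 * Translate one ib_flow_spec into the hardware rule format in place at
 * *mlx4_spec.  Returns the number of bytes consumed in the rule buffer
 * (mlx4_spec->size is recorded in dwords), or -EINVAL for an unknown
 * spec type or one the device cannot steer.
 */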
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;

	case IB_FLOW_SPEC_IB:
		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn = cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask = cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
			MLX4_NET_TRANS_RULE_ID_TCP :
			MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8 link_layer;
};

static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
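
/*
 * Return the index of the default_table entry whose mandatory_fields
 * all appear in the (sorted) user specs and whose mandatory_not_fields
 * do not, matching on the port's link layer; -1 if no entry applies.
 */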
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}

static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec)
{
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);
			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put an empty rule; the qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
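
/*
 * Build a steering rule in a command mailbox: a
 * mlx4_net_trans_rule_hw_ctrl header (priority/domain, type, port, qpn)
 * followed by any matching default rules and then the user's specs,
 * and attach it with MLX4_QP_FLOW_STEERING_ATTACH.  The firmware
 * returns the registration id in *reg_id; the command takes the rule
 * size in dwords.
 */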
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Failed to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Failed to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Failed to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}

static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	int err = 0, i = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		type[0] = MLX4_FS_REGULAR;
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_UC_SNIFFER;
		type[1] = MLX4_FS_MC_SNIFFER;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i]);
		if (err)
			goto err_create_flow;
		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
		if (err)
			goto err_create_flow;
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	/* Unwind only the rules that were successfully attached;
	 * reg_id[i] itself was never written, so step back first.
	 */
	while (i) {
		i--;
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i]);
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}

static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
		if (err)
			ret = err;
		i++;
	}

	kfree(mflow);
	return ret;
}
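
/*
 * Attach a QP to a multicast group.  The protocol is chosen from byte 1
 * of the MGID (0x0e here denotes an IPv4 multicast mapped GID).  Under
 * device-managed steering the firmware registration id is kept on
 * mqp->steering_rules so mcg_detach can find it later.
 */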
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;
	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
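
/*
 * Build a modified EUI-64 interface id from the netdev MAC: the MAC is
 * split around two middle bytes that encode the VLAN id (or ff:fe when
 * there is no VLAN), and the universal/local bit is flipped.
 */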
  1188. static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
  1189. struct net_device *dev)
  1190. {
  1191. memcpy(eui, dev->dev_addr, 3);
  1192. memcpy(eui + 5, dev->dev_addr + 3, 3);
  1193. if (vlan_id < 0x1000) {
  1194. eui[3] = vlan_id >> 8;
  1195. eui[4] = vlan_id & 0xff;
  1196. } else {
  1197. eui[3] = 0xff;
  1198. eui[4] = 0xfe;
  1199. }
  1200. eui[0] ^= 2;
  1201. }
  1202. static void update_gids_task(struct work_struct *work)
  1203. {
  1204. struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
  1205. struct mlx4_cmd_mailbox *mailbox;
  1206. union ib_gid *gids;
  1207. int err;
  1208. struct mlx4_dev *dev = gw->dev->dev;
  1209. if (!gw->dev->ib_active)
  1210. return;
  1211. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1212. if (IS_ERR(mailbox)) {
  1213. pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
  1214. return;
  1215. }
  1216. gids = mailbox->buf;
  1217. memcpy(gids, gw->gids, sizeof gw->gids);
  1218. err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
  1219. 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1220. MLX4_CMD_WRAPPED);
  1221. if (err)
  1222. pr_warn("set port command failed\n");
  1223. else
  1224. mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
  1225. mlx4_free_cmd_mailbox(dev, mailbox);
  1226. kfree(gw);
  1227. }
static void reset_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw =
			container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;

	if (!gw->dev->ib_active)
		return;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("reset gid table failed\n");
		goto free;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof(gw->gids));

	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
				    IB_LINK_LAYER_ETHERNET) {
		err = mlx4_cmd(dev, mailbox->dma,
			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
			       1, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_WRAPPED);
		if (err)
			/* pr_warn() already adds the log level; the stray
			 * KERN_WARNING prefix here would corrupt the message */
			pr_warn("set port %d command failed\n", gw->port);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
free:
	kfree(gw);
}
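
/*
 * Maintain the shadow GID table for one port: on clear, zero the
 * matching entry; otherwise install the GID in the first free slot
 * (slot 0 is reserved for the default GID). When the table changed,
 * queue update_gids_task() to sync it to hardware. Callers hold
 * iboe->lock, hence the GFP_ATOMIC allocation.
 */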
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
			    union ib_gid *gid, int clear,
			    int default_gid)
{
	struct update_gid_work *work;
	int i;
	int need_update = 0;
	int free = -1;
	int found = -1;
	int max_gids;

	if (default_gid) {
		free = 0;
	} else {
		max_gids = dev->dev->caps.gid_table_len[port];
		for (i = 1; i < max_gids; ++i) {
			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
				    sizeof(*gid)))
				found = i;

			if (clear) {
				if (found >= 0) {
					need_update = 1;
					dev->iboe.gid_table[port - 1][found] =
						zgid;
					break;
				}
			} else {
				if (found >= 0)
					break;

				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i],
					    &zgid, sizeof(*gid)))
					free = i;
			}
		}
	}

	if (found == -1 && !clear && free >= 0) {
		dev->iboe.gid_table[port - 1][free] = *gid;
		need_update = 1;
	}

	if (!need_update)
		return 0;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
	INIT_WORK(&work->work, update_gids_task);
	work->port = port;
	work->dev = dev;
	queue_work(wq, &work->work);

	return 0;
}
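
/*
 * Build the port's default RoCE GID: the IPv6 link-local prefix
 * (fe80::/64) plus the MAC-derived interface ID; vlan_id 0xffff selects
 * the untagged 0xFF/0xFE filler bytes.
 */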
static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}

static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
{
	struct update_gid_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
	memset(work->gids, 0, sizeof(work->gids));
	INIT_WORK(&work->work, reset_gids_task);
	work->dev = dev;
	work->port = port;
	queue_work(wq, &work->work);
	return 0;
}
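
/*
 * Common handler for inet/inet6 address notifications: map the event
 * netdev (or the real device under a VLAN device) to the HCA port it
 * backs and add or remove the corresponding GID. The default GID is
 * managed separately and is skipped here.
 */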
static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
			      struct mlx4_ib_dev *ibdev, union ib_gid *gid)
{
	struct mlx4_ib_iboe *iboe;
	int port = 0;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
				rdma_vlan_dev_real_dev(event_netdev) :
				event_netdev;
	union ib_gid default_gid;

	mlx4_make_default_gid(real_dev, &default_gid);

	if (!memcmp(gid, &default_gid, sizeof(*gid)))
		return 0;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		return 0;

	if ((real_dev != event_netdev) &&
	    (event == NETDEV_DOWN) &&
	    rdma_link_local_addr((struct in6_addr *)gid))
		return 0;

	iboe = &ibdev->iboe;
	spin_lock_bh(&iboe->lock);

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			update_gid_table(ibdev, port, gid,
					 event == NETDEV_DOWN, 0);

	spin_unlock_bh(&iboe->lock);
	return 0;
}
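
/* Return the HCA port (1-based) served by @dev, or 0 if none matches. */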
static u8 mlx4_ib_get_dev_port(struct net_device *dev,
			       struct mlx4_ib_dev *ibdev)
{
	u8 port = 0;
	struct mlx4_ib_iboe *iboe;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
				rdma_vlan_dev_real_dev(dev) : dev;

	iboe = &ibdev->iboe;

	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
		if ((netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->masters[port - 1])) ||
		    (!netif_is_bond_master(real_dev) &&
		     (real_dev == iboe->netdevs[port - 1])))
			break;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return 0;
	else
		return port;
}
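
/* IPv4 addresses are carried as v4-mapped IPv6 GIDs (::ffff:a.b.c.d). */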
static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct in_ifaddr *ifa = ptr;
	union ib_gid gid;
	struct net_device *event_netdev = ifa->ifa_dev->dev;

	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);

	mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
	return NOTIFY_DONE;
}

#if IS_ENABLED(CONFIG_IPV6)
static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
			       void *ptr)
{
	struct mlx4_ib_dev *ibdev;
	struct inet6_ifaddr *ifa = ptr;
	union ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *event_netdev = ifa->idev->dev;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);

	mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
	return NOTIFY_DONE;
}
#endif

#define MLX4_IB_INVALID_MAC	((u64)-1)
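
/*
 * An address change on the netdev backing a port: record the new source
 * MAC and, under SR-IOV, re-register it and update the proxy QP1's
 * source MAC index, releasing whichever MAC is no longer needed.
 */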
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need to update QP1 or register a MAC in non-SRIOV */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
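
/*
 * Populate the port's GID table from all IPv4 (v4-mapped) and IPv6
 * addresses currently configured on @dev, skipping the separately
 * managed default GID.
 */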
static void mlx4_ib_get_dev_addr(struct net_device *dev,
				 struct mlx4_ib_dev *ibdev, u8 port)
{
	struct in_device *in_dev;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev;
	union ib_gid *pgid;
	struct inet6_ifaddr *ifp;
	union ib_gid default_gid;
#endif
	union ib_gid gid;

	if ((port == 0) || (port > ibdev->dev->caps.num_ports))
		return;

	/* IPv4 gids */
	in_dev = in_dev_get(dev);
	if (in_dev) {
		for_ifa(in_dev) {
			ipv6_addr_set_v4mapped(ifa->ifa_address,
					       (struct in6_addr *)&gid);
			update_gid_table(ibdev, port, &gid, 0, 0);
		}
		endfor_ifa(in_dev);
		in_dev_put(in_dev);
	}
#if IS_ENABLED(CONFIG_IPV6)
	mlx4_make_default_gid(dev, &default_gid);
	/* IPv6 gids */
	in6_dev = in6_dev_get(dev);
	if (in6_dev) {
		read_lock_bh(&in6_dev->lock);
		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
			pgid = (union ib_gid *)&ifp->addr;
			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
				continue;
			update_gid_table(ibdev, port, pgid, 0, 0);
		}
		read_unlock_bh(&in6_dev->lock);
		in6_dev_put(in6_dev);
	}
#endif
}

static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
				    struct net_device *dev, u8 port)
{
	union ib_gid gid;

	mlx4_make_default_gid(dev, &gid);
	update_gid_table(ibdev, port, &gid, 0, 1);
}
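
/*
 * One-time table build at device init: reset the GID tables of all
 * Ethernet ports, then walk every netdev in init_net and install its
 * default GID and addresses on the port it serves.
 */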
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
	struct net_device *dev;
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	int i;
	int err = 0;

	for (i = 1; i <= ibdev->num_ports; ++i) {
		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = reset_gid_table(ibdev, i);
			if (err)
				goto out;
		}
	}

	read_lock(&dev_base_lock);
	spin_lock_bh(&iboe->lock);

	for_each_netdev(&init_net, dev) {
		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
		/* port will be non-zero only for ETH ports */
		if (port) {
			mlx4_ib_set_default_gid(ibdev, dev, port);
			mlx4_ib_get_dev_addr(dev, ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);
	read_unlock(&dev_base_lock);
out:
	return err;
}
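
/*
 * Re-sync per-port netdev/bond-master bookkeeping and GID tables after
 * a netdev notifier event, and remember which port (if any) needs its
 * QP1 source MAC updated once iboe->lock is dropped.
 */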
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		enum ib_port_state port_state = IB_PORT_NOP;
		struct net_device *old_master = iboe->masters[port - 1];
		struct net_device *curr_netdev;
		struct net_device *curr_master;

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (iboe->netdevs[port - 1])
			mlx4_ib_set_default_gid(ibdev,
						iboe->netdevs[port - 1], port);
		curr_netdev = iboe->netdevs[port - 1];

		if (iboe->netdevs[port - 1] &&
		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
			iboe->masters[port - 1] = netdev_master_upper_dev_get(
				iboe->netdevs[port - 1]);
		} else {
			iboe->masters[port - 1] = NULL;
		}
		curr_master = iboe->masters[port - 1];

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (curr_netdev) {
			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
						IB_PORT_ACTIVE : IB_PORT_DOWN;
			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
			if (curr_master) {
				/* if using bonding/team and a slave port is down, we
				 * don't want the bond IP based gids in the table since
				 * flows that select port by gid may get the down port.
				 */
				if (port_state == IB_PORT_DOWN) {
					reset_gid_table(ibdev, port);
					mlx4_ib_set_default_gid(ibdev,
								curr_netdev,
								port);
				} else {
					/* gids from the upper dev (bond/team)
					 * should appear in the port's gid table
					 */
					mlx4_ib_get_dev_addr(curr_master,
							     ibdev, port);
				}
			}
			/* when bonding is used, the bond master may be added
			 * to masters[] only after an IP address is assigned
			 * to the bonding interface, so rebuild the table
			 * whenever the master changes.
			 */
			if (curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
			}

			if (!curr_master && (old_master != curr_master)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev, port);
				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
			}
		} else {
			reset_gid_table(ibdev, port);
		}
	}

	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}

static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
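
/*
 * On the SR-IOV master, seed the virt-to-phys pkey mapping for every
 * slave (identity for the master and for index 0, last table entry
 * otherwise) and initialize the physical pkey cache.
 */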
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
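
/*
 * Carve dedicated completion EQs out of the device's EQ pool, one group
 * per IB port, falling back to the legacy shared vectors when the pool
 * is too small or an assignment fails.
 */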
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[80];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;

	/* Init eq table */
	added_eqs = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
				 i, j, dev->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, NULL,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQs */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
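
/*
 * Probe path for an mlx4 device: allocate the IB device, PD and UAR,
 * wire up the verbs entry points and optional features (FMR, memory
 * windows, XRC, device-managed flow steering), register with the IB
 * core, and hook the netdev/inet/inet6 notifiers used for RoCE GID
 * management. Returns the new ibdev, or NULL on failure.
 */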
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;

		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else {
			ibdev->counters[i] = -1;
		}
	}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (!iboe->nb_inet.notifier_call) {
			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
			err = register_inetaddr_notifier(&iboe->nb_inet);
			if (err) {
				iboe->nb_inet.notifier_call = NULL;
				goto err_notif;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		if (!iboe->nb_inet6.notifier_call) {
			iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
			err = register_inet6addr_notifier(&iboe->nb_inet6);
			if (err) {
				iboe->nb_inet6.notifier_call = NULL;
				goto err_notif;
			}
		}
#endif
		if (mlx4_ib_init_gid_table(ibdev))
			goto err_notif;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
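
/*
 * Reserve and release power-of-two-aligned blocks of QPNs from the
 * range set aside for device-managed flow steering.
 */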
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
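
/*
 * Attach or detach a catch-all IB L2 steering rule for a UC QP in
 * device-managed steering mode; the rule handle is kept in mqp->reg_id
 * for the later detach.
 */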
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	ibdev->ib_active = false;
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (ibdev->iboe.nb_inet6.notifier_call) {
		if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet6.notifier_call = NULL;
	}
#endif

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
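
/*
 * On the SR-IOV master, queue per-port work items that set up
 * (do_init = 1) or tear down (do_init = 0) the paravirtualized tunnel
 * QPs for one slave.
 */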
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			/* dm[] holds only 'ports' entries (kcalloc-zeroed),
			 * so free just those; kfree(NULL) is a no-op */
			for (i = 0; i < ports; i++)
				kfree(dm[i]);
			goto out;
		}
	}
	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	kfree(dm);
	return;
}
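
/*
 * Translate mlx4 core events into IB events. Port management change
 * EQEs go to a work item (queued on the master, which owns GEN_EQE, and
 * handled inline otherwise); slave init/shutdown events drive tunnel QP
 * setup and teardown.
 */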
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = (u8) p;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);