/* mad.c */

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
#include <linux/mlx4/driver.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
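
/*
 * Layout of the tunnel work request IDs built from the macros above
 * (a sketch derived from how they are used later in this file):
 *
 *   bits [31:0]  - index of the buffer in the tunnel rx/tx ring
 *   bits [33:32] - proxy QP type the buffer belongs to (0 = SMI, 1 = GSI)
 *   bit  34      - set for receive completions, clear for sends
 *
 * e.g. a receive posted on the GSI tunnel QP with ring index 5 carries
 * wr_id = 5 | MLX4_TUN_SET_WRID_QPN(1) | MLX4_TUN_WRID_RECV.
 */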

 /* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}
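
/*
 * TIDs generated here carry 0xff in their most-significant byte; the demux
 * path below (mlx4_ib_demux_mad()) reads that byte back out of response
 * MADs to recover the slave, with 255 standing for the master (dom0).
 */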
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc,
		 const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;
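	/*
	 * op_modifier bits, as set below: 0x1 - ignore M_Key check,
	 * 0x2 - ignore B_Key check, 0x4 - the extended WC info copied at
	 * inbox + 256 is valid, 0x8 - issue the command in network view
	 * (MLX4_CMD_NATIVE).
	 */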
	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
		ext_info->sl = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = lid;
	ah_attr.sl = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/*if master, notify relevant slaves*/
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}

static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}
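
/*
 * Map a wire P_Key to an index in the slave's virtual P_Key table
 * (a summary of the logic below): a full-membership match (bit 15 set)
 * is returned immediately; otherwise the first partial (limited)
 * membership match found is used as a fallback.
 */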
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
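
	/*
	 * Destination QPN for the tunnelled MAD: it falls inside the slave's
	 * block of eight special QPNs starting at base_proxy_sqpn + 8 * slave,
	 * at offset (port - 1) + 2 * dest_qpt (dest_qpt is 0 for SMI, 1 for
	 * GSI), which is what the arithmetic below works out to.
	 */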
	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate tunnel tx buf after pass failure returns */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.remote_qkey = IB_QP_SET_QKEY;
	wr.remote_qpn = dqpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
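
/*
 * Demultiplex a MAD that arrived from the wire on the master: decide which
 * slave (or the master itself) it is destined for and, if needed, tunnel it
 * to that slave via mlx4_ib_send_to_slave().
 */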
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err, other_port;
	int slave = -1;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
		if (err && mlx4_is_mf_bonded(dev->dev)) {
			other_port = (port == 1) ? 2 : 1;
			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
			if (!err) {
				port = other_port;
				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
					 slave, grh->dgid.raw, port, other_port);
			}
		}
		if (err) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /*255 indicates the dom0*/
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF. drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* Make sure a slave id of 255 (dom0 marker) that was not handled above doesn't slip through. */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
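
/*
 * Note on the conversions below: the IB PMA PortCounters data counters are
 * reported in units of 32-bit words, which is why the byte counters read
 * from the HCA are shifted right by two before being copied out.
 */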
static void edit_counter(struct mlx4_counter *cnt, void *counters,
			 __be16 attr_id)
{
	switch (attr_id) {
	case IB_PMA_PORT_COUNTERS:
	{
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)counters;

		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
				     (be64_to_cpu(cnt->tx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
				     (be64_to_cpu(cnt->rx_bytes) >> 2));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
				     be64_to_cpu(cnt->tx_frames));
		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
				     be64_to_cpu(cnt->rx_frames));
		break;
	}
	case IB_PMA_PORT_COUNTERS_EXT:
	{
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)counters;

		pma_cnt_ext->port_xmit_data =
			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
		break;
	}
	}
}

static int iboe_process_mad_port_info(void *out_mad)
{
	struct ib_class_port_info cpi = {};

	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	memcpy(out_mad, &cpi, sizeof(cpi));
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			    const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_counter counter_stats;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct counter_index *tmp_counter;
	int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
		return iboe_process_mad_port_info((void *)(out_mad->data + 40));

	memset(&counter_stats, 0, sizeof(counter_stats));
	mutex_lock(&dev->counters_table[port_num - 1].mutex);
	list_for_each_entry(tmp_counter,
			    &dev->counters_table[port_num - 1].counters_list,
			    list) {
		err = mlx4_get_counter_stats(dev->dev,
					     tmp_counter->index,
					     &counter_stats, 0);
		if (err) {
			err = IB_MAD_RESULT_FAILURE;
			stats_avail = 0;
			break;
		}
		stats_avail = 1;
	}
	mutex_unlock(&dev->counters_table[port_num - 1].mutex);
	if (stats_avail) {
		memset(out_mad->data, 0, sizeof out_mad->data);
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40),
				     in_mad->mad_hdr.attr_id);
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;
	enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	/* iboe_process_mad(), which uses the HCA flow-counters to implement IB
	 * PMA queries, should be called only by VFs and only for that specific
	 * purpose.
	 */
	if (link == IB_LINK_LAYER_INFINIBAND) {
		if (mlx4_is_slave(dev->dev) &&
		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
						in_grh, in_mad, out_mad);

		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	}

	if (link == IB_LINK_LAYER_ETHERNET)
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);

	return -EINVAL;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL, 0);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version = 1;
		in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method = IB_MGMT_METHOD_GET;
		in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/*if master, notify all slaves*/
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/*if master, notify relevant slaves*/
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device = &dev->ib_dev;
	event.element.port_num = port_num;
	event.event = type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->pd->local_dma_lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}
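
/*
 * Each slave owns a contiguous block of eight special QPNs starting at
 * base_proxy_sqpn + 8 * slave; the first two entries of that block are the
 * QP0 (SMI) proxies, one per port, which is what is being tested here.
 */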
static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.pkey_index = wire_pkey_ix;
	wr.remote_qkey = qkey;
	wr.remote_qpn = remote_qpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;
	if (s_mac)
		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
	if (vlan_id < 0x1000)
		vlan_id |= (attr->sl & 7) << 13;
	to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);

	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}

static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct ib_ah_attr *ah_attr)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		ah_attr->grh.sgid_index = slave;
	else
		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
}
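
/*
 * Multiplexing runs on the master: a MAD tunnelled up from a slave is
 * validated (right slave, sane source QPN, permitted class), its TID is
 * stamped with the slave id, and it is then re-sent on the real wire QP
 * via mlx4_ib_send_to_wire().
 */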
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;
	u16 vlan_id;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}
	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;

	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
	port = mlx4_slave_convert_port(dev->dev, slave, port);
	if (port < 0)
		return;
	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));

	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (ah_attr.ah_flags & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);

	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
				    &vlan_id, &ah_attr.sl);

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, wc->smac, vlan_id, &tunnel->mad);
}
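
/*
 * Allocate and DMA-map the receive and send rings for one para-virtualized
 * QP (SMI or GSI). Tunnel QPs and real special QPs use different buffer
 * layouts, hence the is_tun-dependent sizes below.
 */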
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
			kfree(tun_qp->ring[i].addr);
			goto err;
		}
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev,
					 tun_qp->tx_ring[i].buf.map)) {
			kfree(tun_qp->tx_ring[i].buf.addr);
			goto tx_err;
		}
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}
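
/*
 * Undo mlx4_ib_alloc_pv_bufs(): unmap and free both rings, destroying any
 * address handle still attached to an outstanding send buffer.
 */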
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}
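
/*
 * Completion worker for the tunnel QPs: received MADs are multiplexed onto
 * the wire on behalf of the owning slave, while send completions just
 * release their tx-ring slot and address handle.
 */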
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}
static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}
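
/*
 * Create a tunnel QP (for a slave) or a proxy special QP (for the master)
 * and walk it through the INIT -> RTR -> RTS transitions before pre-posting
 * the full ring of receive buffers.
 */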
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
				    IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	ret = 0;
	if (create_tun)
		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
					      ctx->port, IB_DEFAULT_PKEY_FULL,
					      &attr.pkey_index);
	if (ret || !create_tun)
		attr.pkey_index =
			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}
/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP "
					       "buf:%lld\n", wc.wr_id);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}
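
/*
 * Allocate the per-slave, per-port para-virtualization context; the QPs,
 * CQ and buffers themselves are set up later by create_pv_resources().
 */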
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}
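
/*
 * Bring up the para-virtualization resources for one slave/port: ring
 * buffers, a shared CQ and PD, QP0 (IB link layer only) and QP1, and the
 * completion worker that services them.
 */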
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;
	struct ib_cq_init_attr cq_attr = {};

	if (ctx->state != DEMUX_PV_STATE_DOWN)
		return -EEXIST;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only if link layer is IB */
	if (rdma_port_get_link_layer(ibdev, ctx->port) ==
	    IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
	if (ctx->has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
			       NULL, ctx, &cq_attr);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_pd;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}
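
/*
 * Tear down everything created by create_pv_resources(), optionally
 * flushing the work queue first so no completion worker is still running.
 */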
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
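
/*
 * Create or destroy the tunnel QP resources for one slave on one port; for
 * the master this also covers the real special QP resources.
 */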
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
	return;
}
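
/*
 * Set up the per-port demux context: one pv context slot per active
 * function, the multicast para-virtualization state, and the tunnel and
 * up/down work queues.
 */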
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0;
	     i < min(dev->dev->caps.sqp_demux,
		     (u16)(dev->dev->persist->num_vfs + 1));
	     i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev->dev, i);

		if (!test_bit(port - 1, actv_ports.ports))
			continue;

		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_mcg;
		}
	}

	ret = mlx4_ib_mcg_port_init(ctx);
	if (ret) {
		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
		goto err_mcg;
	}

	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);

err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}
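
/*
 * Tear down the master's real special QP context (the non-tunnel
 * counterpart of destroy_pv_resources()).
 */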
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}
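
/*
 * Tear down a per-port demux context: mark all slave contexts as going
 * down, flush the work queue, then free the per-slave resources and the
 * work queues themselves.
 */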
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;
	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
	return;
}
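
/*
 * Initialize SR-IOV para-virtualization support: CM and alias-GUID
 * services, sysfs entries, and a demux context per port. Slaves return
 * early since only the master demultiplexes MAD traffic.
 */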
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))
			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
		else
			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}
	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);

	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;
		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto free_pv;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

free_pv:
	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
	while (--i >= 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}
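
/*
 * Shut down SR-IOV support: flag the device as going down, then (on the
 * master) flush the up/down work queues and release the special QP and
 * demux contexts for every port.
 */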
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}