main.c 82 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
29442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/module.h>
  34. #include <linux/init.h>
  35. #include <linux/slab.h>
  36. #include <linux/errno.h>
  37. #include <linux/netdevice.h>
  38. #include <linux/inetdevice.h>
  39. #include <linux/rtnetlink.h>
  40. #include <linux/if_vlan.h>
  41. #include <net/ipv6.h>
  42. #include <net/addrconf.h>
  43. #include <net/devlink.h>
  44. #include <rdma/ib_smi.h>
  45. #include <rdma/ib_user_verbs.h>
  46. #include <rdma/ib_addr.h>
  47. #include <rdma/ib_cache.h>
  48. #include <net/bonding.h>
  49. #include <linux/mlx4/driver.h>
  50. #include <linux/mlx4/cmd.h>
  51. #include <linux/mlx4/qp.h>
  52. #include "mlx4_ib.h"
  53. #include "user.h"
  54. #define DRV_NAME MLX4_IB_DRV_NAME
  55. #define DRV_VERSION "2.2-1"
  56. #define DRV_RELDATE "Feb 2014"
  57. #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
  58. #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
  59. #define MLX4_IB_CARD_REV_A0 0xA0
  60. MODULE_AUTHOR("Roland Dreier");
  61. MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
  62. MODULE_LICENSE("Dual BSD/GPL");
  63. MODULE_VERSION(DRV_VERSION);
  64. int mlx4_ib_sm_guid_assign = 0;
  65. module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
  66. MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
  67. static const char mlx4_ib_version[] =
  68. DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
  69. DRV_VERSION " (" DRV_RELDATE ")\n";
  70. static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
  71. static struct workqueue_struct *wq;
  72. static void init_query_mad(struct ib_smp *mad)
  73. {
  74. mad->base_version = 1;
  75. mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
  76. mad->class_version = 1;
  77. mad->method = IB_MGMT_METHOD_GET;
  78. }
  79. static int check_flow_steering_support(struct mlx4_dev *dev)
  80. {
  81. int eth_num_ports = 0;
  82. int ib_num_ports = 0;
  83. int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
  84. if (dmfs) {
  85. int i;
  86. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
  87. eth_num_ports++;
  88. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
  89. ib_num_ports++;
  90. dmfs &= (!ib_num_ports ||
  91. (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
  92. (!eth_num_ports ||
  93. (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
  94. if (ib_num_ports && mlx4_is_mfunc(dev)) {
  95. pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
  96. dmfs = 0;
  97. }
  98. }
  99. return dmfs;
  100. }
  101. static int num_ib_ports(struct mlx4_dev *dev)
  102. {
  103. int ib_ports = 0;
  104. int i;
  105. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
  106. ib_ports++;
  107. return ib_ports;
  108. }
  109. static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
  110. {
  111. struct mlx4_ib_dev *ibdev = to_mdev(device);
  112. struct net_device *dev;
  113. rcu_read_lock();
  114. dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
  115. if (dev) {
  116. if (mlx4_is_bonded(ibdev->dev)) {
  117. struct net_device *upper = NULL;
  118. upper = netdev_master_upper_dev_get_rcu(dev);
  119. if (upper) {
  120. struct net_device *active;
  121. active = bond_option_active_slave_get_rcu(netdev_priv(upper));
  122. if (active)
  123. dev = active;
  124. }
  125. }
  126. }
  127. if (dev)
  128. dev_hold(dev);
  129. rcu_read_unlock();
  130. return dev;
  131. }
  132. static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
  133. struct mlx4_ib_dev *ibdev,
  134. u8 port_num)
  135. {
  136. struct mlx4_cmd_mailbox *mailbox;
  137. int err;
  138. struct mlx4_dev *dev = ibdev->dev;
  139. int i;
  140. union ib_gid *gid_tbl;
  141. mailbox = mlx4_alloc_cmd_mailbox(dev);
  142. if (IS_ERR(mailbox))
  143. return -ENOMEM;
  144. gid_tbl = mailbox->buf;
  145. for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
  146. memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
  147. err = mlx4_cmd(dev, mailbox->dma,
  148. MLX4_SET_PORT_GID_TABLE << 8 | port_num,
  149. 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  150. MLX4_CMD_WRAPPED);
  151. if (mlx4_is_bonded(dev))
  152. err += mlx4_cmd(dev, mailbox->dma,
  153. MLX4_SET_PORT_GID_TABLE << 8 | 2,
  154. 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  155. MLX4_CMD_WRAPPED);
  156. mlx4_free_cmd_mailbox(dev, mailbox);
  157. return err;
  158. }
  159. static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
  160. struct mlx4_ib_dev *ibdev,
  161. u8 port_num)
  162. {
  163. struct mlx4_cmd_mailbox *mailbox;
  164. int err;
  165. struct mlx4_dev *dev = ibdev->dev;
  166. int i;
  167. struct {
  168. union ib_gid gid;
  169. __be32 rsrvd1[2];
  170. __be16 rsrvd2;
  171. u8 type;
  172. u8 version;
  173. __be32 rsrvd3;
  174. } *gid_tbl;
  175. mailbox = mlx4_alloc_cmd_mailbox(dev);
  176. if (IS_ERR(mailbox))
  177. return -ENOMEM;
  178. gid_tbl = mailbox->buf;
  179. for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
  180. memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
  181. if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
  182. gid_tbl[i].version = 2;
  183. if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
  184. gid_tbl[i].type = 1;
  185. else
  186. memset(&gid_tbl[i].gid, 0, 12);
  187. }
  188. }
  189. err = mlx4_cmd(dev, mailbox->dma,
  190. MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
  191. 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  192. MLX4_CMD_WRAPPED);
  193. if (mlx4_is_bonded(dev))
  194. err += mlx4_cmd(dev, mailbox->dma,
  195. MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
  196. 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  197. MLX4_CMD_WRAPPED);
  198. mlx4_free_cmd_mailbox(dev, mailbox);
  199. return err;
  200. }
  201. static int mlx4_ib_update_gids(struct gid_entry *gids,
  202. struct mlx4_ib_dev *ibdev,
  203. u8 port_num)
  204. {
  205. if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
  206. return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
  207. return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
  208. }
  209. static int mlx4_ib_add_gid(struct ib_device *device,
  210. u8 port_num,
  211. unsigned int index,
  212. const union ib_gid *gid,
  213. const struct ib_gid_attr *attr,
  214. void **context)
  215. {
  216. struct mlx4_ib_dev *ibdev = to_mdev(device);
  217. struct mlx4_ib_iboe *iboe = &ibdev->iboe;
  218. struct mlx4_port_gid_table *port_gid_table;
  219. int free = -1, found = -1;
  220. int ret = 0;
  221. int hw_update = 0;
  222. int i;
  223. struct gid_entry *gids = NULL;
  224. if (!rdma_cap_roce_gid_table(device, port_num))
  225. return -EINVAL;
  226. if (port_num > MLX4_MAX_PORTS)
  227. return -EINVAL;
  228. if (!context)
  229. return -EINVAL;
  230. port_gid_table = &iboe->gids[port_num - 1];
  231. spin_lock_bh(&iboe->lock);
  232. for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
  233. if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
  234. (port_gid_table->gids[i].gid_type == attr->gid_type)) {
  235. found = i;
  236. break;
  237. }
  238. if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
  239. free = i; /* HW has space */
  240. }
  241. if (found < 0) {
  242. if (free < 0) {
  243. ret = -ENOSPC;
  244. } else {
  245. port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
  246. if (!port_gid_table->gids[free].ctx) {
  247. ret = -ENOMEM;
  248. } else {
  249. *context = port_gid_table->gids[free].ctx;
  250. memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
  251. port_gid_table->gids[free].gid_type = attr->gid_type;
  252. port_gid_table->gids[free].ctx->real_index = free;
  253. port_gid_table->gids[free].ctx->refcount = 1;
  254. hw_update = 1;
  255. }
  256. }
  257. } else {
  258. struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
  259. *context = ctx;
  260. ctx->refcount++;
  261. }
  262. if (!ret && hw_update) {
  263. gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
  264. if (!gids) {
  265. ret = -ENOMEM;
  266. } else {
  267. for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
  268. memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
  269. gids[i].gid_type = port_gid_table->gids[i].gid_type;
  270. }
  271. }
  272. }
  273. spin_unlock_bh(&iboe->lock);
  274. if (!ret && hw_update) {
  275. ret = mlx4_ib_update_gids(gids, ibdev, port_num);
  276. kfree(gids);
  277. }
  278. return ret;
  279. }
  280. static int mlx4_ib_del_gid(struct ib_device *device,
  281. u8 port_num,
  282. unsigned int index,
  283. void **context)
  284. {
  285. struct gid_cache_context *ctx = *context;
  286. struct mlx4_ib_dev *ibdev = to_mdev(device);
  287. struct mlx4_ib_iboe *iboe = &ibdev->iboe;
  288. struct mlx4_port_gid_table *port_gid_table;
  289. int ret = 0;
  290. int hw_update = 0;
  291. struct gid_entry *gids = NULL;
  292. if (!rdma_cap_roce_gid_table(device, port_num))
  293. return -EINVAL;
  294. if (port_num > MLX4_MAX_PORTS)
  295. return -EINVAL;
  296. port_gid_table = &iboe->gids[port_num - 1];
  297. spin_lock_bh(&iboe->lock);
  298. if (ctx) {
  299. ctx->refcount--;
  300. if (!ctx->refcount) {
  301. unsigned int real_index = ctx->real_index;
  302. memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
  303. kfree(port_gid_table->gids[real_index].ctx);
  304. port_gid_table->gids[real_index].ctx = NULL;
  305. hw_update = 1;
  306. }
  307. }
  308. if (!ret && hw_update) {
  309. int i;
  310. gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
  311. if (!gids) {
  312. ret = -ENOMEM;
  313. } else {
  314. for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
  315. memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
  316. }
  317. }
  318. spin_unlock_bh(&iboe->lock);
  319. if (!ret && hw_update) {
  320. ret = mlx4_ib_update_gids(gids, ibdev, port_num);
  321. kfree(gids);
  322. }
  323. return ret;
  324. }
  325. int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
  326. u8 port_num, int index)
  327. {
  328. struct mlx4_ib_iboe *iboe = &ibdev->iboe;
  329. struct gid_cache_context *ctx = NULL;
  330. union ib_gid gid;
  331. struct mlx4_port_gid_table *port_gid_table;
  332. int real_index = -EINVAL;
  333. int i;
  334. int ret;
  335. unsigned long flags;
  336. struct ib_gid_attr attr;
  337. if (port_num > MLX4_MAX_PORTS)
  338. return -EINVAL;
  339. if (mlx4_is_bonded(ibdev->dev))
  340. port_num = 1;
  341. if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
  342. return index;
  343. ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
  344. if (ret)
  345. return ret;
  346. if (attr.ndev)
  347. dev_put(attr.ndev);
  348. if (!memcmp(&gid, &zgid, sizeof(gid)))
  349. return -EINVAL;
  350. spin_lock_irqsave(&iboe->lock, flags);
  351. port_gid_table = &iboe->gids[port_num - 1];
  352. for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
  353. if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
  354. attr.gid_type == port_gid_table->gids[i].gid_type) {
  355. ctx = port_gid_table->gids[i].ctx;
  356. break;
  357. }
  358. if (ctx)
  359. real_index = ctx->real_index;
  360. spin_unlock_irqrestore(&iboe->lock, flags);
  361. return real_index;
  362. }
  363. static int mlx4_ib_query_device(struct ib_device *ibdev,
  364. struct ib_device_attr *props,
  365. struct ib_udata *uhw)
  366. {
  367. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  368. struct ib_smp *in_mad = NULL;
  369. struct ib_smp *out_mad = NULL;
  370. int err = -ENOMEM;
  371. int have_ib_ports;
  372. struct mlx4_uverbs_ex_query_device cmd;
  373. struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
  374. struct mlx4_clock_params clock_params;
  375. if (uhw->inlen) {
  376. if (uhw->inlen < sizeof(cmd))
  377. return -EINVAL;
  378. err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
  379. if (err)
  380. return err;
  381. if (cmd.comp_mask)
  382. return -EINVAL;
  383. if (cmd.reserved)
  384. return -EINVAL;
  385. }
  386. resp.response_length = offsetof(typeof(resp), response_length) +
  387. sizeof(resp.response_length);
  388. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  389. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  390. if (!in_mad || !out_mad)
  391. goto out;
  392. init_query_mad(in_mad);
  393. in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
  394. err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
  395. 1, NULL, NULL, in_mad, out_mad);
  396. if (err)
  397. goto out;
  398. memset(props, 0, sizeof *props);
  399. have_ib_ports = num_ib_ports(dev->dev);
  400. props->fw_ver = dev->dev->caps.fw_ver;
  401. props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
  402. IB_DEVICE_PORT_ACTIVE_EVENT |
  403. IB_DEVICE_SYS_IMAGE_GUID |
  404. IB_DEVICE_RC_RNR_NAK_GEN |
  405. IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
  406. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
  407. props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
  408. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
  409. props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
  410. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
  411. props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
  412. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
  413. props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
  414. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
  415. props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
  416. if (dev->dev->caps.max_gso_sz &&
  417. (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
  418. (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
  419. props->device_cap_flags |= IB_DEVICE_UD_TSO;
  420. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
  421. props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
  422. if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
  423. (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
  424. (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
  425. props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
  426. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
  427. props->device_cap_flags |= IB_DEVICE_XRC;
  428. if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
  429. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
  430. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
  431. if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
  432. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
  433. else
  434. props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
  435. if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
  436. props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
  437. }
  438. props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
  439. props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
  440. 0xffffff;
  441. props->vendor_part_id = dev->dev->persist->pdev->device;
  442. props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
  443. memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
  444. props->max_mr_size = ~0ull;
  445. props->page_size_cap = dev->dev->caps.page_size_cap;
  446. props->max_qp = dev->dev->quotas.qp;
  447. props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
  448. props->max_sge = min(dev->dev->caps.max_sq_sg,
  449. dev->dev->caps.max_rq_sg);
  450. props->max_sge_rd = MLX4_MAX_SGE_RD;
  451. props->max_cq = dev->dev->quotas.cq;
  452. props->max_cqe = dev->dev->caps.max_cqes;
  453. props->max_mr = dev->dev->quotas.mpt;
  454. props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
  455. props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
  456. props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
  457. props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
  458. props->max_srq = dev->dev->quotas.srq;
  459. props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
  460. props->max_srq_sge = dev->dev->caps.max_srq_sge;
  461. props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
  462. props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
  463. props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
  464. IB_ATOMIC_HCA : IB_ATOMIC_NONE;
  465. props->masked_atomic_cap = props->atomic_cap;
  466. props->max_pkeys = dev->dev->caps.pkey_table_len[1];
  467. props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
  468. props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
  469. props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
  470. props->max_mcast_grp;
  471. props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
  472. props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
  473. props->timestamp_mask = 0xFFFFFFFFFFFFULL;
  474. if (!mlx4_is_slave(dev->dev))
  475. err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
  476. if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
  477. resp.response_length += sizeof(resp.hca_core_clock_offset);
  478. if (!err && !mlx4_is_slave(dev->dev)) {
  479. resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
  480. resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
  481. }
  482. }
  483. if (uhw->outlen) {
  484. err = ib_copy_to_udata(uhw, &resp, resp.response_length);
  485. if (err)
  486. goto out;
  487. }
  488. out:
  489. kfree(in_mad);
  490. kfree(out_mad);
  491. return err;
  492. }
  493. static enum rdma_link_layer
  494. mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
  495. {
  496. struct mlx4_dev *dev = to_mdev(device)->dev;
  497. return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
  498. IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
  499. }
  500. static int ib_link_query_port(struct ib_device *ibdev, u8 port,
  501. struct ib_port_attr *props, int netw_view)
  502. {
  503. struct ib_smp *in_mad = NULL;
  504. struct ib_smp *out_mad = NULL;
  505. int ext_active_speed;
  506. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  507. int err = -ENOMEM;
  508. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  509. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  510. if (!in_mad || !out_mad)
  511. goto out;
  512. init_query_mad(in_mad);
  513. in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
  514. in_mad->attr_mod = cpu_to_be32(port);
  515. if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
  516. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  517. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
  518. in_mad, out_mad);
  519. if (err)
  520. goto out;
  521. props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
  522. props->lmc = out_mad->data[34] & 0x7;
  523. props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
  524. props->sm_sl = out_mad->data[36] & 0xf;
  525. props->state = out_mad->data[32] & 0xf;
  526. props->phys_state = out_mad->data[33] >> 4;
  527. props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
  528. if (netw_view)
  529. props->gid_tbl_len = out_mad->data[50];
  530. else
  531. props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
  532. props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
  533. props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
  534. props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
  535. props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
  536. props->active_width = out_mad->data[31] & 0xf;
  537. props->active_speed = out_mad->data[35] >> 4;
  538. props->max_mtu = out_mad->data[41] & 0xf;
  539. props->active_mtu = out_mad->data[36] >> 4;
  540. props->subnet_timeout = out_mad->data[51] & 0x1f;
  541. props->max_vl_num = out_mad->data[37] >> 4;
  542. props->init_type_reply = out_mad->data[41] >> 4;
  543. /* Check if extended speeds (EDR/FDR/...) are supported */
  544. if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
  545. ext_active_speed = out_mad->data[62] >> 4;
  546. switch (ext_active_speed) {
  547. case 1:
  548. props->active_speed = IB_SPEED_FDR;
  549. break;
  550. case 2:
  551. props->active_speed = IB_SPEED_EDR;
  552. break;
  553. }
  554. }
  555. /* If reported active speed is QDR, check if is FDR-10 */
  556. if (props->active_speed == IB_SPEED_QDR) {
  557. init_query_mad(in_mad);
  558. in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
  559. in_mad->attr_mod = cpu_to_be32(port);
  560. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
  561. NULL, NULL, in_mad, out_mad);
  562. if (err)
  563. goto out;
  564. /* Checking LinkSpeedActive for FDR-10 */
  565. if (out_mad->data[15] & 0x1)
  566. props->active_speed = IB_SPEED_FDR10;
  567. }
  568. /* Avoid wrong speed value returned by FW if the IB link is down. */
  569. if (props->state == IB_PORT_DOWN)
  570. props->active_speed = IB_SPEED_SDR;
  571. out:
  572. kfree(in_mad);
  573. kfree(out_mad);
  574. return err;
  575. }
  576. static u8 state_to_phys_state(enum ib_port_state state)
  577. {
  578. return state == IB_PORT_ACTIVE ? 5 : 3;
  579. }
  580. static int eth_link_query_port(struct ib_device *ibdev, u8 port,
  581. struct ib_port_attr *props, int netw_view)
  582. {
  583. struct mlx4_ib_dev *mdev = to_mdev(ibdev);
  584. struct mlx4_ib_iboe *iboe = &mdev->iboe;
  585. struct net_device *ndev;
  586. enum ib_mtu tmp;
  587. struct mlx4_cmd_mailbox *mailbox;
  588. int err = 0;
  589. int is_bonded = mlx4_is_bonded(mdev->dev);
  590. mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
  591. if (IS_ERR(mailbox))
  592. return PTR_ERR(mailbox);
  593. err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
  594. MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
  595. MLX4_CMD_WRAPPED);
  596. if (err)
  597. goto out;
  598. props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
  599. IB_WIDTH_4X : IB_WIDTH_1X;
  600. props->active_speed = IB_SPEED_QDR;
  601. props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
  602. props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
  603. props->max_msg_sz = mdev->dev->caps.max_msg_sz;
  604. props->pkey_tbl_len = 1;
  605. props->max_mtu = IB_MTU_4096;
  606. props->max_vl_num = 2;
  607. props->state = IB_PORT_DOWN;
  608. props->phys_state = state_to_phys_state(props->state);
  609. props->active_mtu = IB_MTU_256;
  610. spin_lock_bh(&iboe->lock);
  611. ndev = iboe->netdevs[port - 1];
  612. if (ndev && is_bonded) {
  613. rcu_read_lock(); /* required to get upper dev */
  614. ndev = netdev_master_upper_dev_get_rcu(ndev);
  615. rcu_read_unlock();
  616. }
  617. if (!ndev)
  618. goto out_unlock;
  619. tmp = iboe_get_mtu(ndev->mtu);
  620. props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
  621. props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
  622. IB_PORT_ACTIVE : IB_PORT_DOWN;
  623. props->phys_state = state_to_phys_state(props->state);
  624. out_unlock:
  625. spin_unlock_bh(&iboe->lock);
  626. out:
  627. mlx4_free_cmd_mailbox(mdev->dev, mailbox);
  628. return err;
  629. }
  630. int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
  631. struct ib_port_attr *props, int netw_view)
  632. {
  633. int err;
  634. memset(props, 0, sizeof *props);
  635. err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
  636. ib_link_query_port(ibdev, port, props, netw_view) :
  637. eth_link_query_port(ibdev, port, props, netw_view);
  638. return err;
  639. }
  640. static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
  641. struct ib_port_attr *props)
  642. {
  643. /* returns host view */
  644. return __mlx4_ib_query_port(ibdev, port, props, 0);
  645. }
  646. int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  647. union ib_gid *gid, int netw_view)
  648. {
  649. struct ib_smp *in_mad = NULL;
  650. struct ib_smp *out_mad = NULL;
  651. int err = -ENOMEM;
  652. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  653. int clear = 0;
  654. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  655. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  656. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  657. if (!in_mad || !out_mad)
  658. goto out;
  659. init_query_mad(in_mad);
  660. in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
  661. in_mad->attr_mod = cpu_to_be32(port);
  662. if (mlx4_is_mfunc(dev->dev) && netw_view)
  663. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  664. err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
  665. if (err)
  666. goto out;
  667. memcpy(gid->raw, out_mad->data + 8, 8);
  668. if (mlx4_is_mfunc(dev->dev) && !netw_view) {
  669. if (index) {
  670. /* For any index > 0, return the null guid */
  671. err = 0;
  672. clear = 1;
  673. goto out;
  674. }
  675. }
  676. init_query_mad(in_mad);
  677. in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
  678. in_mad->attr_mod = cpu_to_be32(index / 8);
  679. err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
  680. NULL, NULL, in_mad, out_mad);
  681. if (err)
  682. goto out;
  683. memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
  684. out:
  685. if (clear)
  686. memset(gid->raw + 8, 0, 8);
  687. kfree(in_mad);
  688. kfree(out_mad);
  689. return err;
  690. }
  691. static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
  692. union ib_gid *gid)
  693. {
  694. int ret;
  695. if (rdma_protocol_ib(ibdev, port))
  696. return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
  697. if (!rdma_protocol_roce(ibdev, port))
  698. return -ENODEV;
  699. if (!rdma_cap_roce_gid_table(ibdev, port))
  700. return -ENODEV;
  701. ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
  702. if (ret == -EAGAIN) {
  703. memcpy(gid, &zgid, sizeof(*gid));
  704. return 0;
  705. }
  706. return ret;
  707. }
  708. int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
  709. u16 *pkey, int netw_view)
  710. {
  711. struct ib_smp *in_mad = NULL;
  712. struct ib_smp *out_mad = NULL;
  713. int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
  714. int err = -ENOMEM;
  715. in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
  716. out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
  717. if (!in_mad || !out_mad)
  718. goto out;
  719. init_query_mad(in_mad);
  720. in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
  721. in_mad->attr_mod = cpu_to_be32(index / 32);
  722. if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
  723. mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
  724. err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
  725. in_mad, out_mad);
  726. if (err)
  727. goto out;
  728. *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
  729. out:
  730. kfree(in_mad);
  731. kfree(out_mad);
  732. return err;
  733. }
  734. static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
  735. {
  736. return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
  737. }
  738. static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
  739. struct ib_device_modify *props)
  740. {
  741. struct mlx4_cmd_mailbox *mailbox;
  742. unsigned long flags;
  743. if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
  744. return -EOPNOTSUPP;
  745. if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
  746. return 0;
  747. if (mlx4_is_slave(to_mdev(ibdev)->dev))
  748. return -EOPNOTSUPP;
  749. spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
  750. memcpy(ibdev->node_desc, props->node_desc, 64);
  751. spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
  752. /*
  753. * If possible, pass node desc to FW, so it can generate
  754. * a 144 trap. If cmd fails, just ignore.
  755. */
  756. mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
  757. if (IS_ERR(mailbox))
  758. return 0;
  759. memcpy(mailbox->buf, props->node_desc, 64);
  760. mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
  761. MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  762. mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
  763. return 0;
  764. }
  765. static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
  766. u32 cap_mask)
  767. {
  768. struct mlx4_cmd_mailbox *mailbox;
  769. int err;
  770. mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
  771. if (IS_ERR(mailbox))
  772. return PTR_ERR(mailbox);
  773. if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
  774. *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
  775. ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
  776. } else {
  777. ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
  778. ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
  779. }
  780. err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
  781. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  782. MLX4_CMD_WRAPPED);
  783. mlx4_free_cmd_mailbox(dev->dev, mailbox);
  784. return err;
  785. }
  786. static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
  787. struct ib_port_modify *props)
  788. {
  789. struct mlx4_ib_dev *mdev = to_mdev(ibdev);
  790. u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
  791. struct ib_port_attr attr;
  792. u32 cap_mask;
  793. int err;
  794. /* return OK if this is RoCE. CM calls ib_modify_port() regardless
  795. * of whether port link layer is ETH or IB. For ETH ports, qkey
  796. * violations and port capabilities are not meaningful.
  797. */
  798. if (is_eth)
  799. return 0;
  800. mutex_lock(&mdev->cap_mask_mutex);
  801. err = mlx4_ib_query_port(ibdev, port, &attr);
  802. if (err)
  803. goto out;
  804. cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
  805. ~props->clr_port_cap_mask;
  806. err = mlx4_ib_SET_PORT(mdev, port,
  807. !!(mask & IB_PORT_RESET_QKEY_CNTR),
  808. cap_mask);
  809. out:
  810. mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
  811. return err;
  812. }
  813. static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
  814. struct ib_udata *udata)
  815. {
  816. struct mlx4_ib_dev *dev = to_mdev(ibdev);
  817. struct mlx4_ib_ucontext *context;
  818. struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
  819. struct mlx4_ib_alloc_ucontext_resp resp;
  820. int err;
  821. if (!dev->ib_active)
  822. return ERR_PTR(-EAGAIN);
  823. if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
  824. resp_v3.qp_tab_size = dev->dev->caps.num_qps;
  825. resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
  826. resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
  827. } else {
  828. resp.dev_caps = dev->dev->caps.userspace_caps;
  829. resp.qp_tab_size = dev->dev->caps.num_qps;
  830. resp.bf_reg_size = dev->dev->caps.bf_reg_size;
  831. resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
  832. resp.cqe_size = dev->dev->caps.cqe_size;
  833. }
  834. context = kzalloc(sizeof(*context), GFP_KERNEL);
  835. if (!context)
  836. return ERR_PTR(-ENOMEM);
  837. err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
  838. if (err) {
  839. kfree(context);
  840. return ERR_PTR(err);
  841. }
  842. INIT_LIST_HEAD(&context->db_page_list);
  843. mutex_init(&context->db_page_mutex);
  844. if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
  845. err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
  846. else
  847. err = ib_copy_to_udata(udata, &resp, sizeof(resp));
  848. if (err) {
  849. mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
  850. kfree(context);
  851. return ERR_PTR(-EFAULT);
  852. }
  853. return &context->ibucontext;
  854. }
  855. static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
  856. {
  857. struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
  858. mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
  859. kfree(context);
  860. return 0;
  861. }
  862. static void mlx4_ib_vma_open(struct vm_area_struct *area)
  863. {
  864. /* vma_open is called when a new VMA is created on top of our VMA.
  865. * This is done through either mremap flow or split_vma (usually due
  866. * to mlock, madvise, munmap, etc.). We do not support a clone of the
  867. * vma, as this VMA is strongly hardware related. Therefore we set the
  868. * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
  869. * calling us again and trying to do incorrect actions. We assume that
  870. * the original vma size is exactly a single page that there will be no
  871. * "splitting" operations on.
  872. */
  873. area->vm_ops = NULL;
  874. }
  875. static void mlx4_ib_vma_close(struct vm_area_struct *area)
  876. {
  877. struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
  878. /* It's guaranteed that all VMAs opened on a FD are closed before the
  879. * file itself is closed, therefore no sync is needed with the regular
  880. * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
  881. * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
  882. * The close operation is usually called under mm->mmap_sem except when
  883. * process is exiting. The exiting case is handled explicitly as part
  884. * of mlx4_ib_disassociate_ucontext.
  885. */
  886. mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
  887. area->vm_private_data;
  888. /* set the vma context pointer to null in the mlx4_ib driver's private
  889. * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
  890. */
  891. mlx4_ib_vma_priv_data->vma = NULL;
  892. }
  893. static const struct vm_operations_struct mlx4_ib_vm_ops = {
  894. .open = mlx4_ib_vma_open,
  895. .close = mlx4_ib_vma_close
  896. };
  897. static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
  898. {
  899. int i;
  900. int ret = 0;
  901. struct vm_area_struct *vma;
  902. struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
  903. struct task_struct *owning_process = NULL;
  904. struct mm_struct *owning_mm = NULL;
  905. owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
  906. if (!owning_process)
  907. return;
  908. owning_mm = get_task_mm(owning_process);
  909. if (!owning_mm) {
  910. pr_info("no mm, disassociate ucontext is pending task termination\n");
  911. while (1) {
  912. /* make sure that task is dead before returning, it may
  913. * prevent a rare case of module down in parallel to a
  914. * call to mlx4_ib_vma_close.
  915. */
  916. put_task_struct(owning_process);
  917. msleep(1);
  918. owning_process = get_pid_task(ibcontext->tgid,
  919. PIDTYPE_PID);
  920. if (!owning_process ||
  921. owning_process->state == TASK_DEAD) {
  922. pr_info("disassociate ucontext done, task was terminated\n");
  923. /* in case task was dead need to release the task struct */
  924. if (owning_process)
  925. put_task_struct(owning_process);
  926. return;
  927. }
  928. }
  929. }
  930. /* need to protect from a race on closing the vma as part of
  931. * mlx4_ib_vma_close().
  932. */
  933. down_read(&owning_mm->mmap_sem);
  934. for (i = 0; i < HW_BAR_COUNT; i++) {
  935. vma = context->hw_bar_info[i].vma;
  936. if (!vma)
  937. continue;
  938. ret = zap_vma_ptes(context->hw_bar_info[i].vma,
  939. context->hw_bar_info[i].vma->vm_start,
  940. PAGE_SIZE);
  941. if (ret) {
  942. pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
  943. BUG_ON(1);
  944. }
  945. /* context going to be destroyed, should not access ops any more */
  946. context->hw_bar_info[i].vma->vm_ops = NULL;
  947. }
  948. up_read(&owning_mm->mmap_sem);
  949. mmput(owning_mm);
  950. put_task_struct(owning_process);
  951. }
  952. static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
  953. struct mlx4_ib_vma_private_data *vma_private_data)
  954. {
  955. vma_private_data->vma = vma;
  956. vma->vm_private_data = vma_private_data;
  957. vma->vm_ops = &mlx4_ib_vm_ops;
  958. }
  959. static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
  960. {
  961. struct mlx4_ib_dev *dev = to_mdev(context->device);
  962. struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
  963. if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  964. return -EINVAL;
  965. if (vma->vm_pgoff == 0) {
  966. /* We prevent double mmaping on same context */
  967. if (mucontext->hw_bar_info[HW_BAR_DB].vma)
  968. return -EINVAL;
  969. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  970. if (io_remap_pfn_range(vma, vma->vm_start,
  971. to_mucontext(context)->uar.pfn,
  972. PAGE_SIZE, vma->vm_page_prot))
  973. return -EAGAIN;
  974. mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
  975. } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
  976. /* We prevent double mmaping on same context */
  977. if (mucontext->hw_bar_info[HW_BAR_BF].vma)
  978. return -EINVAL;
  979. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  980. if (io_remap_pfn_range(vma, vma->vm_start,
  981. to_mucontext(context)->uar.pfn +
  982. dev->dev->caps.num_uars,
  983. PAGE_SIZE, vma->vm_page_prot))
  984. return -EAGAIN;
  985. mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
  986. } else if (vma->vm_pgoff == 3) {
  987. struct mlx4_clock_params params;
  988. int ret;
  989. /* We prevent double mmaping on same context */
  990. if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
  991. return -EINVAL;
  992. ret = mlx4_get_internal_clock_params(dev->dev, &params);
  993. if (ret)
  994. return ret;
  995. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  996. if (io_remap_pfn_range(vma, vma->vm_start,
  997. (pci_resource_start(dev->dev->persist->pdev,
  998. params.bar) +
  999. params.offset)
  1000. >> PAGE_SHIFT,
  1001. PAGE_SIZE, vma->vm_page_prot))
  1002. return -EAGAIN;
  1003. mlx4_ib_set_vma_data(vma,
  1004. &mucontext->hw_bar_info[HW_BAR_CLOCK]);
  1005. } else {
  1006. return -EINVAL;
  1007. }
  1008. return 0;
  1009. }
  1010. static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
  1011. struct ib_ucontext *context,
  1012. struct ib_udata *udata)
  1013. {
  1014. struct mlx4_ib_pd *pd;
  1015. int err;
  1016. pd = kmalloc(sizeof *pd, GFP_KERNEL);
  1017. if (!pd)
  1018. return ERR_PTR(-ENOMEM);
  1019. err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
  1020. if (err) {
  1021. kfree(pd);
  1022. return ERR_PTR(err);
  1023. }
  1024. if (context)
  1025. if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
  1026. mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
  1027. kfree(pd);
  1028. return ERR_PTR(-EFAULT);
  1029. }
  1030. return &pd->ibpd;
  1031. }
  1032. static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
  1033. {
  1034. mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
  1035. kfree(pd);
  1036. return 0;
  1037. }
  1038. static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
  1039. struct ib_ucontext *context,
  1040. struct ib_udata *udata)
  1041. {
  1042. struct mlx4_ib_xrcd *xrcd;
  1043. struct ib_cq_init_attr cq_attr = {};
  1044. int err;
  1045. if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
  1046. return ERR_PTR(-ENOSYS);
  1047. xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
  1048. if (!xrcd)
  1049. return ERR_PTR(-ENOMEM);
  1050. err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
  1051. if (err)
  1052. goto err1;
  1053. xrcd->pd = ib_alloc_pd(ibdev);
  1054. if (IS_ERR(xrcd->pd)) {
  1055. err = PTR_ERR(xrcd->pd);
  1056. goto err2;
  1057. }
  1058. cq_attr.cqe = 1;
  1059. xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
  1060. if (IS_ERR(xrcd->cq)) {
  1061. err = PTR_ERR(xrcd->cq);
  1062. goto err3;
  1063. }
  1064. return &xrcd->ibxrcd;
  1065. err3:
  1066. ib_dealloc_pd(xrcd->pd);
  1067. err2:
  1068. mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
  1069. err1:
  1070. kfree(xrcd);
  1071. return ERR_PTR(err);
  1072. }
  1073. static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
  1074. {
  1075. ib_destroy_cq(to_mxrcd(xrcd)->cq);
  1076. ib_dealloc_pd(to_mxrcd(xrcd)->pd);
  1077. mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
  1078. kfree(xrcd);
  1079. return 0;
  1080. }
  1081. static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
  1082. {
  1083. struct mlx4_ib_qp *mqp = to_mqp(ibqp);
  1084. struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
  1085. struct mlx4_ib_gid_entry *ge;
  1086. ge = kzalloc(sizeof *ge, GFP_KERNEL);
  1087. if (!ge)
  1088. return -ENOMEM;
  1089. ge->gid = *gid;
  1090. if (mlx4_ib_add_mc(mdev, mqp, gid)) {
  1091. ge->port = mqp->port;
  1092. ge->added = 1;
  1093. }
  1094. mutex_lock(&mqp->mutex);
  1095. list_add_tail(&ge->list, &mqp->gid_list);
  1096. mutex_unlock(&mqp->mutex);
  1097. return 0;
  1098. }
  1099. static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
  1100. struct mlx4_ib_counters *ctr_table)
  1101. {
  1102. struct counter_index *counter, *tmp_count;
  1103. mutex_lock(&ctr_table->mutex);
  1104. list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
  1105. list) {
  1106. if (counter->allocated)
  1107. mlx4_counter_free(ibdev->dev, counter->index);
  1108. list_del(&counter->list);
  1109. kfree(counter);
  1110. }
  1111. mutex_unlock(&ctr_table->mutex);
  1112. }
  1113. int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
  1114. union ib_gid *gid)
  1115. {
  1116. struct net_device *ndev;
  1117. int ret = 0;
  1118. if (!mqp->port)
  1119. return 0;
  1120. spin_lock_bh(&mdev->iboe.lock);
  1121. ndev = mdev->iboe.netdevs[mqp->port - 1];
  1122. if (ndev)
  1123. dev_hold(ndev);
  1124. spin_unlock_bh(&mdev->iboe.lock);
  1125. if (ndev) {
  1126. ret = 1;
  1127. dev_put(ndev);
  1128. }
  1129. return ret;
  1130. }
  1131. struct mlx4_ib_steering {
  1132. struct list_head list;
  1133. struct mlx4_flow_reg_id reg_id;
  1134. union ib_gid gid;
  1135. };
  1136. static int parse_flow_attr(struct mlx4_dev *dev,
  1137. u32 qp_num,
  1138. union ib_flow_spec *ib_spec,
  1139. struct _rule_hw *mlx4_spec)
  1140. {
  1141. enum mlx4_net_trans_rule_id type;
  1142. switch (ib_spec->type) {
  1143. case IB_FLOW_SPEC_ETH:
  1144. type = MLX4_NET_TRANS_RULE_ID_ETH;
  1145. memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
  1146. ETH_ALEN);
  1147. memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
  1148. ETH_ALEN);
  1149. mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
  1150. mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
  1151. break;
  1152. case IB_FLOW_SPEC_IB:
  1153. type = MLX4_NET_TRANS_RULE_ID_IB;
  1154. mlx4_spec->ib.l3_qpn =
  1155. cpu_to_be32(qp_num);
  1156. mlx4_spec->ib.qpn_mask =
  1157. cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
  1158. break;
  1159. case IB_FLOW_SPEC_IPV4:
  1160. type = MLX4_NET_TRANS_RULE_ID_IPV4;
  1161. mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
  1162. mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
  1163. mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
  1164. mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
  1165. break;
  1166. case IB_FLOW_SPEC_TCP:
  1167. case IB_FLOW_SPEC_UDP:
  1168. type = ib_spec->type == IB_FLOW_SPEC_TCP ?
  1169. MLX4_NET_TRANS_RULE_ID_TCP :
  1170. MLX4_NET_TRANS_RULE_ID_UDP;
  1171. mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
  1172. mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
  1173. mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
  1174. mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
  1175. break;
  1176. default:
  1177. return -EINVAL;
  1178. }
  1179. if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
  1180. mlx4_hw_rule_sz(dev, type) < 0)
  1181. return -EINVAL;
  1182. mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
  1183. mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
  1184. return mlx4_hw_rule_sz(dev, type);
  1185. }
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8 link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
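/* Return the index of the default_table entry whose mandatory fields all
 * appear in the user flow attribute and whose forbidden fields do not,
 * or -1 when no default rule applies.
 */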
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));
		if (link_layer != pdefault_rules->link_layer)
			continue;
		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;
			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;
			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}
		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;
		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;
	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		int ret;
		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);
			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put an empty rule; the qpn is ignored here */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}
		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
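/* Build the complete hardware rule (control segment, any default rules and
 * the user specs) in a command mailbox and attach it to the QP with the
 * MLX4_QP_FLOW_STEERING_ATTACH firmware command. On success, *reg_id holds
 * the registration id that is later used to detach the rule.
 */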
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;
	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};
	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}
	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;
	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;
	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);
	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}
	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
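/* For a flow that consists of a single L2 spec on a device with VXLAN tunnel
 * offload, add the matching tunnel steering entry for the destination MAC so
 * encapsulated traffic reaches the same QP. A no-op when tunnel offload is
 * not in VXLAN mode or the device uses static DMFS A0 steering.
 */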
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
	int err = 0;
	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */
	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;
	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */
	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
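/* Map an IB_FLOW_ATTR_FLAGS_DONT_TRAP rule onto the device's sniffer promisc
 * modes: with no spec (or an all-zero MAC mask) both MC and UC sniffers are
 * used, otherwise the MAC mask selects either the MC or the UC sniffer.
 */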
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}
	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;
		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type != IB_FLOW_SPEC_ETH)
			return -EINVAL;
		/* if the mask is all zero then both MC and UC are sniffed */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};
			/* The xor above touched only the MC bit; a non-empty
			 * mask is valid only if that bit is set and the rest
			 * are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;
			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}
	return err;
}
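/* ib_create_flow() entry point. Translates the verbs flow attribute into one
 * or two device rules (plus mirror rules on the second port when the device
 * is bonded) and records their registration ids in the returned ib_flow.
 */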
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);
	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);
	memset(type, 0, sizeof(type));
	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}
	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If the don't-trap flag (continue match) is set, under
		 * specific conditions traffic is replicated to the given qp
		 * without stealing it.
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;
	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;
	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;
	default:
		err = -EINVAL;
		goto err_free;
	}
	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		i++;
	}
	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}
	return &mflow->ibflow;
err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}
	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}
	kfree(mflow);
	return ret;
}
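/* attach_mcast verb. Attaches the QP to the multicast group, adds a mirror
 * attachment on the other port when the device is bonded, and (under
 * device-managed steering) remembers the registration id so detach can find
 * it later.
 */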
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id reg_id;
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}
	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}
	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}
	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;
	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;
err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);
	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;
	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}
	return ret;
}
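/* detach_mcast verb. Looks up the steering registration recorded at attach
 * time (device-managed steering only), detaches the QP from the group on both
 * ports when bonded, and drops the cached gid entry.
 */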
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;
		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}
	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}
	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");
	mutex_unlock(&mqp->mutex);
	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;
	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;
	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;
	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}
static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
#define MLX4_IB_INVALID_MAC ((u64)-1)
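/* Keep the cached source MAC of the proxy QP1 for this port in sync with the
 * netdev MAC address: register the new MAC, update the QP via
 * MLX4_UPDATE_QP_SMAC and release the MAC that was previously registered.
 */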
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;
	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);
	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
	/* no need to update QP1 or register a mac in non-SRIOV mode */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;
	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;
		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;
		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
		if (new_smac_index < 0)
			goto unlock;
		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}
unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
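/* Refresh the per-port netdev cache from the mlx4 core and, when the event
 * concerns a cached netdev (address change, register, up, change), update
 * that port's QPs.
 */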
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;
	ASSERT_RTNL();
	iboe = &ibdev->iboe;
	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;
	}
	spin_unlock_bh(&iboe->lock);
	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);
	return NOTIFY_DONE;
}
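/* On the SR-IOV master, program the virtual-to-physical pkey mapping for
 * every slave and port, and seed the physical pkey cache.
 */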
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;
	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port - 1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
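/* Walk the per-port completion EQs exposed by the mlx4 core and try to assign
 * each one; entries that cannot be assigned are marked -1. The number of EQs
 * actually obtained is advertised through ib_dev.num_comp_vectors.
 */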
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;
	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;
	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}
	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;
	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;
	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;
	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;
	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;
	err = mlx4_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	}
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
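/* mlx4 core "add" callback: allocate and register the IB device, set up the
 * verbs ops and uverbs command masks, per-port counters, the steerable QP
 * range, SR-IOV support and the netdev notifier. Returns the new device on
 * success or NULL on failure.
 */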
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;
	pr_info_once("%s", mlx4_ib_version);
	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;
	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;
	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}
	iboe = &ibdev->iboe;
	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;
	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;
	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
	ibdev->dev = dev;
	ibdev->bond_next_port = 0;
	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
					1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
	ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
	ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
	ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
	ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
	}
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}
	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}
	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
	mlx4_ib_alloc_eqs(dev, ibdev);
	spin_lock_init(&iboe->lock);
	if (init_node_data(ibdev))
		goto err_map;
	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
	}
	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports; ++i) {
			new_counter_index =
				kmalloc(sizeof(struct counter_index),
					GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
				counter_index;
		}
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;
	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;
		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->persist->pdev->dev,
				"bit map alloc failed\n");
			goto err_steer_qp_release;
		}
		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
				dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base +
				ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}
	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;
	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;
	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notif;
			}
		}
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
			err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
			if (err) {
				goto err_notif;
			}
		}
	}
	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}
	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);
	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);
	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);
	mlx4_ib_close_sriov(ibdev);
err_mad:
	mlx4_ib_mad_cleanup(ibdev);
err_reg:
	ib_unregister_device(&ibdev->ib_dev);
err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
err_map:
	iounmap(ibdev->uar_map);
err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);
err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);
	return NULL;
}
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;
	WARN_ON(!dev->ib_uc_qpns_bitmap);
	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;
	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;
	BUG_ON(qpn < dev->steer_qpn_base);
	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
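/* Register (is_attach) or unregister an empty IB L2 steering rule for the
 * given QP under device-managed flow steering; the registration id is kept
 * in mqp->reg_id for the later detach.
 */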
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;
	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);
	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}
	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);
	mlx4_ib_free_eqs(dev, ibdev);
	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
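/* On the SR-IOV master, queue per-port work items that set up (do_init != 0)
 * or tear down the paravirtualized tunnel QPs for the given slave.
 */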
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;
	if (!mlx4_is_master(dev))
		return;
	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		return;
	}
	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
	return;
}
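/* Catastrophic error handling: walk every QP on the device and, for any send
 * or receive queue that still has outstanding work, schedule its CQ for a
 * completion callback so consumers can observe the flushed state.
 */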
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;
	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);
	/* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;
	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;
		if (!curr_netdev)
			continue;
		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);
	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&ibev);
}
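/* mlx4 core event callback: translate port up/down, catastrophic error, port
 * management change and slave init/shutdown events into IB events or the
 * matching SR-IOV housekeeping work.
 */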
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;
	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}
	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;
	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;
	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;
	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;
	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}
		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;
	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;
			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;
	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;
			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;
	default:
		return;
	}
	ibev.device = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
{
	int err;
	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;
	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;
	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;
	return 0;
clean_mcg:
	mlx4_ib_mcg_destroy();
clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}
module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);