port.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/errno.h>
  33. #include <linux/if_ether.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/export.h>
  36. #include <linux/mlx4/cmd.h>
  37. #include "mlx4.h"
  38. #include "mlx4_stats.h"
  39. #define MLX4_MAC_VALID (1ull << 63)
  40. #define MLX4_VLAN_VALID (1u << 31)
  41. #define MLX4_VLAN_MASK 0xfff
  42. #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
  43. #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
  44. #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
  45. #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
  46. #define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1)
  47. #define MLX4_FLAG2_V_USER_MTU_MASK BIT(5)
  48. #define MLX4_FLAG2_V_USER_MAC_MASK BIT(6)
  49. #define MLX4_FLAG_V_MTU_MASK BIT(0)
  50. #define MLX4_FLAG_V_PPRX_MASK BIT(1)
  51. #define MLX4_FLAG_V_PPTX_MASK BIT(2)
  52. #define MLX4_IGNORE_FCS_MASK 0x1
  53. #define MLX4_TC_MAX_NUMBER 8
  54. void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
  55. {
  56. int i;
  57. mutex_init(&table->mutex);
  58. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  59. table->entries[i] = 0;
  60. table->refs[i] = 0;
  61. table->is_dup[i] = false;
  62. }
  63. table->max = 1 << dev->caps.log_num_macs;
  64. table->total = 0;
  65. }
  66. void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
  67. {
  68. int i;
  69. mutex_init(&table->mutex);
  70. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  71. table->entries[i] = 0;
  72. table->refs[i] = 0;
  73. table->is_dup[i] = false;
  74. }
  75. table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
  76. table->total = 0;
  77. }
  78. void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
  79. struct mlx4_roce_gid_table *table)
  80. {
  81. int i;
  82. mutex_init(&table->mutex);
  83. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
  84. memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
  85. }
  86. static int validate_index(struct mlx4_dev *dev,
  87. struct mlx4_mac_table *table, int index)
  88. {
  89. int err = 0;
  90. if (index < 0 || index >= table->max || !table->entries[index]) {
  91. mlx4_warn(dev, "No valid Mac entry for the given index\n");
  92. err = -EINVAL;
  93. }
  94. return err;
  95. }
  96. static int find_index(struct mlx4_dev *dev,
  97. struct mlx4_mac_table *table, u64 mac)
  98. {
  99. int i;
  100. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  101. if (table->refs[i] &&
  102. (MLX4_MAC_MASK & mac) ==
  103. (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
  104. return i;
  105. }
  106. /* Mac not found */
  107. return -EINVAL;
  108. }
  109. static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
  110. __be64 *entries)
  111. {
  112. struct mlx4_cmd_mailbox *mailbox;
  113. u32 in_mod;
  114. int err;
  115. mailbox = mlx4_alloc_cmd_mailbox(dev);
  116. if (IS_ERR(mailbox))
  117. return PTR_ERR(mailbox);
  118. memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
  119. in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
  120. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  121. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  122. MLX4_CMD_NATIVE);
  123. mlx4_free_cmd_mailbox(dev, mailbox);
  124. return err;
  125. }
  126. int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
  127. {
  128. struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
  129. struct mlx4_mac_table *table = &info->mac_table;
  130. int i;
  131. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  132. if (!table->refs[i])
  133. continue;
  134. if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
  135. *idx = i;
  136. return 0;
  137. }
  138. }
  139. return -ENOENT;
  140. }
  141. EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
  142. static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
  143. {
  144. int i, num_eth_ports = 0;
  145. if (!mlx4_is_mfunc(dev))
  146. return false;
  147. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
  148. ++num_eth_ports;
  149. return (num_eth_ports == 2) ? true : false;
  150. }
/* Register @mac in port @port's MAC table.
 *
 * Returns the table index (>= 0) on success or a negative errno.
 * If the MAC is already present, only its reference count is bumped
 * and its existing index is returned.  Under multi-function bonding
 * (two Ethernet ports on an mfunc device) the entry is also mirrored
 * into the other port's table at the SAME index, so that HA failover
 * keeps QP-to-MAC-index mappings valid on both ports.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;			/* first free slot in the primary table */
	int free_for_dup = -1;		/* slot that is free in BOTH tables */
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
		 (unsigned long long)mac, port,
		 dup ? "with" : "without");

	/* Both tables may be touched: always acquire the two mutexes in
	 * port-1-then-port-2 order to avoid AB/BA deadlock; the second
	 * lock uses a lockdep nesting annotation.
	 */
	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		/* Locate the MAC (if already present) in each port's table. */
		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
				index_at_port = i;
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
				index_at_dup_port = i;
		}

		/* check that same mac is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the mac is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}

		/* If the mac is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the mac at the same index.
		 * Otherwise, you cannot bond (primary contains a different mac
		 * at that index).
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	/* Scan for a pre-existing entry (ref-count bump) or remember the
	 * first slot free in the primary table and, when bonding, the
	 * first slot free in both tables.
	 */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
			continue;
		}

		if ((MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			if (dup) {
				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);

				/* Bonded tables should mirror each other;
				 * warn (but proceed) on a mismatch.
				 */
				if (dup_mac != mac || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
						  mac, dup_port, i);
				}
			}
			goto out;
		}
	}

	/* No slot usable for mirroring: fall back to single-port
	 * registration and warn that HA may be degraded.
	 */
	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;	/* roll back software state */
		goto out;
	}
	table->refs[free] = 1;
	table->is_dup[free] = false;
	++table->total;
	if (dup) {
		/* Mirror the entry into the other port's table at the same
		 * index.  refs stays 0 there: the mirror is tracked via
		 * is_dup, not via user references.
		 */
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}
	err = free;
out:
	/* Release in reverse acquisition order. */
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
  294. int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  295. {
  296. u64 out_param = 0;
  297. int err = -EINVAL;
  298. if (mlx4_is_mfunc(dev)) {
  299. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  300. err = mlx4_cmd_imm(dev, mac, &out_param,
  301. ((u32) port) << 8 | (u32) RES_MAC,
  302. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  303. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  304. }
  305. if (err && err == -EINVAL && mlx4_is_slave(dev)) {
  306. /* retry using old REG_MAC format */
  307. set_param_l(&out_param, port);
  308. err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  309. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  310. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  311. if (!err)
  312. dev->flags |= MLX4_FLAG_OLD_REG_MAC;
  313. }
  314. if (err)
  315. return err;
  316. return get_param_l(&out_param);
  317. }
  318. return __mlx4_register_mac(dev, port, mac);
  319. }
  320. EXPORT_SYMBOL_GPL(mlx4_register_mac);
  321. int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
  322. {
  323. return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
  324. (port - 1) * (1 << dev->caps.log_num_macs);
  325. }
  326. EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
  327. void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  328. {
  329. struct mlx4_port_info *info;
  330. struct mlx4_mac_table *table;
  331. int index;
  332. bool dup = mlx4_is_mf_bonded(dev);
  333. u8 dup_port = (port == 1) ? 2 : 1;
  334. struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
  335. if (port < 1 || port > dev->caps.num_ports) {
  336. mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
  337. return;
  338. }
  339. info = &mlx4_priv(dev)->port[port];
  340. table = &info->mac_table;
  341. if (dup) {
  342. if (port == 1) {
  343. mutex_lock(&table->mutex);
  344. mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
  345. } else {
  346. mutex_lock(&dup_table->mutex);
  347. mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
  348. }
  349. } else {
  350. mutex_lock(&table->mutex);
  351. }
  352. index = find_index(dev, table, mac);
  353. if (validate_index(dev, table, index))
  354. goto out;
  355. if (--table->refs[index] || table->is_dup[index]) {
  356. mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
  357. index);
  358. if (!table->refs[index])
  359. dup_table->is_dup[index] = false;
  360. goto out;
  361. }
  362. table->entries[index] = 0;
  363. if (mlx4_set_port_mac_table(dev, port, table->entries))
  364. mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
  365. --table->total;
  366. if (dup) {
  367. dup_table->is_dup[index] = false;
  368. if (dup_table->refs[index])
  369. goto out;
  370. dup_table->entries[index] = 0;
  371. if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
  372. mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
  373. --table->total;
  374. }
  375. out:
  376. if (dup) {
  377. if (port == 2) {
  378. mutex_unlock(&table->mutex);
  379. mutex_unlock(&dup_table->mutex);
  380. } else {
  381. mutex_unlock(&dup_table->mutex);
  382. mutex_unlock(&table->mutex);
  383. }
  384. } else {
  385. mutex_unlock(&table->mutex);
  386. }
  387. }
  388. EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
  389. void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  390. {
  391. u64 out_param = 0;
  392. if (mlx4_is_mfunc(dev)) {
  393. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  394. (void) mlx4_cmd_imm(dev, mac, &out_param,
  395. ((u32) port) << 8 | (u32) RES_MAC,
  396. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  397. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  398. } else {
  399. /* use old unregister mac format */
  400. set_param_l(&out_param, port);
  401. (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  402. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  403. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  404. }
  405. return;
  406. }
  407. __mlx4_unregister_mac(dev, port, mac);
  408. return;
  409. }
  410. EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
/* Replace the MAC behind an existing QP with @new_mac, in place.
 *
 * The table index is recovered from the QP number relative to the
 * port's base QPN.  Under multi-function bonding the mirrored entry
 * on the other port is updated too.  Returns 0 on success or a
 * negative errno; on firmware failure the software entry is cleared
 * rather than left stale.
 */
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;	/* QP range maps 1:1 to MAC slots */
	int err = 0;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;

	/* CX1 doesn't support multi-functions */
	/* Lock both tables in fixed port-1-then-port-2 order to avoid
	 * AB/BA deadlock with the register/unregister paths.
	 */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;	/* don't keep a stale entry */
	} else {
		if (dup) {
			/* Propagate the replacement to the mirrored slot. */
			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
			if (unlikely(err)) {
				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
					 (unsigned long long)new_mac);
				dup_table->entries[index] = 0;
			}
		}
	}
out:
	/* Release in reverse acquisition order. */
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
  467. static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
  468. __be32 *entries)
  469. {
  470. struct mlx4_cmd_mailbox *mailbox;
  471. u32 in_mod;
  472. int err;
  473. mailbox = mlx4_alloc_cmd_mailbox(dev);
  474. if (IS_ERR(mailbox))
  475. return PTR_ERR(mailbox);
  476. memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
  477. in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
  478. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  479. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  480. MLX4_CMD_NATIVE);
  481. mlx4_free_cmd_mailbox(dev, mailbox);
  482. return err;
  483. }
  484. int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
  485. {
  486. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  487. int i;
  488. for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
  489. if (table->refs[i] &&
  490. (vid == (MLX4_VLAN_MASK &
  491. be32_to_cpu(table->entries[i])))) {
  492. /* VLAN already registered, increase reference count */
  493. *idx = i;
  494. return 0;
  495. }
  496. }
  497. return -ENOENT;
  498. }
  499. EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
  500. int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
  501. int *index)
  502. {
  503. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  504. int i, err = 0;
  505. int free = -1;
  506. int free_for_dup = -1;
  507. bool dup = mlx4_is_mf_bonded(dev);
  508. u8 dup_port = (port == 1) ? 2 : 1;
  509. struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
  510. bool need_mf_bond = mlx4_need_mf_bond(dev);
  511. bool can_mf_bond = true;
  512. mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
  513. vlan, port,
  514. dup ? "with" : "without");
  515. if (need_mf_bond) {
  516. if (port == 1) {
  517. mutex_lock(&table->mutex);
  518. mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
  519. } else {
  520. mutex_lock(&dup_table->mutex);
  521. mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
  522. }
  523. } else {
  524. mutex_lock(&table->mutex);
  525. }
  526. if (table->total == table->max) {
  527. /* No free vlan entries */
  528. err = -ENOSPC;
  529. goto out;
  530. }
  531. if (need_mf_bond) {
  532. int index_at_port = -1;
  533. int index_at_dup_port = -1;
  534. for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
  535. if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
  536. index_at_port = i;
  537. if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
  538. index_at_dup_port = i;
  539. }
  540. /* check that same vlan is not in the tables at different indices */
  541. if ((index_at_port != index_at_dup_port) &&
  542. (index_at_port >= 0) &&
  543. (index_at_dup_port >= 0))
  544. can_mf_bond = false;
  545. /* If the vlan is already in the primary table, the slot must be
  546. * available in the duplicate table as well.
  547. */
  548. if (index_at_port >= 0 && index_at_dup_port < 0 &&
  549. dup_table->refs[index_at_port]) {
  550. can_mf_bond = false;
  551. }
  552. /* If the vlan is already in the duplicate table, check that the
  553. * corresponding index is not occupied in the primary table, or
  554. * the primary table already contains the vlan at the same index.
  555. * Otherwise, you cannot bond (primary contains a different vlan
  556. * at that index).
  557. */
  558. if (index_at_dup_port >= 0) {
  559. if (!table->refs[index_at_dup_port] ||
  560. (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
  561. free_for_dup = index_at_dup_port;
  562. else
  563. can_mf_bond = false;
  564. }
  565. }
  566. for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
  567. if (!table->refs[i]) {
  568. if (free < 0)
  569. free = i;
  570. if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
  571. if (!dup_table->refs[i])
  572. free_for_dup = i;
  573. }
  574. }
  575. if ((table->refs[i] || table->is_dup[i]) &&
  576. (vlan == (MLX4_VLAN_MASK &
  577. be32_to_cpu(table->entries[i])))) {
  578. /* Vlan already registered, increase references count */
  579. mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
  580. *index = i;
  581. ++table->refs[i];
  582. if (dup) {
  583. u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
  584. if (dup_vlan != vlan || !dup_table->is_dup[i]) {
  585. mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
  586. vlan, dup_port, i);
  587. }
  588. }
  589. goto out;
  590. }
  591. }
  592. if (need_mf_bond && (free_for_dup < 0)) {
  593. if (dup) {
  594. mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
  595. mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
  596. dup = false;
  597. }
  598. can_mf_bond = false;
  599. }
  600. if (need_mf_bond && can_mf_bond)
  601. free = free_for_dup;
  602. if (free < 0) {
  603. err = -ENOMEM;
  604. goto out;
  605. }
  606. /* Register new VLAN */
  607. table->refs[free] = 1;
  608. table->is_dup[free] = false;
  609. table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
  610. err = mlx4_set_port_vlan_table(dev, port, table->entries);
  611. if (unlikely(err)) {
  612. mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
  613. table->refs[free] = 0;
  614. table->entries[free] = 0;
  615. goto out;
  616. }
  617. ++table->total;
  618. if (dup) {
  619. dup_table->refs[free] = 0;
  620. dup_table->is_dup[free] = true;
  621. dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
  622. err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
  623. if (unlikely(err)) {
  624. mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
  625. dup_table->is_dup[free] = false;
  626. dup_table->entries[free] = 0;
  627. goto out;
  628. }
  629. ++dup_table->total;
  630. }
  631. *index = free;
  632. out:
  633. if (need_mf_bond) {
  634. if (port == 2) {
  635. mutex_unlock(&table->mutex);
  636. mutex_unlock(&dup_table->mutex);
  637. } else {
  638. mutex_unlock(&dup_table->mutex);
  639. mutex_unlock(&table->mutex);
  640. }
  641. } else {
  642. mutex_unlock(&table->mutex);
  643. }
  644. return err;
  645. }
  646. int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
  647. {
  648. u64 out_param = 0;
  649. int err;
  650. if (vlan > 4095)
  651. return -EINVAL;
  652. if (mlx4_is_mfunc(dev)) {
  653. err = mlx4_cmd_imm(dev, vlan, &out_param,
  654. ((u32) port) << 8 | (u32) RES_VLAN,
  655. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  656. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  657. if (!err)
  658. *index = get_param_l(&out_param);
  659. return err;
  660. }
  661. return __mlx4_register_vlan(dev, port, vlan, index);
  662. }
  663. EXPORT_SYMBOL_GPL(mlx4_register_vlan);
/* Drop one reference on @vlan in port @port's VLAN table.  When the
 * last reference is gone (and the entry is not held as a bonding
 * mirror), the slot is cleared and pushed to firmware; under
 * multi-function bonding the mirrored entry on the other port is torn
 * down too.  Reserved special slots (< MLX4_VLAN_REGULAR) are never
 * freed.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;

	/* Lock both tables in fixed port-1-then-port-2 order to avoid
	 * AB/BA deadlock with the register path.
	 */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index] || table->is_dup[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		/* refs hit zero but the entry survives as a mirror: it is
		 * no longer a duplicate from the peer's point of view.
		 */
		if (!table->refs[index])
			dup_table->is_dup[index] = false;
		goto out;
	}
	table->entries[index] = 0;
	if (mlx4_set_port_vlan_table(dev, port, table->entries))
		mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
	--table->total;
	if (dup) {
		dup_table->is_dup[index] = false;
		if (dup_table->refs[index])
			goto out;	/* peer still uses the entry itself */
		dup_table->entries[index] = 0;
		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
		--dup_table->total;
	}
out:
	/* Release in reverse acquisition order. */
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
}
  723. void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
  724. {
  725. u64 out_param = 0;
  726. if (mlx4_is_mfunc(dev)) {
  727. (void) mlx4_cmd_imm(dev, vlan, &out_param,
  728. ((u32) port) << 8 | (u32) RES_VLAN,
  729. RES_OP_RESERVE_AND_MAP,
  730. MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
  731. MLX4_CMD_WRAPPED);
  732. return;
  733. }
  734. __mlx4_unregister_vlan(dev, port, vlan);
  735. }
  736. EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
/* Mirror the MAC tables of the two physical ports so both contain the
 * union of their entries (multi-function bonding).  Fails with -EINVAL
 * if the same index holds two different MACs on the two ports.  The
 * second HW update is skipped if the first fails.
 */
int mlx4_bond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Pass 1: refuse to bond if any index holds conflicting entries */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Pass 2: copy each one-sided entry to the other port and mark
	 * both sides as duplicates.
	 */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* Undo mlx4_bond_mac_table(): clear all is_dup markings and remove
 * entries that exist only as mirrors (local refcount zero), then push
 * the updated tables to firmware.  Both tables must mirror each other
 * exactly on entry.  Unlike the bond path, the port-2 update is
 * attempted even if the port-1 update failed; the last error wins.
 */
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Sanity: bonded tables must be identical */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			/* entry existed only as a mirror of the other port */
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* VLAN-table counterpart of mlx4_bond_mac_table(): mirror the VLAN
 * tables of the two physical ports so both contain the union of their
 * entries.  Fails with -EINVAL if the same index holds two different
 * VLANs on the two ports.  The second HW update is skipped if the
 * first fails.
 */
int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Pass 1: refuse to bond if any index holds conflicting entries */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Pass 2: copy each one-sided entry to the other port and mark
	 * both sides as duplicates.
	 */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* Undo mlx4_bond_vlan_table(): clear all is_dup markings and remove
 * entries that exist only as mirrors (local refcount zero), then push
 * the updated tables to firmware.  Both tables must mirror each other
 * exactly on entry.  The port-2 update is attempted even if the port-1
 * update failed; the last error wins.
 */
int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Sanity: bonded tables must be identical */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			/* entry existed only as a mirror of the other port */
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* Query @port's IB capability mask via a MAD_IFC command and store it
 * (big-endian, as returned by firmware) in *@caps.  Returns 0 or a
 * negative errno.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	/* Build the MAD request by hand.  NOTE(review): the offsets appear
	 * to follow the standard IB MAD header layout (0x0015 = PortInfo
	 * attribute ID, attribute modifier = port number) -- confirm
	 * against the IB MAD specification before changing them.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* capability mask lives at byte offset 84 of the response */
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
/* All-zero RoCE GID entry; compared against to detect empty GID slots */
static struct mlx4_roce_gid_entry zgid_entry;
/* Return the number of RoCE GIDs owned by @slave on @port.  The PF
 * (slave 0) owns MLX4_ROCE_PF_GIDS; the remaining GIDs are split as
 * evenly as possible among the VFs active on the port, with the first
 * (remainder) VFs receiving one extra entry.
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into a per-port ordinal by
	 * subtracting slaves that are active only on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* first (remainder) VFs get one extra GID */
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
/* Return the first RoCE GID table index owned by @slave on @port.
 * The PF (slave 0) starts at index 0; VFs are packed after the
 * MLX4_ROCE_PF_GIDS PF entries, using the same even split as
 * mlx4_get_slave_num_gids().
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into a per-port ordinal by
	 * subtracting slaves that are active only on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* earlier VFs may hold one extra GID each (the remainder) */
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
/* Zero @slave's GID range in @port's software RoCE GID table, then push
 * the whole per-port table to firmware via SET_PORT(GID_TABLE).
 * @mailbox is caller-allocated and is overwritten here.
 */
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}
  1059. void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
  1060. {
  1061. struct mlx4_active_ports actv_ports;
  1062. struct mlx4_cmd_mailbox *mailbox;
  1063. int num_eth_ports, err;
  1064. int i;
  1065. if (slave < 0 || slave > dev->persist->num_vfs)
  1066. return;
  1067. actv_ports = mlx4_get_active_ports(dev, slave);
  1068. for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
  1069. if (test_bit(i, actv_ports.ports)) {
  1070. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1071. continue;
  1072. num_eth_ports++;
  1073. }
  1074. }
  1075. if (!num_eth_ports)
  1076. return;
  1077. /* have ETH ports. Alloc mailbox for SET_PORT command */
  1078. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1079. if (IS_ERR(mailbox))
  1080. return;
  1081. for (i = 0; i < dev->caps.num_ports; i++) {
  1082. if (test_bit(i, actv_ports.ports)) {
  1083. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1084. continue;
  1085. err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
  1086. if (err)
  1087. mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
  1088. slave, i + 1, err);
  1089. }
  1090. }
  1091. mlx4_free_cmd_mailbox(dev, mailbox);
  1092. return;
  1093. }
  1094. static void
  1095. mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
  1096. struct mlx4_set_port_general_context *gen_context)
  1097. {
  1098. struct mlx4_priv *priv = mlx4_priv(dev);
  1099. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1100. struct mlx4_slave_state *slave_st = &master->slave_state[slave];
  1101. u16 mtu, prev_mtu;
  1102. /* Mtu is configured as the max USER_MTU among all
  1103. * the functions on the port.
  1104. */
  1105. mtu = be16_to_cpu(gen_context->mtu);
  1106. mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
  1107. ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
  1108. prev_mtu = slave_st->mtu[port];
  1109. slave_st->mtu[port] = mtu;
  1110. if (mtu > master->max_mtu[port])
  1111. master->max_mtu[port] = mtu;
  1112. if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
  1113. int i;
  1114. slave_st->mtu[port] = mtu;
  1115. master->max_mtu[port] = mtu;
  1116. for (i = 0; i < dev->num_slaves; i++)
  1117. master->max_mtu[port] =
  1118. max_t(u16, master->max_mtu[port],
  1119. master->slave_state[i].mtu[port]);
  1120. }
  1121. gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
  1122. }
  1123. static void
  1124. mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
  1125. struct mlx4_set_port_general_context *gen_context)
  1126. {
  1127. struct mlx4_priv *priv = mlx4_priv(dev);
  1128. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1129. struct mlx4_slave_state *slave_st = &master->slave_state[slave];
  1130. u16 user_mtu, prev_user_mtu;
  1131. /* User Mtu is configured as the max USER_MTU among all
  1132. * the functions on the port.
  1133. */
  1134. user_mtu = be16_to_cpu(gen_context->user_mtu);
  1135. user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
  1136. prev_user_mtu = slave_st->user_mtu[port];
  1137. slave_st->user_mtu[port] = user_mtu;
  1138. if (user_mtu > master->max_user_mtu[port])
  1139. master->max_user_mtu[port] = user_mtu;
  1140. if (user_mtu < prev_user_mtu &&
  1141. prev_user_mtu == master->max_user_mtu[port]) {
  1142. int i;
  1143. slave_st->user_mtu[port] = user_mtu;
  1144. master->max_user_mtu[port] = user_mtu;
  1145. for (i = 0; i < dev->num_slaves; i++)
  1146. master->max_user_mtu[port] =
  1147. max_t(u16, master->max_user_mtu[port],
  1148. master->slave_state[i].user_mtu[port]);
  1149. }
  1150. gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
  1151. }
  1152. static void
  1153. mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
  1154. struct mlx4_set_port_general_context *gen_context)
  1155. {
  1156. struct mlx4_priv *priv = mlx4_priv(dev);
  1157. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1158. /* Slave cannot change Global Pause configuration */
  1159. if (slave != mlx4_master_func_num(dev) &&
  1160. (gen_context->pptx != master->pptx ||
  1161. gen_context->pprx != master->pprx)) {
  1162. gen_context->pptx = master->pptx;
  1163. gen_context->pprx = master->pprx;
  1164. mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
  1165. slave);
  1166. } else {
  1167. master->pptx = gen_context->pptx;
  1168. master->pprx = gen_context->pprx;
  1169. }
  1170. }
/* Master-side handler for a (possibly slave-issued) SET_PORT command.
 * For Ethernet (op_mod != 0) it sanitizes the slave's mailbox before
 * forwarding to firmware:
 *  - RQP_CALC: forces the port's real base QPN into the context;
 *  - GENERAL: arbitrates MTU / USER_MTU / global pause among functions;
 *  - GID_TABLE: validates the slave's GIDs against itself and the rest
 *    of the port table, then splices them in at the slave's base index.
 * For IB it aggregates the capability mask over all slaves and handles
 * the QKey-violation-counter reset bit.  Returns 0 or negative errno.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* in_mod layout: low byte = port, upper bits = SET_PORT sub-opcode */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations,
	 * except for changing MTU and USER_MTU.
	 */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Overwrite the QPN fields with the port's true base
			 * QPN, preserving only the promiscuous-mode bits the
			 * requester supplied.
			 */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			/* Arbitrate each field the requester marked valid */
			gen_context = inbox->buf;
			if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
				mlx4_en_set_port_mtu(dev, slave, port,
						     gen_context);
			if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
				mlx4_en_set_port_user_mtu(dev, slave, port,
							  gen_context);
			if (gen_context->flags &
			    (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
				mlx4_en_set_port_global_pause(dev, slave,
							      gen_context);
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* change to MULTIPLE entries: number of guest's gids
			 * need a FOR-loop here over number of gids the guest has.
			 * 1. Check no duplicates in gids passed by slave
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}

			/* 2. Check that do not have duplicates in OTHER
			 *    entries in the port GID table
			 */
			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}

			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);

			/* FW command issued under the gid_table mutex so the
			 * SW table and HW stay consistent.
			 */
			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}

		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
		return -EPERM;
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violatin counter - reset according to each request.
	 */

	/* mailbox layout differs between old- and new-style port commands */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	/* Record this slave's mask, then OR together all slaves' masks;
	 * the old mask is kept for rollback if the FW command fails.
	 */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* roll back this slave's cached capability mask */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
  1358. int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
  1359. struct mlx4_vhcr *vhcr,
  1360. struct mlx4_cmd_mailbox *inbox,
  1361. struct mlx4_cmd_mailbox *outbox,
  1362. struct mlx4_cmd_info *cmd)
  1363. {
  1364. int port = mlx4_slave_convert_port(
  1365. dev, slave, vhcr->in_modifier & 0xFF);
  1366. if (port < 0)
  1367. return -EINVAL;
  1368. vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
  1369. (port & 0xFF);
  1370. return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
  1371. vhcr->op_modifier, inbox);
  1372. }
/* bit locations for set port command with zero op modifier
 * (word 0 of the IB SET_PORT mailbox, see mlx4_SET_PORT() below)
 */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, /* change-enable flags */
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
/* Configure an InfiniBand port: program the default capability mask,
 * optionally the PKey table size (master only, when @pkey_tbl_sz >= 0),
 * and the MTU/VL capabilities.  Returns 0 immediately for Ethernet
 * ports.  The VL cap is retried downward (8, 4, 2, 1) while the
 * command fails with -ENOMEM.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* word 1: default capability mask */
	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP) |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port,
			       MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		/* retry with a smaller VL cap only on -ENOMEM */
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define SET_PORT_ROCE_2_FLAGS          0x10
#define MLX4_SET_PORT_ROCE_V1_V2       0x2

/* Configure general Ethernet port parameters: MTU, global pause and
 * per-priority flow-control bits, and (when the device supports it)
 * RoCE v1/v2 mode.  Global pause (pptx/pprx, bit 7) is forced off when
 * the corresponding PFC bitmap (pfctx/pfcrx) is non-zero -- the two
 * are mutually exclusive.
 */
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	/* bit 7 = global pause, suppressed when PFC bits are present */
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		context->flags |= SET_PORT_ROCE_2_FLAGS;
		context->roce_mode |=
			MLX4_SET_PORT_ROCE_V1_V2 << 4;
	}
	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);
/* Program the RX QP calculation context for @port: base QPN, number of
 * MAC-indexed QP bits, unicast/multicast promiscuous modes and the
 * default/miss VLAN indices.  No-op (returns 0) unless the device uses
 * MLX4_STEERING_MODE_A0.
 */
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	/* multicast promiscuous mode depends on VEP MC steering support */
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
  1477. int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
  1478. {
  1479. struct mlx4_cmd_mailbox *mailbox;
  1480. struct mlx4_set_port_general_context *context;
  1481. u32 in_mod;
  1482. int err;
  1483. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1484. if (IS_ERR(mailbox))
  1485. return PTR_ERR(mailbox);
  1486. context = mailbox->buf;
  1487. context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
  1488. context->user_mtu = cpu_to_be16(user_mtu);
  1489. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1490. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1491. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1492. MLX4_CMD_WRAPPED);
  1493. mlx4_free_cmd_mailbox(dev, mailbox);
  1494. return err;
  1495. }
  1496. EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
/* Pass a host-administered ("user") MAC address for @port to firmware
 * through the SET_PORT general context.
 * NOTE(review): issued as MLX4_CMD_NATIVE (not routed through the slave
 * wrapper), so presumably PF-only -- confirm with callers.
 */
int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK;
	memcpy(context->user_mac, user_mac, sizeof(context->user_mac));

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_user_mac);
  1517. int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
  1518. {
  1519. struct mlx4_cmd_mailbox *mailbox;
  1520. struct mlx4_set_port_general_context *context;
  1521. u32 in_mod;
  1522. int err;
  1523. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1524. if (IS_ERR(mailbox))
  1525. return PTR_ERR(mailbox);
  1526. context = mailbox->buf;
  1527. context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
  1528. if (ignore_fcs_value)
  1529. context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
  1530. else
  1531. context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
  1532. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1533. err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
  1534. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  1535. mlx4_free_cmd_mailbox(dev, mailbox);
  1536. return err;
  1537. }
  1538. EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
/* Bit masks used by mlx4_SET_PORT_VXLAN.  Note that VXLAN_ENABLE_MODIFY
 * and VXLAN_ENABLE are both 1 << 7: the former is set in modify_flags to
 * select the field being changed, the latter is the value written into
 * enable_flags.
 */
enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,  /* modify_flags: enable_flags field is valid */
	VXLAN_STEERING_MODIFY	= 1 << 6,  /* modify_flags: steering field is valid */
	VXLAN_ENABLE		= 1 << 7,  /* enable_flags: turn VXLAN offload on */
};
/* Mailbox payload for the SET_PORT VXLAN command (MLX4_SET_PORT_VXLAN).
 * NOTE(review): field order presumably mirrors the firmware mailbox
 * layout - confirm against the PRM before reordering.
 */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;	/* VXLAN_*_MODIFY bits: which fields are valid */
	u8	reserved2;
	u8	enable_flags;	/* VXLAN_ENABLE or 0 */
	u8	steering;	/* steering mode for VXLAN traffic */
};
  1551. int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
  1552. {
  1553. int err;
  1554. u32 in_mod;
  1555. struct mlx4_cmd_mailbox *mailbox;
  1556. struct mlx4_set_port_vxlan_context *context;
  1557. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1558. if (IS_ERR(mailbox))
  1559. return PTR_ERR(mailbox);
  1560. context = mailbox->buf;
  1561. memset(context, 0, sizeof(*context));
  1562. context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
  1563. if (enable)
  1564. context->enable_flags = VXLAN_ENABLE;
  1565. context->steering = steering;
  1566. in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
  1567. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1568. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1569. MLX4_CMD_NATIVE);
  1570. mlx4_free_cmd_mailbox(dev, mailbox);
  1571. return err;
  1572. }
  1573. EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
  1574. int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
  1575. {
  1576. int err;
  1577. struct mlx4_cmd_mailbox *mailbox;
  1578. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1579. if (IS_ERR(mailbox))
  1580. return PTR_ERR(mailbox);
  1581. *((__be32 *)mailbox->buf) = cpu_to_be32(time);
  1582. err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
  1583. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1584. MLX4_CMD_NATIVE);
  1585. mlx4_free_cmd_mailbox(dev, mailbox);
  1586. return err;
  1587. }
  1588. EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
  1589. int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1590. struct mlx4_vhcr *vhcr,
  1591. struct mlx4_cmd_mailbox *inbox,
  1592. struct mlx4_cmd_mailbox *outbox,
  1593. struct mlx4_cmd_info *cmd)
  1594. {
  1595. int err = 0;
  1596. return err;
  1597. }
  1598. int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
  1599. u64 mac, u64 clear, u8 mode)
  1600. {
  1601. return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
  1602. MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
  1603. MLX4_CMD_WRAPPED);
  1604. }
  1605. EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
  1606. int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1607. struct mlx4_vhcr *vhcr,
  1608. struct mlx4_cmd_mailbox *inbox,
  1609. struct mlx4_cmd_mailbox *outbox,
  1610. struct mlx4_cmd_info *cmd)
  1611. {
  1612. int err = 0;
  1613. return err;
  1614. }
  1615. int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
  1616. struct mlx4_vhcr *vhcr,
  1617. struct mlx4_cmd_mailbox *inbox,
  1618. struct mlx4_cmd_mailbox *outbox,
  1619. struct mlx4_cmd_info *cmd)
  1620. {
  1621. return 0;
  1622. }
  1623. int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
  1624. int *slave_id)
  1625. {
  1626. struct mlx4_priv *priv = mlx4_priv(dev);
  1627. int i, found_ix = -1;
  1628. int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
  1629. struct mlx4_slaves_pport slaves_pport;
  1630. unsigned num_vfs;
  1631. int slave_gid;
  1632. if (!mlx4_is_mfunc(dev))
  1633. return -EINVAL;
  1634. slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
  1635. num_vfs = bitmap_weight(slaves_pport.slaves,
  1636. dev->persist->num_vfs + 1) - 1;
  1637. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
  1638. if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
  1639. MLX4_ROCE_GID_ENTRY_SIZE)) {
  1640. found_ix = i;
  1641. break;
  1642. }
  1643. }
  1644. if (found_ix >= 0) {
  1645. /* Calculate a slave_gid which is the slave number in the gid
  1646. * table and not a globally unique slave number.
  1647. */
  1648. if (found_ix < MLX4_ROCE_PF_GIDS)
  1649. slave_gid = 0;
  1650. else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
  1651. (vf_gids / num_vfs + 1))
  1652. slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
  1653. (vf_gids / num_vfs + 1)) + 1;
  1654. else
  1655. slave_gid =
  1656. ((found_ix - MLX4_ROCE_PF_GIDS -
  1657. ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
  1658. (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
  1659. /* Calculate the globally unique slave id */
  1660. if (slave_gid) {
  1661. struct mlx4_active_ports exclusive_ports;
  1662. struct mlx4_active_ports actv_ports;
  1663. struct mlx4_slaves_pport slaves_pport_actv;
  1664. unsigned max_port_p_one;
  1665. int num_vfs_before = 0;
  1666. int candidate_slave_gid;
  1667. /* Calculate how many VFs are on the previous port, if exists */
  1668. for (i = 1; i < port; i++) {
  1669. bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
  1670. set_bit(i - 1, exclusive_ports.ports);
  1671. slaves_pport_actv =
  1672. mlx4_phys_to_slaves_pport_actv(
  1673. dev, &exclusive_ports);
  1674. num_vfs_before += bitmap_weight(
  1675. slaves_pport_actv.slaves,
  1676. dev->persist->num_vfs + 1);
  1677. }
  1678. /* candidate_slave_gid isn't necessarily the correct slave, but
  1679. * it has the same number of ports and is assigned to the same
  1680. * ports as the real slave we're looking for. On dual port VF,
  1681. * slave_gid = [single port VFs on port <port>] +
  1682. * [offset of the current slave from the first dual port VF] +
  1683. * 1 (for the PF).
  1684. */
  1685. candidate_slave_gid = slave_gid + num_vfs_before;
  1686. actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
  1687. max_port_p_one = find_first_bit(
  1688. actv_ports.ports, dev->caps.num_ports) +
  1689. bitmap_weight(actv_ports.ports,
  1690. dev->caps.num_ports) + 1;
  1691. /* Calculate the real slave number */
  1692. for (i = 1; i < max_port_p_one; i++) {
  1693. if (i == port)
  1694. continue;
  1695. bitmap_zero(exclusive_ports.ports,
  1696. dev->caps.num_ports);
  1697. set_bit(i - 1, exclusive_ports.ports);
  1698. slaves_pport_actv =
  1699. mlx4_phys_to_slaves_pport_actv(
  1700. dev, &exclusive_ports);
  1701. slave_gid += bitmap_weight(
  1702. slaves_pport_actv.slaves,
  1703. dev->persist->num_vfs + 1);
  1704. }
  1705. }
  1706. *slave_id = slave_gid;
  1707. }
  1708. return (found_ix >= 0) ? 0 : -EINVAL;
  1709. }
  1710. EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
  1711. int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
  1712. u8 *gid)
  1713. {
  1714. struct mlx4_priv *priv = mlx4_priv(dev);
  1715. if (!mlx4_is_master(dev))
  1716. return -EINVAL;
  1717. memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
  1718. MLX4_ROCE_GID_ENTRY_SIZE);
  1719. return 0;
  1720. }
  1721. EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ	48	/* max eeprom bytes per MAD_IFC read */

#define I2C_ADDR_LOW	0x50	/* i2c slave address of the low eeprom page */
#define I2C_ADDR_HIGH	0x51	/* i2c slave address of the high eeprom page */
#define I2C_PAGE_SIZE	256	/* bytes per eeprom page */

/* Module Info Data: request/response payload carried inside the MAD for
 * the Module Info attribute (0xFF60); see mlx4_get_module_info().
 */
struct mlx4_cable_info {
	u8	i2c_addr;	/* I2C_ADDR_LOW or I2C_ADDR_HIGH */
	u8	page_num;
	__be16	dev_mem_address; /* byte offset within the selected page */
	__be16	reserved1;
	__be16	size;		/* number of bytes requested/returned */
	__be32	reserved2[2];
	u8	data[MODULE_INFO_MAX_READ]; /* eeprom bytes returned by firmware */
};
/* Error codes carried in the high byte of the MAD status word for
 * Module Info queries; decoded by cable_info_mad_err_str().
 */
enum cable_info_err {
	CABLE_INF_INV_PORT	= 0x1,	/* invalid port selected */
	CABLE_INF_OP_NOSUP	= 0x2,	/* operation not supported on this port */
	CABLE_INF_NOT_CONN	= 0x3,	/* cable not connected */
	CABLE_INF_NO_EEPRM	= 0x4,	/* cable has no eeprom */
	CABLE_INF_PAGE_ERR	= 0x5,	/* page number out of range */
	CABLE_INF_INV_ADDR	= 0x6,	/* bad device address or size */
	CABLE_INF_I2C_ADDR	= 0x7,	/* invalid i2c slave address */
	CABLE_INF_QSFP_VIO	= 0x8,	/* cable violates QSFP spec */
	CABLE_INF_I2C_BUSY	= 0x9,	/* i2c bus constantly busy */
};
  1748. #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
  1749. static inline const char *cable_info_mad_err_str(u16 mad_status)
  1750. {
  1751. u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
  1752. switch (err) {
  1753. case CABLE_INF_INV_PORT:
  1754. return "invalid port selected";
  1755. case CABLE_INF_OP_NOSUP:
  1756. return "operation not supported for this port (the port is of type CX4 or internal)";
  1757. case CABLE_INF_NOT_CONN:
  1758. return "cable is not connected";
  1759. case CABLE_INF_NO_EEPRM:
  1760. return "the connected cable has no EPROM (passive copper cable)";
  1761. case CABLE_INF_PAGE_ERR:
  1762. return "page number is greater than 15";
  1763. case CABLE_INF_INV_ADDR:
  1764. return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
  1765. case CABLE_INF_I2C_ADDR:
  1766. return "invalid I2C slave address";
  1767. case CABLE_INF_QSFP_VIO:
  1768. return "at least one cable violates the QSFP specification and ignores the modsel signal";
  1769. case CABLE_INF_I2C_BUSY:
  1770. return "I2C bus is constantly busy";
  1771. }
  1772. return "Unknown Error";
  1773. }
/**
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer parameter.
 * Returns num of read bytes on success or a negative error
 * code.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* A single request can carry at most MODULE_INFO_MAX_READ bytes;
	 * the caller learns the actual count from the return value.
	 */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a MAD Get request for the Module Info attribute (0xFF60). */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	/* Fill the cable-info request embedded in the MAD payload. */
	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	/* op_mod 3: presumably ignore mkey/bkey validation
	 * (MLX4_MAD_IFC_IGNORE_KEYS) - TODO confirm against the MAD_IFC
	 * definitions.
	 */
	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	/* Success: copy the returned eeprom bytes out of the response MAD. */
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
  1859. int mlx4_max_tc(struct mlx4_dev *dev)
  1860. {
  1861. u8 num_tc = dev->caps.max_tc_eth;
  1862. if (!num_tc)
  1863. num_tc = MLX4_TC_MAX_NUMBER;
  1864. return num_tc;
  1865. }
  1866. EXPORT_SYMBOL(mlx4_max_tc);