port.c

/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

#define MLX4_FLAG2_V_IGNORE_FCS_MASK		BIT(1)
#define MLX4_FLAG2_V_USER_MTU_MASK		BIT(5)
#define MLX4_FLAG_V_MTU_MASK			BIT(0)
#define MLX4_FLAG_V_PPRX_MASK			BIT(1)
#define MLX4_FLAG_V_PPTX_MASK			BIT(2)
#define MLX4_IGNORE_FCS_MASK			0x1
#define MLX4_TC_MAX_NUMBER			8

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
		table->is_dup[i] = false;
	}
	table->max = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
		table->is_dup[i] = false;
	}
	table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (table->refs[i] &&
		    (MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* Mac not found */
	return -EINVAL;
}

static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i])
			continue;

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);

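/* Descriptive comment (added): multi-function bonding is only relevant when
 * both physical ports of the device are Ethernet; in that case the MAC and
 * VLAN tables of the two ports are kept as mirrors of each other so that
 * virtual functions keep working across a port failover.
 */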
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
	int i, num_eth_ports = 0;

	if (!mlx4_is_mfunc(dev))
		return false;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		++num_eth_ports;

	return (num_eth_ports == 2) ? true : false;
}

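/* Descriptive comment (added): add @mac to the port's MAC table (or take
 * another reference if it is already present) and return the table index on
 * success.  When the two ports are MF-bonded, the entry is mirrored into the
 * other port's table at the same index, which is why both table mutexes are
 * taken in a fixed order here.
 */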
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;
	int free_for_dup = -1;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
		 (unsigned long long)mac, port,
		 dup ? "with" : "without");

	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
				index_at_port = i;
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
				index_at_dup_port = i;
		}

		/* check that same mac is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the mac is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}
		/* If the mac is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the mac at the same index.
		 * Otherwise, you cannot bond (primary contains a different mac
		 * at that index).
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
			continue;
		}

		if ((MLX4_MAC_MASK & mac) ==
		     (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			if (dup) {
				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);

				if (dup_mac != mac || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
						  mac, dup_port, i);
				}
			}
			goto out;
		}
	}

	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	table->is_dup[free] = false;
	++table->total;
	if (dup) {
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}
	err = free;
out:
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);

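/* Descriptive comment (added): drop one reference on @mac in the port's MAC
 * table and clear the entry (and its mirror on the other port when
 * MF-bonded) once the last reference is gone.
 */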
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info;
	struct mlx4_mac_table *table;
	int index;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;

	if (port < 1 || port > dev->caps.num_ports) {
		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
		return;
	}
	info = &mlx4_priv(dev)->port[port];
	table = &info->mac_table;

	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;

	if (--table->refs[index] || table->is_dup[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		if (!table->refs[index])
			dup_table->is_dup[index] = false;
		goto out;
	}

	table->entries[index] = 0;
	if (mlx4_set_port_mac_table(dev, port, table->entries))
		mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
	--table->total;

	if (dup) {
		dup_table->is_dup[index] = false;
		if (dup_table->refs[index])
			goto out;
		dup_table->entries[index] = 0;
		if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);

		--table->total;
	}
out:
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			(void) mlx4_cmd_imm(dev, mac, &out_param,
					    ((u32) port) << 8 | (u32) RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		} else {
			/* use old unregister mac format */
			set_param_l(&out_param, port);
			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
	return;
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

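/* Descriptive comment (added): overwrite the MAC table entry that backs @qpn
 * with @new_mac, mirroring the change to the other port when MF-bonded.
 */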
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;

	/* CX1 doesn't support multi-functions */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	} else {
		if (dup) {
			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
			if (unlikely(err)) {
				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
					 (unsigned long long)new_mac);
				dup_table->entries[index] = 0;
			}
		}
	}
out:
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

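/* Descriptive comment (added): VLAN registration follows the same scheme as
 * MAC registration above: indices below MLX4_VLAN_REGULAR are reserved,
 * an already-registered VLAN just gains a reference, and MF-bonded ports
 * keep mirrored VLAN tables.
 */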
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;
	int free_for_dup = -1;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
		 vlan, port,
		 dup ? "with" : "without");

	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
			if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
				index_at_port = i;
			if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
				index_at_dup_port = i;
		}
		/* check that same vlan is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the vlan is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}
		/* If the vlan is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the vlan at the same index.
		 * Otherwise, you cannot bond (primary contains a different vlan
		 * at that index).
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!table->refs[i]) {
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
		}

		if ((table->refs[i] || table->is_dup[i]) &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase reference count */
			mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
			*index = i;
			++table->refs[i];
			if (dup) {
				u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);

				if (dup_vlan != vlan || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
						  vlan, dup_port, i);
				}
			}
			goto out;
		}
	}

	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->is_dup[free] = false;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}
	++table->total;
	if (dup) {
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

		err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}

	*index = free;
out:
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, vlan, &out_param,
				   ((u32) port) << 8 | (u32) RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;

	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index] || table->is_dup[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		if (!table->refs[index])
			dup_table->is_dup[index] = false;
		goto out;
	}
	table->entries[index] = 0;
	if (mlx4_set_port_vlan_table(dev, port, table->entries))
		mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
	--table->total;
	if (dup) {
		dup_table->is_dup[index] = false;
		if (dup_table->refs[index])
			goto out;
		dup_table->entries[index] = 0;
		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
		--dup_table->total;
	}
out:
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		(void) mlx4_cmd_imm(dev, vlan, &out_param,
				    ((u32) port) << 8 | (u32) RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

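/* Descriptive comment (added): mirror the MAC tables of port 1 and port 2
 * into each other when the ports are bonded.  Entries present on only one
 * port are copied to the other and marked is_dup; conflicting entries at the
 * same index abort the bond with -EINVAL.
 */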
int mlx4_bond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}

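/* Descriptive comment (added): undo mlx4_bond_mac_table(): clear the is_dup
 * markers and remove entries that were only kept as mirrors, i.e. entries
 * with no local references on that port.
 */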
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}

int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}

int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}

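/* Descriptive comment (added): read the IB port capability mask by issuing a
 * MAD_IFC PortInfo query (attribute 0x0015) through the firmware command
 * interface and extracting the capability mask from the response.
 */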
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

static struct mlx4_roce_gid_entry zgid_entry;

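/* Descriptive comment (added): the RoCE GID table (MLX4_ROCE_MAX_GIDS
 * entries) is split between the PF, which owns MLX4_ROCE_PF_GIDS entries,
 * and the VFs active on the port, which share the remainder as evenly as
 * possible.  This helper returns the number of GIDs owned by @slave.
 */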
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}

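/* Descriptive comment (added): return the first GID table index owned by
 * @slave on @port; the PF owns indices 0..MLX4_ROCE_PF_GIDS-1 and the VFs
 * follow in slave order.
 */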
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
				    dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);

static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}

void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	struct mlx4_cmd_mailbox *mailbox;
	int num_eth_ports, err;
	int i;

	if (slave < 0 || slave > dev->persist->num_vfs)
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);

	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			num_eth_ports++;
		}
	}

	if (!num_eth_ports)
		return;

	/* have ETH ports.  Alloc mailbox for SET_PORT command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (test_bit(i, actv_ports.ports)) {
			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
				continue;
			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
			if (err)
				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
					  slave, i + 1, err);
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return;
}

static void
mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
		     struct mlx4_set_port_general_context *gen_context)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	u16 mtu, prev_mtu;

	/* Mtu is configured as the max MTU among all
	 * the functions on the port.
	 */
	mtu = be16_to_cpu(gen_context->mtu);
	mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
		    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	prev_mtu = slave_st->mtu[port];
	slave_st->mtu[port] = mtu;
	if (mtu > master->max_mtu[port])
		master->max_mtu[port] = mtu;
	if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
		int i;

		slave_st->mtu[port] = mtu;
		master->max_mtu[port] = mtu;
		for (i = 0; i < dev->num_slaves; i++)
			master->max_mtu[port] =
				max_t(u16, master->max_mtu[port],
				      master->slave_state[i].mtu[port]);
	}
	gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
}

static void
mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_set_port_general_context *gen_context)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	u16 user_mtu, prev_user_mtu;

	/* User Mtu is configured as the max USER_MTU among all
	 * the functions on the port.
	 */
	user_mtu = be16_to_cpu(gen_context->user_mtu);
	user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
	prev_user_mtu = slave_st->user_mtu[port];
	slave_st->user_mtu[port] = user_mtu;
	if (user_mtu > master->max_user_mtu[port])
		master->max_user_mtu[port] = user_mtu;
	if (user_mtu < prev_user_mtu &&
	    prev_user_mtu == master->max_user_mtu[port]) {
		int i;

		slave_st->user_mtu[port] = user_mtu;
		master->max_user_mtu[port] = user_mtu;
		for (i = 0; i < dev->num_slaves; i++)
			master->max_user_mtu[port] =
				max_t(u16, master->max_user_mtu[port],
				      master->slave_state[i].user_mtu[port]);
	}
	gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
}

static void
mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
			      struct mlx4_set_port_general_context *gen_context)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;

	/* Slave cannot change Global Pause configuration */
	if (slave != mlx4_master_func_num(dev) &&
	    (gen_context->pptx != master->pptx ||
	     gen_context->pprx != master->pprx)) {
		gen_context->pptx = master->pptx;
		gen_context->pprx = master->pprx;
		mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
			  slave);
	} else {
		master->pptx = gen_context->pptx;
		master->pprx = gen_context->pprx;
	}
}

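/* Descriptive comment (added): SET_PORT handling performed by the PF on
 * behalf of a slave.  For Ethernet ports only the RQP_CALC, GENERAL
 * (MTU/USER_MTU/pause) and GID_TABLE modifiers are honoured, and guest GIDs
 * are merged into the shadow GID table after duplicate checks; for IB ports
 * the capability masks of all functions are aggregated before the command is
 * forwarded to firmware.
 */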
  1170. static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
  1171. u8 op_mod, struct mlx4_cmd_mailbox *inbox)
  1172. {
  1173. struct mlx4_priv *priv = mlx4_priv(dev);
  1174. struct mlx4_port_info *port_info;
  1175. struct mlx4_set_port_rqp_calc_context *qpn_context;
  1176. struct mlx4_set_port_general_context *gen_context;
  1177. struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
  1178. int reset_qkey_viols;
  1179. int port;
  1180. int is_eth;
  1181. int num_gids;
  1182. int base;
  1183. u32 in_modifier;
  1184. u32 promisc;
  1185. int err;
  1186. int i, j;
  1187. int offset;
  1188. __be32 agg_cap_mask;
  1189. __be32 slave_cap_mask;
  1190. __be32 new_cap_mask;
  1191. port = in_mod & 0xff;
  1192. in_modifier = in_mod >> 8;
  1193. is_eth = op_mod;
  1194. port_info = &priv->port[port];
  1195. /* Slaves cannot perform SET_PORT operations,
  1196. * except for changing MTU and USER_MTU.
  1197. */
  1198. if (is_eth) {
  1199. if (slave != dev->caps.function &&
  1200. in_modifier != MLX4_SET_PORT_GENERAL &&
  1201. in_modifier != MLX4_SET_PORT_GID_TABLE) {
  1202. mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
  1203. slave);
  1204. return -EINVAL;
  1205. }
  1206. switch (in_modifier) {
  1207. case MLX4_SET_PORT_RQP_CALC:
  1208. qpn_context = inbox->buf;
  1209. qpn_context->base_qpn =
  1210. cpu_to_be32(port_info->base_qpn);
  1211. qpn_context->n_mac = 0x7;
  1212. promisc = be32_to_cpu(qpn_context->promisc) >>
  1213. SET_PORT_PROMISC_SHIFT;
  1214. qpn_context->promisc = cpu_to_be32(
  1215. promisc << SET_PORT_PROMISC_SHIFT |
  1216. port_info->base_qpn);
  1217. promisc = be32_to_cpu(qpn_context->mcast) >>
  1218. SET_PORT_MC_PROMISC_SHIFT;
  1219. qpn_context->mcast = cpu_to_be32(
  1220. promisc << SET_PORT_MC_PROMISC_SHIFT |
  1221. port_info->base_qpn);
  1222. break;
  1223. case MLX4_SET_PORT_GENERAL:
  1224. gen_context = inbox->buf;
  1225. if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
  1226. mlx4_en_set_port_mtu(dev, slave, port,
  1227. gen_context);
  1228. if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
  1229. mlx4_en_set_port_user_mtu(dev, slave, port,
  1230. gen_context);
  1231. if (gen_context->flags &
  1232. (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
  1233. mlx4_en_set_port_global_pause(dev, slave,
  1234. gen_context);
  1235. break;
  1236. case MLX4_SET_PORT_GID_TABLE:
  1237. /* change to MULTIPLE entries: number of guest's gids
  1238. * need a FOR-loop here over number of gids the guest has.
  1239. * 1. Check no duplicates in gids passed by slave
  1240. */
  1241. num_gids = mlx4_get_slave_num_gids(dev, slave, port);
  1242. base = mlx4_get_base_gid_ix(dev, slave, port);
  1243. gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
  1244. for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
  1245. if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
  1246. sizeof(zgid_entry)))
  1247. continue;
  1248. gid_entry_mb1 = gid_entry_mbox + 1;
  1249. for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
  1250. if (!memcmp(gid_entry_mb1->raw,
  1251. zgid_entry.raw, sizeof(zgid_entry)))
  1252. continue;
  1253. if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
  1254. sizeof(gid_entry_mbox->raw))) {
  1255. /* found duplicate */
  1256. return -EINVAL;
  1257. }
  1258. }
  1259. }
  1260. /* 2. Check that do not have duplicates in OTHER
  1261. * entries in the port GID table
  1262. */
  1263. mutex_lock(&(priv->port[port].gid_table.mutex));
  1264. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
  1265. if (i >= base && i < base + num_gids)
  1266. continue; /* don't compare to slave's current gids */
  1267. gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
  1268. if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
  1269. continue;
  1270. gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
  1271. for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
  1272. if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
  1273. sizeof(zgid_entry)))
  1274. continue;
  1275. if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
  1276. sizeof(gid_entry_tbl->raw))) {
  1277. /* found duplicate */
  1278. mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
  1279. slave, i);
  1280. mutex_unlock(&(priv->port[port].gid_table.mutex));
  1281. return -EINVAL;
  1282. }
  1283. }
  1284. }
  1285. /* insert slave GIDs with memcpy, starting at slave's base index */
  1286. gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
  1287. for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
  1288. memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
  1289. gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
  1290. /* Now, copy roce port gids table to current mailbox for passing to FW */
  1291. gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
  1292. for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
  1293. memcpy(gid_entry_mbox->raw,
  1294. priv->port[port].gid_table.roce_gids[i].raw,
  1295. MLX4_ROCE_GID_ENTRY_SIZE);
  1296. err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
  1297. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1298. MLX4_CMD_NATIVE);
  1299. mutex_unlock(&(priv->port[port].gid_table.mutex));
  1300. return err;
  1301. }
  1302. return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
  1303. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1304. MLX4_CMD_NATIVE);
  1305. }
  1306. /* Slaves are not allowed to SET_PORT beacon (LED) blink */
  1307. if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
  1308. mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
  1309. return -EPERM;
  1310. }
  1311. /* For IB, we only consider:
  1312. * - The capability mask, which is set to the aggregate of all
  1313. * slave function capabilities
  1314. * - The QKey violatin counter - reset according to each request.
  1315. */
  1316. if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
  1317. reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
  1318. new_cap_mask = ((__be32 *) inbox->buf)[2];
  1319. } else {
  1320. reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
  1321. new_cap_mask = ((__be32 *) inbox->buf)[1];
  1322. }
  1323. /* slave may not set the IS_SM capability for the port */
  1324. if (slave != mlx4_master_func_num(dev) &&
  1325. (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
  1326. return -EINVAL;
  1327. /* No DEV_MGMT in multifunc mode */
  1328. if (mlx4_is_mfunc(dev) &&
  1329. (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
  1330. return -EINVAL;
  1331. agg_cap_mask = 0;
  1332. slave_cap_mask =
  1333. priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
  1334. priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
  1335. for (i = 0; i < dev->num_slaves; i++)
  1336. agg_cap_mask |=
  1337. priv->mfunc.master.slave_state[i].ib_cap_mask[port];
  1338. /* only clear mailbox for guests. Master may be setting
  1339. * MTU or PKEY table size
  1340. */
  1341. if (slave != dev->caps.function)
  1342. memset(inbox->buf, 0, 256);
  1343. if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
  1344. *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
  1345. ((__be32 *) inbox->buf)[2] = agg_cap_mask;
  1346. } else {
  1347. ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
  1348. ((__be32 *) inbox->buf)[1] = agg_cap_mask;
  1349. }
  1350. err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
  1351. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  1352. if (err)
  1353. priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
  1354. slave_cap_mask;
  1355. return err;
  1356. }
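/* SET_PORT wrapper for slave requests: translate the slave's port number
 * (low byte of in_modifier) to the physical port before handing the
 * command to mlx4_common_set_port().
 */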
  1357. int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
  1358. struct mlx4_vhcr *vhcr,
  1359. struct mlx4_cmd_mailbox *inbox,
  1360. struct mlx4_cmd_mailbox *outbox,
  1361. struct mlx4_cmd_info *cmd)
  1362. {
  1363. int port = mlx4_slave_convert_port(
  1364. dev, slave, vhcr->in_modifier & 0xFF);
  1365. if (port < 0)
  1366. return -EINVAL;
  1367. vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
  1368. (port & 0xFF);
  1369. return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
  1370. vhcr->op_modifier, inbox);
  1371. }
  1372. /* bit locations for set port command with zero op modifier */
  1373. enum {
  1374. MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
  1375. MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
  1376. MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
  1377. MLX4_CHANGE_PORT_VL_CAP = 21,
  1378. MLX4_CHANGE_PORT_MTU_CAP = 22,
  1379. };
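/* Set IB port capabilities (a no-op for Ethernet ports): program the default
 * capability mask, optionally a new PKEY table size (master only), and the
 * MTU/VL caps. The VL cap is retried downward (8, 4, 2, 1) while the
 * firmware rejects the request with -ENOMEM.
 */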
  1380. int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
  1381. {
  1382. struct mlx4_cmd_mailbox *mailbox;
  1383. int err, vl_cap, pkey_tbl_flag = 0;
  1384. if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
  1385. return 0;
  1386. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1387. if (IS_ERR(mailbox))
  1388. return PTR_ERR(mailbox);
  1389. ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
  1390. if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
  1391. pkey_tbl_flag = 1;
  1392. ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
  1393. }
  1394. /* IB VL CAP enum isn't used by the firmware, just numerical values */
  1395. for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
  1396. ((__be32 *) mailbox->buf)[0] = cpu_to_be32(
  1397. (1 << MLX4_CHANGE_PORT_MTU_CAP) |
  1398. (1 << MLX4_CHANGE_PORT_VL_CAP) |
  1399. (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
  1400. (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
  1401. (vl_cap << MLX4_SET_PORT_VL_CAP));
  1402. err = mlx4_cmd(dev, mailbox->dma, port,
  1403. MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
  1404. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
  1405. if (err != -ENOMEM)
  1406. break;
  1407. }
  1408. mlx4_free_cmd_mailbox(dev, mailbox);
  1409. return err;
  1410. }
  1411. #define SET_PORT_ROCE_2_FLAGS 0x10
  1412. #define MLX4_SET_PORT_ROCE_V1_V2 0x2
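/* Program the general port context: MTU, global pause and priority
 * flow-control settings, and RoCE v1/v2 mode when the device supports it.
 * Global pause in a given direction is applied only when priority flow
 * control is not requested for that direction.
 */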
  1413. int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
  1414. u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
  1415. {
  1416. struct mlx4_cmd_mailbox *mailbox;
  1417. struct mlx4_set_port_general_context *context;
  1418. int err;
  1419. u32 in_mod;
  1420. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1421. if (IS_ERR(mailbox))
  1422. return PTR_ERR(mailbox);
  1423. context = mailbox->buf;
  1424. context->flags = SET_PORT_GEN_ALL_VALID;
  1425. context->mtu = cpu_to_be16(mtu);
  1426. context->pptx = (pptx * (!pfctx)) << 7;
  1427. context->pfctx = pfctx;
  1428. context->pprx = (pprx * (!pfcrx)) << 7;
  1429. context->pfcrx = pfcrx;
  1430. if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
  1431. context->flags |= SET_PORT_ROCE_2_FLAGS;
  1432. context->roce_mode |=
  1433. MLX4_SET_PORT_ROCE_V1_V2 << 4;
  1434. }
  1435. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1436. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1437. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1438. MLX4_CMD_WRAPPED);
  1439. mlx4_free_cmd_mailbox(dev, mailbox);
  1440. return err;
  1441. }
  1442. EXPORT_SYMBOL(mlx4_SET_PORT_general);
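/* Program the RQP calculation context (A0 steering mode only): base QPN,
 * number of MAC entries, unicast/multicast promiscuous QP settings and the
 * default/miss VLAN indexes.
 */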
  1443. int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
  1444. u8 promisc)
  1445. {
  1446. struct mlx4_cmd_mailbox *mailbox;
  1447. struct mlx4_set_port_rqp_calc_context *context;
  1448. int err;
  1449. u32 in_mod;
  1450. u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
  1451. MCAST_DIRECT : MCAST_DEFAULT;
  1452. if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
  1453. return 0;
  1454. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1455. if (IS_ERR(mailbox))
  1456. return PTR_ERR(mailbox);
  1457. context = mailbox->buf;
  1458. context->base_qpn = cpu_to_be32(base_qpn);
  1459. context->n_mac = dev->caps.log_num_macs;
  1460. context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
  1461. base_qpn);
  1462. context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
  1463. base_qpn);
  1464. context->intra_no_vlan = 0;
  1465. context->no_vlan = MLX4_NO_VLAN_IDX;
  1466. context->intra_vlan_miss = 0;
  1467. context->vlan_miss = MLX4_VLAN_MISS_IDX;
  1468. in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
  1469. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1470. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1471. MLX4_CMD_WRAPPED);
  1472. mlx4_free_cmd_mailbox(dev, mailbox);
  1473. return err;
  1474. }
  1475. EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
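/* Report a user-requested MTU to the firmware through the general port
 * context (V_USER_MTU field).
 */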
  1476. int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
  1477. {
  1478. struct mlx4_cmd_mailbox *mailbox;
  1479. struct mlx4_set_port_general_context *context;
  1480. u32 in_mod;
  1481. int err;
  1482. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1483. if (IS_ERR(mailbox))
  1484. return PTR_ERR(mailbox);
  1485. context = mailbox->buf;
  1486. context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
  1487. context->user_mtu = cpu_to_be16(user_mtu);
  1488. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1489. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1490. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1491. MLX4_CMD_WRAPPED);
  1492. mlx4_free_cmd_mailbox(dev, mailbox);
  1493. return err;
  1494. }
  1495. EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
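/* Configure whether the port ignores the FCS check on received frames,
 * through the general port context (V_IGNORE_FCS field).
 */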
  1496. int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
  1497. {
  1498. struct mlx4_cmd_mailbox *mailbox;
  1499. struct mlx4_set_port_general_context *context;
  1500. u32 in_mod;
  1501. int err;
  1502. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1503. if (IS_ERR(mailbox))
  1504. return PTR_ERR(mailbox);
  1505. context = mailbox->buf;
  1506. context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
  1507. if (ignore_fcs_value)
  1508. context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
  1509. else
  1510. context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
  1511. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1512. err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
  1513. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  1514. mlx4_free_cmd_mailbox(dev, mailbox);
  1515. return err;
  1516. }
  1517. EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
  1518. enum {
  1519. VXLAN_ENABLE_MODIFY = 1 << 7,
  1520. VXLAN_STEERING_MODIFY = 1 << 6,
  1521. VXLAN_ENABLE = 1 << 7,
  1522. };
  1523. struct mlx4_set_port_vxlan_context {
  1524. u32 reserved1;
  1525. u8 modify_flags;
  1526. u8 reserved2;
  1527. u8 enable_flags;
  1528. u8 steering;
  1529. };
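/* Configure VXLAN offload for the port: the enable bit and the steering
 * mode, with the modify flags marking both fields as valid.
 */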
  1530. int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
  1531. {
  1532. int err;
  1533. u32 in_mod;
  1534. struct mlx4_cmd_mailbox *mailbox;
  1535. struct mlx4_set_port_vxlan_context *context;
  1536. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1537. if (IS_ERR(mailbox))
  1538. return PTR_ERR(mailbox);
  1539. context = mailbox->buf;
  1540. memset(context, 0, sizeof(*context));
  1541. context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
  1542. if (enable)
  1543. context->enable_flags = VXLAN_ENABLE;
  1544. context->steering = steering;
  1545. in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
  1546. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1547. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1548. MLX4_CMD_NATIVE);
  1549. mlx4_free_cmd_mailbox(dev, mailbox);
  1550. return err;
  1551. }
  1552. EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
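/* Make the port beacon LED blink for the requested duration. */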
  1553. int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
  1554. {
  1555. int err;
  1556. struct mlx4_cmd_mailbox *mailbox;
  1557. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1558. if (IS_ERR(mailbox))
  1559. return PTR_ERR(mailbox);
  1560. *((__be32 *)mailbox->buf) = cpu_to_be32(time);
  1561. err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
  1562. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1563. MLX4_CMD_NATIVE);
  1564. mlx4_free_cmd_mailbox(dev, mailbox);
  1565. return err;
  1566. }
  1567. EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
  1568. int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1569. struct mlx4_vhcr *vhcr,
  1570. struct mlx4_cmd_mailbox *inbox,
  1571. struct mlx4_cmd_mailbox *outbox,
  1572. struct mlx4_cmd_info *cmd)
  1573. {
  1574. int err = 0;
  1575. return err;
  1576. }
  1577. int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
  1578. u64 mac, u64 clear, u8 mode)
  1579. {
  1580. return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
  1581. MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
  1582. MLX4_CMD_WRAPPED);
  1583. }
  1584. EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
  1585. int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1586. struct mlx4_vhcr *vhcr,
  1587. struct mlx4_cmd_mailbox *inbox,
  1588. struct mlx4_cmd_mailbox *outbox,
  1589. struct mlx4_cmd_info *cmd)
  1590. {
  1591. int err = 0;
  1592. return err;
  1593. }
  1594. int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
  1595. struct mlx4_vhcr *vhcr,
  1596. struct mlx4_cmd_mailbox *inbox,
  1597. struct mlx4_cmd_mailbox *outbox,
  1598. struct mlx4_cmd_info *cmd)
  1599. {
  1600. return 0;
  1601. }
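/* Reverse lookup: given a RoCE GID, find the slave (function) that owns it
 * on this port, based on the layout of the per-port RoCE GID table
 * (PF entries first, then the per-VF ranges).
 */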
  1602. int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
  1603. int *slave_id)
  1604. {
  1605. struct mlx4_priv *priv = mlx4_priv(dev);
  1606. int i, found_ix = -1;
  1607. int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
  1608. struct mlx4_slaves_pport slaves_pport;
  1609. unsigned num_vfs;
  1610. int slave_gid;
  1611. if (!mlx4_is_mfunc(dev))
  1612. return -EINVAL;
  1613. slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
  1614. num_vfs = bitmap_weight(slaves_pport.slaves,
  1615. dev->persist->num_vfs + 1) - 1;
  1616. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
  1617. if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
  1618. MLX4_ROCE_GID_ENTRY_SIZE)) {
  1619. found_ix = i;
  1620. break;
  1621. }
  1622. }
  1623. if (found_ix >= 0) {
  1624. /* Calculate a slave_gid which is the slave number in the gid
  1625. * table and not a globally unique slave number.
  1626. */
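/* The first MLX4_ROCE_PF_GIDS entries belong to the PF; the remaining
 * vf_gids entries are split among the VFs, with the first
 * (vf_gids % num_vfs) VFs each holding one extra entry.
 */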
  1627. if (found_ix < MLX4_ROCE_PF_GIDS)
  1628. slave_gid = 0;
  1629. else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
  1630. (vf_gids / num_vfs + 1))
  1631. slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
  1632. (vf_gids / num_vfs + 1)) + 1;
  1633. else
  1634. slave_gid =
  1635. ((found_ix - MLX4_ROCE_PF_GIDS -
  1636. ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
  1637. (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
  1638. /* Calculate the globally unique slave id */
  1639. if (slave_gid) {
  1640. struct mlx4_active_ports exclusive_ports;
  1641. struct mlx4_active_ports actv_ports;
  1642. struct mlx4_slaves_pport slaves_pport_actv;
  1643. unsigned max_port_p_one;
  1644. int num_vfs_before = 0;
  1645. int candidate_slave_gid;
1646. /* Count how many VFs are assigned to the lower-numbered ports, if any */
  1647. for (i = 1; i < port; i++) {
  1648. bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
  1649. set_bit(i - 1, exclusive_ports.ports);
  1650. slaves_pport_actv =
  1651. mlx4_phys_to_slaves_pport_actv(
  1652. dev, &exclusive_ports);
  1653. num_vfs_before += bitmap_weight(
  1654. slaves_pport_actv.slaves,
  1655. dev->persist->num_vfs + 1);
  1656. }
  1657. /* candidate_slave_gid isn't necessarily the correct slave, but
  1658. * it has the same number of ports and is assigned to the same
  1659. * ports as the real slave we're looking for. On dual port VF,
  1660. * slave_gid = [single port VFs on port <port>] +
  1661. * [offset of the current slave from the first dual port VF] +
  1662. * 1 (for the PF).
  1663. */
  1664. candidate_slave_gid = slave_gid + num_vfs_before;
  1665. actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
  1666. max_port_p_one = find_first_bit(
  1667. actv_ports.ports, dev->caps.num_ports) +
  1668. bitmap_weight(actv_ports.ports,
  1669. dev->caps.num_ports) + 1;
  1670. /* Calculate the real slave number */
  1671. for (i = 1; i < max_port_p_one; i++) {
  1672. if (i == port)
  1673. continue;
  1674. bitmap_zero(exclusive_ports.ports,
  1675. dev->caps.num_ports);
  1676. set_bit(i - 1, exclusive_ports.ports);
  1677. slaves_pport_actv =
  1678. mlx4_phys_to_slaves_pport_actv(
  1679. dev, &exclusive_ports);
  1680. slave_gid += bitmap_weight(
  1681. slaves_pport_actv.slaves,
  1682. dev->persist->num_vfs + 1);
  1683. }
  1684. }
  1685. *slave_id = slave_gid;
  1686. }
  1687. return (found_ix >= 0) ? 0 : -EINVAL;
  1688. }
  1689. EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
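/* Master only: copy the RoCE GID stored at index slave_id in the port's
 * GID table into the caller's buffer.
 */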
  1690. int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
  1691. u8 *gid)
  1692. {
  1693. struct mlx4_priv *priv = mlx4_priv(dev);
  1694. if (!mlx4_is_master(dev))
  1695. return -EINVAL;
  1696. memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
  1697. MLX4_ROCE_GID_ENTRY_SIZE);
  1698. return 0;
  1699. }
  1700. EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
  1701. /* Cable Module Info */
  1702. #define MODULE_INFO_MAX_READ 48
  1703. #define I2C_ADDR_LOW 0x50
  1704. #define I2C_ADDR_HIGH 0x51
  1705. #define I2C_PAGE_SIZE 256
  1706. /* Module Info Data */
  1707. struct mlx4_cable_info {
  1708. u8 i2c_addr;
  1709. u8 page_num;
  1710. __be16 dev_mem_address;
  1711. __be16 reserved1;
  1712. __be16 size;
  1713. __be32 reserved2[2];
  1714. u8 data[MODULE_INFO_MAX_READ];
  1715. };
  1716. enum cable_info_err {
  1717. CABLE_INF_INV_PORT = 0x1,
  1718. CABLE_INF_OP_NOSUP = 0x2,
  1719. CABLE_INF_NOT_CONN = 0x3,
  1720. CABLE_INF_NO_EEPRM = 0x4,
  1721. CABLE_INF_PAGE_ERR = 0x5,
  1722. CABLE_INF_INV_ADDR = 0x6,
  1723. CABLE_INF_I2C_ADDR = 0x7,
  1724. CABLE_INF_QSFP_VIO = 0x8,
  1725. CABLE_INF_I2C_BUSY = 0x9,
  1726. };
  1727. #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
  1728. static inline const char *cable_info_mad_err_str(u16 mad_status)
  1729. {
  1730. u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
  1731. switch (err) {
  1732. case CABLE_INF_INV_PORT:
  1733. return "invalid port selected";
  1734. case CABLE_INF_OP_NOSUP:
  1735. return "operation not supported for this port (the port is of type CX4 or internal)";
  1736. case CABLE_INF_NOT_CONN:
  1737. return "cable is not connected";
  1738. case CABLE_INF_NO_EEPRM:
  1739. return "the connected cable has no EPROM (passive copper cable)";
  1740. case CABLE_INF_PAGE_ERR:
  1741. return "page number is greater than 15";
  1742. case CABLE_INF_INV_ADDR:
  1743. return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
  1744. case CABLE_INF_I2C_ADDR:
  1745. return "invalid I2C slave address";
  1746. case CABLE_INF_QSFP_VIO:
  1747. return "at least one cable violates the QSFP specification and ignores the modsel signal";
  1748. case CABLE_INF_I2C_BUSY:
  1749. return "I2C bus is constantly busy";
  1750. }
  1751. return "Unknown Error";
  1752. }
  1753. /**
  1754. * mlx4_get_module_info - Read cable module eeprom data
  1755. * @dev: mlx4_dev.
  1756. * @port: port number.
  1757. * @offset: byte offset in eeprom to start reading data from.
1758. * @size: number of bytes to read.
  1759. * @data: output buffer to put the requested data into.
  1760. *
1761. * Reads cable module eeprom data and copies the result into the
1762. * data pointer parameter.
1763. * Returns the number of bytes read on success or a negative error
1764. * code.
  1765. */
  1766. int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
  1767. u16 offset, u16 size, u8 *data)
  1768. {
  1769. struct mlx4_cmd_mailbox *inbox, *outbox;
  1770. struct mlx4_mad_ifc *inmad, *outmad;
  1771. struct mlx4_cable_info *cable_info;
  1772. u16 i2c_addr;
  1773. int ret;
  1774. if (size > MODULE_INFO_MAX_READ)
  1775. size = MODULE_INFO_MAX_READ;
  1776. inbox = mlx4_alloc_cmd_mailbox(dev);
  1777. if (IS_ERR(inbox))
  1778. return PTR_ERR(inbox);
  1779. outbox = mlx4_alloc_cmd_mailbox(dev);
  1780. if (IS_ERR(outbox)) {
  1781. mlx4_free_cmd_mailbox(dev, inbox);
  1782. return PTR_ERR(outbox);
  1783. }
  1784. inmad = (struct mlx4_mad_ifc *)(inbox->buf);
  1785. outmad = (struct mlx4_mad_ifc *)(outbox->buf);
  1786. inmad->method = 0x1; /* Get */
  1787. inmad->class_version = 0x1;
  1788. inmad->mgmt_class = 0x1;
  1789. inmad->base_version = 0x1;
  1790. inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
  1791. if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
1792. /* Cross-page reads are not allowed;
1793. * clamp the read to end at offset 256 of the low page
  1794. */
  1795. size -= offset + size - I2C_PAGE_SIZE;
  1796. i2c_addr = I2C_ADDR_LOW;
  1797. if (offset >= I2C_PAGE_SIZE) {
  1798. /* Reset offset to high page */
  1799. i2c_addr = I2C_ADDR_HIGH;
  1800. offset -= I2C_PAGE_SIZE;
  1801. }
  1802. cable_info = (struct mlx4_cable_info *)inmad->data;
  1803. cable_info->dev_mem_address = cpu_to_be16(offset);
  1804. cable_info->page_num = 0;
  1805. cable_info->i2c_addr = i2c_addr;
  1806. cable_info->size = cpu_to_be16(size);
  1807. ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
  1808. MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
  1809. MLX4_CMD_NATIVE);
  1810. if (ret)
  1811. goto out;
  1812. if (be16_to_cpu(outmad->status)) {
1813. /* MAD returned with bad status */
  1814. ret = be16_to_cpu(outmad->status);
  1815. mlx4_warn(dev,
  1816. "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
  1817. 0xFF60, port, i2c_addr, offset, size,
  1818. ret, cable_info_mad_err_str(ret));
  1819. if (i2c_addr == I2C_ADDR_HIGH &&
  1820. MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
  1821. /* Some SFP cables do not support i2c slave
  1822. * address 0x51 (high page), abort silently.
  1823. */
  1824. ret = 0;
  1825. else
  1826. ret = -ret;
  1827. goto out;
  1828. }
  1829. cable_info = (struct mlx4_cable_info *)outmad->data;
  1830. memcpy(data, cable_info->data, size);
  1831. ret = size;
  1832. out:
  1833. mlx4_free_cmd_mailbox(dev, inbox);
  1834. mlx4_free_cmd_mailbox(dev, outbox);
  1835. return ret;
  1836. }
  1837. EXPORT_SYMBOL(mlx4_get_module_info);
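/* Return the maximum number of Ethernet traffic classes, falling back to
 * MLX4_TC_MAX_NUMBER when the firmware does not report the capability.
 */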
  1838. int mlx4_max_tc(struct mlx4_dev *dev)
  1839. {
  1840. u8 num_tc = dev->caps.max_tc_eth;
  1841. if (!num_tc)
  1842. num_tc = MLX4_TC_MAX_NUMBER;
  1843. return num_tc;
  1844. }
  1845. EXPORT_SYMBOL(mlx4_max_tc);